fix linting issues and rename files for clarity

commit d618deafa3
Author: dledoux
Date: 2025-09-08 20:09:46 -05:00
8 changed files with 529 additions and 213 deletions

[Image file changed: 5.7 KiB before, 5.7 KiB after]

[Image file changed: 2.7 KiB before, 2.7 KiB after]


@@ -21,7 +21,7 @@ export class ServerlessInference implements INodeType {
outputs: [NodeConnectionType.Main],
credentials: [
{
name: 'digitalOceanServerlessInference',
name: 'digitalOceanServerlessInferenceApi',
required: true,
},
],
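
The credential name gains an `Api` suffix in this hunk. In n8n, the string in a node's `credentials` array must match the `name` field of the corresponding credential class, and community-node linting conventions expect credential names to end in `Api`. A minimal sketch of the matching credential class, assuming the conventional shape (the actual credential file is not part of this diff):

```typescript
import type { ICredentialType, INodeProperties } from 'n8n-workflow';

// Hypothetical credential class; the real one is not shown in this commit.
export class DigitalOceanServerlessInferenceApi implements ICredentialType {
	// Must match the `name` used in the node's `credentials` array above.
	name = 'digitalOceanServerlessInferenceApi';
	displayName = 'DigitalOcean Serverless Inference API';
	properties: INodeProperties[] = [
		{
			displayName: 'API Key',
			name: 'apiKey',
			type: 'string',
			typeOptions: { password: true },
			default: '',
		},
	];
}
```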


@@ -17,7 +17,7 @@ export const textOperations: INodeProperties[] = [
{
name: 'Complete',
value: 'complete',
action: 'Create a Text Completion',
action: 'Create a text completion',
description: 'Create one or more completions for a given text',
routing: {
request: {
@@ -37,8 +37,7 @@ const completeOperations: INodeProperties[] = [
displayName: 'Model',
name: 'model',
type: 'options',
description:
'The model which will generate the completion. <a href="https://docs.digitalocean.com/products/gradient-ai-platform/details/models/">Learn more</a>',
description: 'The model which will generate the completion. <a href="https://docs.digitalocean.com/products/gradient-ai-platform/details/models/">Learn more</a>.',
displayOptions: {
show: {
operation: ['complete'],
@@ -84,7 +83,7 @@ const completeOperations: INodeProperties[] = [
property: 'model',
},
},
default: 'openai-gpt-oss-120b',
default: '',
},
{
displayName: 'Input Type',
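
The hunk above also clears the hard-coded `default: 'openai-gpt-oss-120b'` to an empty string. When an options parameter's choices are loaded dynamically, a fixed default may not appear in the loaded list, so an empty default that forces an explicit selection is the safer pattern. A hedged sketch of that pattern; the `loadOptionsMethod` name is assumed and this node may use declarative routing-based option loading instead:

```typescript
import type { INodeProperties } from 'n8n-workflow';

// Sketch only: an options parameter whose choices are fetched at runtime.
// A hard-coded default might not exist in the fetched list, so it stays empty.
const model: INodeProperties = {
	displayName: 'Model',
	name: 'model',
	type: 'options',
	typeOptions: {
		loadOptionsMethod: 'getModels', // assumed method name, not from this diff
	},
	default: '', // empty default: the user must pick one of the loaded options
};
```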
@@ -230,6 +229,69 @@ const sharedOperations: INodeProperties[] = [
},
},
options: [
{
displayName: 'Frequency Penalty',
name: 'frequencyPenalty',
description: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far.',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 2,
minValue: -2,
numberPrecision: 2,
},
routing: {
send: {
type: 'body',
property: 'frequency_penalty',
},
},
},
{
displayName: 'Logit Bias',
name: 'logitBias',
description: 'Modify the likelihood of specified tokens appearing in the completion (JSON object mapping token IDs to bias values)',
type: 'string',
default: '',
placeholder: '{"50256": -100}',
routing: {
send: {
type: 'body',
property: 'logit_bias',
value: '={{$parameter.logitBias ? JSON.parse($parameter.logitBias) : undefined}}',
},
},
},
{
displayName: 'Logprobs',
name: 'logprobs',
description: 'Whether to return log probabilities of the output tokens',
type: 'boolean',
default: false,
routing: {
send: {
type: 'body',
property: 'logprobs',
},
},
},
{
displayName: 'Max Completion Tokens',
name: 'maxCompletionTokens',
description:
'The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.',
type: 'number',
default: undefined,
typeOptions: {
minValue: 1,
},
routing: {
send: {
type: 'body',
property: 'max_completion_tokens',
},
},
},
{
displayName: 'Maximum Number of Tokens',
name: 'maxTokens',
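
Several of the options added in this hunk route their values through n8n expressions rather than sending the raw string. For `logitBias`, the expression parses the JSON text into an object, or omits the field entirely when the parameter is empty. The same transform written as plain TypeScript, for illustration:

```typescript
// Equivalent of '={{$parameter.logitBias ? JSON.parse($parameter.logitBias) : undefined}}'.
// Note that JSON.parse throws on malformed input, so invalid JSON in this
// field would fail at execution time.
const logitBiasParam = '{"50256": -100}'; // example value from the placeholder above
const logit_bias: Record<string, number> | undefined = logitBiasParam
	? JSON.parse(logitBiasParam)
	: undefined;
// logit_bias -> { '50256': -100 }
```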
@@ -254,53 +316,17 @@ const sharedOperations: INodeProperties[] = [
},
},
{
displayName: 'Max Completion Tokens',
name: 'maxCompletionTokens',
description:
'The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.',
type: 'number',
default: undefined,
typeOptions: {
minValue: 1,
},
displayName: 'Metadata',
name: 'metadata',
description: 'Developer-defined metadata to attach to the completion (JSON object)',
type: 'string',
default: '',
placeholder: '{"purpose": "testing"}',
routing: {
send: {
type: 'body',
property: 'max_completion_tokens',
},
},
},
{
displayName: 'Temperature',
name: 'temperature',
default: 0.7,
typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 2 },
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
routing: {
send: {
type: 'body',
property: 'temperature',
},
},
},
{
displayName: 'Top P',
name: 'topP',
description:
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 1,
minValue: 0,
numberPrecision: 3,
},
routing: {
send: {
type: 'body',
property: 'top_p',
property: 'metadata',
value: '={{$parameter.metadata ? JSON.parse($parameter.metadata) : undefined}}',
},
},
},
@@ -321,10 +347,43 @@ const sharedOperations: INodeProperties[] = [
},
},
},
{
displayName: 'Presence Penalty',
name: 'presencePenalty',
description: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far.',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 2,
minValue: -2,
numberPrecision: 2,
},
routing: {
send: {
type: 'body',
property: 'presence_penalty',
},
},
},
{
displayName: 'Stop Sequences',
name: 'stop',
description: 'Up to 4 sequences where the API will stop generating further tokens',
type: 'string',
default: '',
placeholder: 'e.g. \\n, Human:, AI:',
routing: {
send: {
type: 'body',
property: 'stop',
value: '={{$parameter.stop ? $parameter.stop.split(",").map(s => s.trim()) : undefined}}',
},
},
},
{
displayName: 'Stream',
name: 'stream',
description: 'If set, partial message deltas will be sent, like in ChatGPT',
description: 'Whether partial message deltas will be sent, like in ChatGPT',
type: 'boolean',
default: false,
routing: {
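
The `stop` routing added above turns a comma-separated string into the array the API expects, again omitting the field when the parameter is empty. As plain TypeScript:

```typescript
// Equivalent of '={{$parameter.stop ? $parameter.stop.split(",").map(s => s.trim()) : undefined}}'.
const stopParam = 'Human:, AI:'; // illustrative value
const stop: string[] | undefined = stopParam
	? stopParam.split(',').map((s) => s.trim())
	: undefined;
// stop -> ['Human:', 'AI:']
```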
@@ -349,7 +408,7 @@ const sharedOperations: INodeProperties[] = [
{
displayName: 'Include Usage',
name: 'includeUsage',
description: 'If set, an additional chunk will be streamed before the data: [DONE] message',
description: 'Whether to include an additional chunk before the data: [DONE] message',
type: 'boolean',
default: false,
},
@@ -363,133 +422,71 @@ const sharedOperations: INodeProperties[] = [
},
},
{
displayName: 'Stop Sequences',
name: 'stop',
description: 'Up to 4 sequences where the API will stop generating further tokens',
displayName: 'Temperature',
name: 'temperature',
default: 0.7,
typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 2 },
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
routing: {
send: {
type: 'body',
property: 'temperature',
},
},
},
{
displayName: 'Tool Choice',
name: 'toolChoice',
description: 'Controls which (if any) tool is called by the model',
type: 'options',
options: [
{
name: 'Auto',
value: 'auto',
description: 'The model can pick between generating a message or calling one or more tools',
},
{
name: 'None',
value: 'none',
description: 'The model will not call any tool and instead generates a message',
},
{
name: 'Required',
value: 'required',
description: 'The model must call one or more tools',
},
{
name: 'Function',
value: 'function',
description: 'Specifies a particular tool via {"type": "function", "function": {"name": "my_function"}}',
},
],
default: 'auto',
routing: {
send: {
type: 'body',
property: 'tool_choice',
},
},
},
{
displayName: 'Tool Choice Function Name',
name: 'toolChoiceFunctionName',
description: 'The name of the function to call when tool choice is set to function',
type: 'string',
default: '',
placeholder: 'e.g. \\n, Human:, AI:',
routing: {
send: {
type: 'body',
property: 'stop',
value: '={{$parameter.stop ? $parameter.stop.split(",").map(s => s.trim()) : undefined}}',
},
},
},
{
displayName: 'Presence Penalty',
name: 'presencePenalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 2,
minValue: -2,
numberPrecision: 2,
},
routing: {
send: {
type: 'body',
property: 'presence_penalty',
},
},
},
{
displayName: 'Frequency Penalty',
name: 'frequencyPenalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 2,
minValue: -2,
numberPrecision: 2,
},
routing: {
send: {
type: 'body',
property: 'frequency_penalty',
},
},
},
{
displayName: 'Logprobs',
name: 'logprobs',
description: 'Whether to return log probabilities of the output tokens',
type: 'boolean',
default: false,
routing: {
send: {
type: 'body',
property: 'logprobs',
},
},
},
{
displayName: 'Top Logprobs',
name: 'topLogprobs',
description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position',
type: 'number',
default: undefined,
displayOptions: {
show: {
logprobs: [true],
toolChoice: ['function'],
},
},
typeOptions: {
minValue: 0,
maxValue: 20,
},
routing: {
send: {
type: 'body',
property: 'top_logprobs',
},
},
},
{
displayName: 'User Identifier',
name: 'user',
description: 'A unique identifier representing your end-user, which can help monitor and detect abuse',
type: 'string',
default: '',
routing: {
send: {
type: 'body',
property: 'user',
},
},
},
{
displayName: 'Logit Bias',
name: 'logitBias',
description: 'Modify the likelihood of specified tokens appearing in the completion (JSON object mapping token IDs to bias values)',
type: 'string',
default: '',
placeholder: '{"50256": -100}',
routing: {
send: {
type: 'body',
property: 'logit_bias',
value: '={{$parameter.logitBias ? JSON.parse($parameter.logitBias) : undefined}}',
},
},
},
{
displayName: 'Metadata',
name: 'metadata',
description: 'Developer-defined metadata to attach to the completion (JSON object)',
type: 'string',
default: '',
placeholder: '{"purpose": "testing"}',
routing: {
send: {
type: 'body',
property: 'metadata',
value: '={{$parameter.metadata ? JSON.parse($parameter.metadata) : undefined}}',
property: 'tool_choice',
value: '={{"type": "function", "function": {"name": $parameter.toolChoiceFunctionName}}}',
},
},
},
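
When Tool Choice is set to Function, the expression above wraps the chosen function name into the object shape the API expects instead of sending a bare string. For illustration:

```typescript
// Equivalent of '={{"type": "function", "function": {"name": $parameter.toolChoiceFunctionName}}}'.
const toolChoiceFunctionName = 'my_function'; // illustrative value
const tool_choice = {
	type: 'function',
	function: { name: toolChoiceFunctionName },
};
// tool_choice -> { type: 'function', function: { name: 'my_function' } }
```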
@@ -553,56 +550,55 @@ const sharedOperations: INodeProperties[] = [
},
},
{
displayName: 'Tool Choice',
name: 'toolChoice',
description: 'Controls which (if any) tool is called by the model',
type: 'options',
options: [
{
name: 'Auto',
value: 'auto',
description: 'The model can pick between generating a message or calling one or more tools',
displayName: 'Top Logprobs',
name: 'topLogprobs',
description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position',
type: 'number',
default: undefined,
displayOptions: {
show: {
logprobs: [true],
},
{
name: 'None',
value: 'none',
description: 'The model will not call any tool and instead generates a message',
},
{
name: 'Required',
value: 'required',
description: 'The model must call one or more tools',
},
{
name: 'Function',
value: 'function',
description: 'Specifies a particular tool via {"type": "function", "function": {"name": "my_function"}}',
},
],
default: 'auto',
},
typeOptions: {
minValue: 0,
maxValue: 20,
},
routing: {
send: {
type: 'body',
property: 'tool_choice',
property: 'top_logprobs',
},
},
},
{
displayName: 'Tool Choice Function Name',
name: 'toolChoiceFunctionName',
description: 'The name of the function to call when tool choice is set to function',
type: 'string',
default: '',
displayOptions: {
show: {
toolChoice: ['function'],
},
displayName: 'Top P',
name: 'topP',
description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass',
type: 'number',
default: undefined,
typeOptions: {
maxValue: 1,
minValue: 0,
numberPrecision: 3,
},
routing: {
send: {
type: 'body',
property: 'tool_choice',
value: '={{"type": "function", "function": {"name": $parameter.toolChoiceFunctionName}}}',
property: 'top_p',
},
},
},
{
displayName: 'User Identifier',
name: 'user',
description: 'A unique identifier representing your end-user, which can help monitor and detect abuse',
type: 'string',
default: '',
routing: {
send: {
type: 'body',
property: 'user',
},
},
},
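
Taken together, each `routing.send` entry above contributes one snake_case field to the request body. A hedged sketch of a body these options could assemble; the field names come from the `property` values in the diff, while the values are illustrative only:

```typescript
// Illustrative request body; not taken from the commit itself.
const body = {
	model: 'openai-gpt-oss-120b',
	temperature: 0.7,
	top_p: 0.9,
	frequency_penalty: 0,
	presence_penalty: 0,
	max_completion_tokens: 256,
	stop: ['Human:', 'AI:'],
	logprobs: true,
	top_logprobs: 5,
	user: 'user-1234',
};
```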