diff --git a/credentials/DigitalOceanServerlessInference.credentials.ts b/credentials/DigitalOceanServerlessInferenceApi.credentials.ts
similarity index 81%
rename from credentials/DigitalOceanServerlessInference.credentials.ts
rename to credentials/DigitalOceanServerlessInferenceApi.credentials.ts
index 07173a1..1cb04f0 100644
--- a/credentials/DigitalOceanServerlessInference.credentials.ts
+++ b/credentials/DigitalOceanServerlessInferenceApi.credentials.ts
@@ -4,9 +4,9 @@ import {
INodeProperties,
} from 'n8n-workflow';
-export class DigitalOceanServerlessInference implements ICredentialType {
- name = 'digitalOceanServerlessInference';
- displayName = 'DigitalOcean Gradient™ AI Platform';
+export class DigitalOceanServerlessInferenceApi implements ICredentialType {
+ name = 'digitalOceanServerlessInferenceApi';
+ displayName = 'DigitalOcean Gradient™ AI Platform API';
documentationUrl = 'https://docs.digitalocean.com/products/gradient-ai-platform/how-to/use-serverless-inference/';
properties: INodeProperties[] = [
{
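The `Api` suffix added here follows the naming convention n8n expects for credential types, and the string in `name` is what the node references below. The hunk truncates before the property body; a minimal sketch of how the full credential class plausibly reads, assuming a single password-masked API-key field and generic bearer-token authentication (both assumed, not shown in this diff):

```typescript
import {
	IAuthenticateGeneric,
	ICredentialType,
	INodeProperties,
} from 'n8n-workflow';

export class DigitalOceanServerlessInferenceApi implements ICredentialType {
	name = 'digitalOceanServerlessInferenceApi';
	displayName = 'DigitalOcean Gradient™ AI Platform API';
	documentationUrl =
		'https://docs.digitalocean.com/products/gradient-ai-platform/how-to/use-serverless-inference/';
	properties: INodeProperties[] = [
		{
			// Assumed field: the diff cuts off before the property body.
			displayName: 'API Key',
			name: 'apiKey',
			type: 'string',
			typeOptions: { password: true }, // mask the key in the editor UI
			default: '',
		},
	];
	// Assumed: generic bearer-token auth, the usual pattern for API-key credentials.
	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {
			headers: {
				Authorization: '=Bearer {{$credentials.apiKey}}',
			},
		},
	};
}
```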
diff --git a/nodes/ServerlessInferenceNode/DO-Gradient-AI-Agentic-Cloud-logo.svg b/nodes/ServerlessInference/DO-Gradient-AI-Agentic-Cloud-logo.svg
similarity index 100%
rename from nodes/ServerlessInferenceNode/DO-Gradient-AI-Agentic-Cloud-logo.svg
rename to nodes/ServerlessInference/DO-Gradient-AI-Agentic-Cloud-logo.svg
diff --git a/nodes/ServerlessInferenceNode/DO-gradient-ai-logo-white.svg b/nodes/ServerlessInference/DO-gradient-ai-logo-white.svg
similarity index 100%
rename from nodes/ServerlessInferenceNode/DO-gradient-ai-logo-white.svg
rename to nodes/ServerlessInference/DO-gradient-ai-logo-white.svg
diff --git a/nodes/ServerlessInferenceNode/ServerlessInference.node.json b/nodes/ServerlessInference/DigitalOceanGradientServerlessInference.node.json
similarity index 100%
rename from nodes/ServerlessInferenceNode/ServerlessInference.node.json
rename to nodes/ServerlessInference/DigitalOceanGradientServerlessInference.node.json
diff --git a/nodes/ServerlessInferenceNode/ServerlessInference.node.ts b/nodes/ServerlessInference/DigitalOceanGradientServerlessInference.node.ts
similarity index 94%
rename from nodes/ServerlessInferenceNode/ServerlessInference.node.ts
rename to nodes/ServerlessInference/DigitalOceanGradientServerlessInference.node.ts
index 831aa46..c637672 100644
--- a/nodes/ServerlessInferenceNode/ServerlessInference.node.ts
+++ b/nodes/ServerlessInference/DigitalOceanGradientServerlessInference.node.ts
@@ -21,7 +21,7 @@ export class ServerlessInference implements INodeType {
outputs: [NodeConnectionType.Main],
credentials: [
{
- name: 'digitalOceanServerlessInference',
+ name: 'digitalOceanServerlessInferenceApi',
required: true,
},
],
@@ -31,7 +31,7 @@ export class ServerlessInference implements INodeType {
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
- 'User-Agent': 'Gradient/n8n/1.0.0',
+ 'User-Agent': 'Gradient/n8n/1.0.1',
},
},
properties: [
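Renames like this fail silently if the two strings drift apart: n8n links a node to its credential by exact match between the node's `credentials[].name` and the credential class's `name` field. A tiny consistency check that could live in a test (the relative import path is assumed from the repo layout):

```typescript
// Guards against the node and credential names drifting apart after a rename.
import { DigitalOceanServerlessInferenceApi } from '../../credentials/DigitalOceanServerlessInferenceApi.credentials';

const credential = new DigitalOceanServerlessInferenceApi();
const nodeCredentialName = 'digitalOceanServerlessInferenceApi'; // value from the node's credentials array

if (credential.name !== nodeCredentialName) {
	throw new Error(`Credential name mismatch: ${credential.name} vs ${nodeCredentialName}`);
}
```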
diff --git a/nodes/ServerlessInferenceNode/GenericFunctions.ts b/nodes/ServerlessInference/GenericFunctions.ts
similarity index 100%
rename from nodes/ServerlessInferenceNode/GenericFunctions.ts
rename to nodes/ServerlessInference/GenericFunctions.ts
diff --git a/nodes/ServerlessInferenceNode/TextDescription.ts b/nodes/ServerlessInference/TextDescription.ts
similarity index 94%
rename from nodes/ServerlessInferenceNode/TextDescription.ts
rename to nodes/ServerlessInference/TextDescription.ts
index a836c83..b738ed2 100644
--- a/nodes/ServerlessInferenceNode/TextDescription.ts
+++ b/nodes/ServerlessInference/TextDescription.ts
@@ -17,7 +17,7 @@ export const textOperations: INodeProperties[] = [
{
name: 'Complete',
value: 'complete',
- action: 'Create a Text Completion',
+ action: 'Create a text completion',
description: 'Create one or more completions for a given text',
routing: {
request: {
@@ -37,8 +37,7 @@ const completeOperations: INodeProperties[] = [
displayName: 'Model',
name: 'model',
type: 'options',
- description:
- 'The model which will generate the completion. Learn more',
+ description: 'The model which will generate the completion. Learn more.',
displayOptions: {
show: {
operation: ['complete'],
@@ -84,7 +83,7 @@ const completeOperations: INodeProperties[] = [
property: 'model',
},
},
- default: 'openai-gpt-oss-120b',
+ default: '',
},
{
displayName: 'Input Type',
@@ -230,6 +229,69 @@ const sharedOperations: INodeProperties[] = [
},
},
options: [
+ {
+ displayName: 'Frequency Penalty',
+ name: 'frequencyPenalty',
+ description: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far.',
+ type: 'number',
+ default: undefined,
+ typeOptions: {
+ maxValue: 2,
+ minValue: -2,
+ numberPrecision: 2,
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'frequency_penalty',
+ },
+ },
+ },
+ {
+ displayName: 'Logit Bias',
+ name: 'logitBias',
+ description: 'Modify the likelihood of specified tokens appearing in the completion (JSON object mapping token IDs to bias values)',
+ type: 'string',
+ default: '',
+ placeholder: '{"50256": -100}',
+ routing: {
+ send: {
+ type: 'body',
+ property: 'logit_bias',
+ value: '={{$parameter.logitBias ? JSON.parse($parameter.logitBias) : undefined}}',
+ },
+ },
+ },
+ {
+ displayName: 'Logprobs',
+ name: 'logprobs',
+ description: 'Whether to return log probabilities of the output tokens',
+ type: 'boolean',
+ default: false,
+ routing: {
+ send: {
+ type: 'body',
+ property: 'logprobs',
+ },
+ },
+ },
+ {
+ displayName: 'Max Completion Tokens',
+ name: 'maxCompletionTokens',
+ description:
+ 'The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.',
+ type: 'number',
+ default: undefined,
+ typeOptions: {
+ minValue: 1,
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'max_completion_tokens',
+ },
+ },
+ },
{
displayName: 'Maximum Number of Tokens',
name: 'maxTokens',
@@ -254,53 +316,17 @@ const sharedOperations: INodeProperties[] = [
},
},
{
- displayName: 'Max Completion Tokens',
- name: 'maxCompletionTokens',
- description:
- 'The maximum number of tokens that can be generated in the chat completion. This value can be used to control costs for text generated via API.',
- type: 'number',
- default: undefined,
- typeOptions: {
- minValue: 1,
- },
+ displayName: 'Metadata',
+ name: 'metadata',
+ description: 'Developer-defined metadata to attach to the completion (JSON object)',
+ type: 'string',
+ default: '',
+ placeholder: '{"purpose": "testing"}',
routing: {
send: {
type: 'body',
- property: 'max_completion_tokens',
- },
- },
- },
- {
- displayName: 'Temperature',
- name: 'temperature',
- default: 0.7,
- typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 2 },
- description:
- 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
- type: 'number',
- routing: {
- send: {
- type: 'body',
- property: 'temperature',
- },
- },
- },
- {
- displayName: 'Top P',
- name: 'topP',
- description:
- 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.',
- type: 'number',
- default: undefined,
- typeOptions: {
- maxValue: 1,
- minValue: 0,
- numberPrecision: 3,
- },
- routing: {
- send: {
- type: 'body',
- property: 'top_p',
+ property: 'metadata',
+ value: '={{$parameter.metadata ? JSON.parse($parameter.metadata) : undefined}}',
},
},
},
@@ -321,10 +347,43 @@ const sharedOperations: INodeProperties[] = [
},
},
},
+ {
+ displayName: 'Presence Penalty',
+ name: 'presencePenalty',
+ description: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far.',
+ type: 'number',
+ default: undefined,
+ typeOptions: {
+ maxValue: 2,
+ minValue: -2,
+ numberPrecision: 2,
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'presence_penalty',
+ },
+ },
+ },
+ {
+ displayName: 'Stop Sequences',
+ name: 'stop',
+ description: 'Up to 4 sequences where the API will stop generating further tokens',
+ type: 'string',
+ default: '',
+ placeholder: 'e.g. \\n, Human:, AI:',
+ routing: {
+ send: {
+ type: 'body',
+ property: 'stop',
+ value: '={{$parameter.stop ? $parameter.stop.split(",").map(s => s.trim()) : undefined}}',
+ },
+ },
+ },
{
displayName: 'Stream',
name: 'stream',
- description: 'If set, partial message deltas will be sent, like in ChatGPT',
+ description: 'Whether partial message deltas will be sent, like in ChatGPT',
type: 'boolean',
default: false,
routing: {
@@ -349,7 +408,7 @@ const sharedOperations: INodeProperties[] = [
{
displayName: 'Include Usage',
name: 'includeUsage',
- description: 'If set, an additional chunk will be streamed before the data: [DONE] message',
+ description: 'Whether to include an additional chunk before the data: [DONE] message',
type: 'boolean',
default: false,
},
@@ -363,133 +422,71 @@ const sharedOperations: INodeProperties[] = [
},
},
{
- displayName: 'Stop Sequences',
- name: 'stop',
- description: 'Up to 4 sequences where the API will stop generating further tokens',
+ displayName: 'Temperature',
+ name: 'temperature',
+ default: 0.7,
+ typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 2 },
+ description:
+ 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
+ type: 'number',
+ routing: {
+ send: {
+ type: 'body',
+ property: 'temperature',
+ },
+ },
+ },
+ {
+ displayName: 'Tool Choice',
+ name: 'toolChoice',
+ description: 'Controls which (if any) tool is called by the model',
+ type: 'options',
+ options: [
+ {
+ name: 'Auto',
+ value: 'auto',
+ description: 'The model can pick between generating a message or calling one or more tools',
+ },
+ {
+ name: 'None',
+ value: 'none',
+ description: 'The model will not call any tool and instead generates a message',
+ },
+ {
+ name: 'Required',
+ value: 'required',
+ description: 'The model must call one or more tools',
+ },
+ {
+ name: 'Function',
+ value: 'function',
+ description: 'Specifies a particular tool via {"type": "function", "function": {"name": "my_function"}}',
+ },
+ ],
+ default: 'auto',
+ routing: {
+ send: {
+ type: 'body',
+ property: 'tool_choice',
+ },
+ },
+ },
+ {
+ displayName: 'Tool Choice Function Name',
+ name: 'toolChoiceFunctionName',
+ description: 'The name of the function to call when tool choice is set to function',
type: 'string',
default: '',
- placeholder: 'e.g. \\n, Human:, AI:',
- routing: {
- send: {
- type: 'body',
- property: 'stop',
- value: '={{$parameter.stop ? $parameter.stop.split(",").map(s => s.trim()) : undefined}}',
- },
- },
- },
- {
- displayName: 'Presence Penalty',
- name: 'presencePenalty',
- description:
- 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far',
- type: 'number',
- default: undefined,
- typeOptions: {
- maxValue: 2,
- minValue: -2,
- numberPrecision: 2,
- },
- routing: {
- send: {
- type: 'body',
- property: 'presence_penalty',
- },
- },
- },
- {
- displayName: 'Frequency Penalty',
- name: 'frequencyPenalty',
- description:
- 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far',
- type: 'number',
- default: undefined,
- typeOptions: {
- maxValue: 2,
- minValue: -2,
- numberPrecision: 2,
- },
- routing: {
- send: {
- type: 'body',
- property: 'frequency_penalty',
- },
- },
- },
- {
- displayName: 'Logprobs',
- name: 'logprobs',
- description: 'Whether to return log probabilities of the output tokens',
- type: 'boolean',
- default: false,
- routing: {
- send: {
- type: 'body',
- property: 'logprobs',
- },
- },
- },
- {
- displayName: 'Top Logprobs',
- name: 'topLogprobs',
- description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position',
- type: 'number',
- default: undefined,
displayOptions: {
show: {
- logprobs: [true],
+ toolChoice: ['function'],
},
},
- typeOptions: {
- minValue: 0,
- maxValue: 20,
- },
routing: {
send: {
type: 'body',
- property: 'top_logprobs',
- },
- },
- },
- {
- displayName: 'User Identifier',
- name: 'user',
- description: 'A unique identifier representing your end-user, which can help monitor and detect abuse',
- type: 'string',
- default: '',
- routing: {
- send: {
- type: 'body',
- property: 'user',
- },
- },
- },
- {
- displayName: 'Logit Bias',
- name: 'logitBias',
- description: 'Modify the likelihood of specified tokens appearing in the completion (JSON object mapping token IDs to bias values)',
- type: 'string',
- default: '',
- placeholder: '{"50256": -100}',
- routing: {
- send: {
- type: 'body',
- property: 'logit_bias',
- value: '={{$parameter.logitBias ? JSON.parse($parameter.logitBias) : undefined}}',
- },
- },
- },
- {
- displayName: 'Metadata',
- name: 'metadata',
- description: 'Developer-defined metadata to attach to the completion (JSON object)',
- type: 'string',
- default: '',
- placeholder: '{"purpose": "testing"}',
- routing: {
- send: {
- type: 'body',
- property: 'metadata',
- value: '={{$parameter.metadata ? JSON.parse($parameter.metadata) : undefined}}',
+ property: 'tool_choice',
+ value: '={{"type": "function", "function": {"name": $parameter.toolChoiceFunctionName}}}',
},
},
},
@@ -553,56 +550,55 @@ const sharedOperations: INodeProperties[] = [
},
},
{
- displayName: 'Tool Choice',
- name: 'toolChoice',
- description: 'Controls which (if any) tool is called by the model',
- type: 'options',
- options: [
- {
- name: 'Auto',
- value: 'auto',
- description: 'The model can pick between generating a message or calling one or more tools',
+ displayName: 'Top Logprobs',
+ name: 'topLogprobs',
+ description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position',
+ type: 'number',
+ default: undefined,
+ displayOptions: {
+ show: {
+ logprobs: [true],
},
- {
- name: 'None',
- value: 'none',
- description: 'The model will not call any tool and instead generates a message',
- },
- {
- name: 'Required',
- value: 'required',
- description: 'The model must call one or more tools',
- },
- {
- name: 'Function',
- value: 'function',
- description: 'Specifies a particular tool via {"type": "function", "function": {"name": "my_function"}}',
- },
- ],
- default: 'auto',
+ },
+ typeOptions: {
+ minValue: 0,
+ maxValue: 20,
+ },
routing: {
send: {
type: 'body',
- property: 'tool_choice',
+ property: 'top_logprobs',
},
},
},
{
- displayName: 'Tool Choice Function Name',
- name: 'toolChoiceFunctionName',
- description: 'The name of the function to call when tool choice is set to function',
- type: 'string',
- default: '',
- displayOptions: {
- show: {
- toolChoice: ['function'],
- },
+ displayName: 'Top P',
+ name: 'topP',
+ description: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass',
+ type: 'number',
+ default: undefined,
+ typeOptions: {
+ maxValue: 1,
+ minValue: 0,
+ numberPrecision: 3,
},
routing: {
send: {
type: 'body',
- property: 'tool_choice',
- value: '={{"type": "function", "function": {"name": $parameter.toolChoiceFunctionName}}}',
+ property: 'top_p',
+ },
+ },
+ },
+ {
+ displayName: 'User Identifier',
+ name: 'user',
+ description: 'A unique identifier representing your end-user, which can help monitor and detect abuse',
+ type: 'string',
+ default: '',
+ routing: {
+ send: {
+ type: 'body',
+ property: 'user',
},
},
},
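Beyond the reordering (the options now appear alphabetically, from Frequency Penalty through User Identifier, which is what n8n's option-sorting lint rule expects), the string-typed fields are transformed by routing expressions before they reach the request body. A standalone sketch of what those expressions evaluate to, using hypothetical inputs:

```typescript
// Standalone illustration of the routing expressions above (inputs are hypothetical).

// Stop Sequences: comma-separated string -> trimmed array, or omitted when empty.
const stop = '\\n, Human:, AI:';
const stopValue = stop ? stop.split(',').map((s) => s.trim()) : undefined;
// -> ['\\n', 'Human:', 'AI:']

// Logit Bias / Metadata: JSON string -> parsed object, or omitted when empty.
const logitBias = '{"50256": -100}';
const logitBiasValue = logitBias ? JSON.parse(logitBias) : undefined;
// -> { '50256': -100 }

// Tool Choice = 'function': the function name is wrapped into the
// OpenAI-style tool_choice object before sending.
const toolChoiceFunctionName = 'my_function';
const toolChoiceValue = { type: 'function', function: { name: toolChoiceFunctionName } };
```

Note that `JSON.parse` throws on malformed input, so the `logit_bias` and `metadata` expressions assume the user enters well-formed JSON; the empty-string check only guards against the field being left blank.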
diff --git a/package.json b/package.json
index 1374717..f9e9034 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
- "name": "n8n-node-digitalocean-gradient-serverless-inference",
- "version": "1.0.0",
+ "name": "@digitalocean/n8n-nodes-digitalocean-gradient-serverless-inference",
+ "version": "1.0.1",
"description": "This is an n8n community node for the DigitalOcean Gradientâ„¢ AI Platform Serverless Inference API",
"keywords": [
"n8n-community-node-package"
@@ -13,7 +13,7 @@
},
"repository": {
"type": "git",
- "url": "https://github.com/digitalocean-labs/n8n-node-gradient-serverless-inference.git"
+ "url": "git+https://github.com/digitalocean-labs/n8n-node-gradient-serverless-inference.git"
},
"engines": {
"node": ">=20.15"
@@ -49,5 +49,8 @@
},
"peerDependencies": {
"n8n-workflow": "*"
+ },
+ "bugs": {
+ "url": "https://github.com/digitalocean-labs/n8n-node-gradient-serverless-inference/issues"
}
}
\ No newline at end of file
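With the scope added, the package now installs as `@digitalocean/n8n-nodes-digitalocean-gradient-serverless-inference`; note that scoped packages publish as restricted by default, so the publish step needs `--access public` unless the scope is configured otherwise. The `git+` prefix on `repository.url` and the new `bugs` field bring the metadata in line with npm's canonical format, though the file still ends without a trailing newline.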