// Supported values for the Chat API `response_format` parameter.
// Frozen so the shared option metadata cannot be mutated at runtime.
const CHAT_RESPONSE_FORMAT = Object.freeze({
  TEXT: Object.freeze({
    label: "Text",
    value: "text",
  }),
  JSON_OBJECT: Object.freeze({
    label: "JSON Object",
    value: "json_object",
  }),
  JSON_SCHEMA: Object.freeze({
    label: "JSON Schema",
    value: "json_schema",
  }),
})

/**
 * Pipedream-style action that sends a single chat turn (optionally with
 * system instructions or prior message history) to the OpenAI Chat API
 * and returns the assistant's reply plus the raw API response.
 */
const chatAction = {
  name: "Chat",
  version: "0.0.3",
  key: "openai-passthrough-chat",
  description:
    "The Chat API, using the `gpt-3.5-turbo` or `gpt-4` model. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
  type: "action",
  props: {
    app: {
      type: "app",
      app: "openai_passthrough",
    },
    modelId: {
      label: "Model ID",
      type: "string",
      options: ["gpt-4.1", "o4-mini", "o3", "gpt-3.5-turbo"],
    },
    userMessage: {
      label: "User Message",
      type: "string",
      description:
        "The user messages provide instructions to the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.",
    },
    systemInstructions: {
      label: "System Instructions",
      type: "string",
      description:
        'The system message helps set the behavior of the assistant. For example: "You are a helpful assistant." [See these docs](https://platform.openai.com/docs/guides/chat/instructing-chat-models) for tips on writing good instructions.',
      optional: true,
    },
    messages: {
      label: "Prior Message History",
      type: "string[]",
      description:
        "_Advanced_. Because [the models have no memory of past chat requests](https://platform.openai.com/docs/guides/chat/introduction), all relevant information must be supplied via the conversation. You can provide [an array of messages](https://platform.openai.com/docs/guides/chat/introduction) from prior conversations here. If this param is set, the action ignores the values passed to **System Instructions** and **Assistant Response**, appends the new **User Message** to the end of this array, and sends it to the API.",
      optional: true,
    },
    temperature: {
      type: "string",
      label: "Temperature",
      description:
        "Controls randomness: 0 is deterministic, higher values (up to 2) make output more random",
      default: "1",
      optional: true,
    },
    maxTokens: {
      type: "integer",
      label: "Maximum Length",
      description: "The maximum number of tokens to generate in the completion",
      optional: true,
    },
    topP: {
      type: "string",
      label: "Top P",
      description:
        "Alternative to temperature, nucleus sampling considers the results of the tokens with top_p probability mass",
      default: "1",
      optional: true,
    },
    frequencyPenalty: {
      type: "string",
      label: "Frequency Penalty",
      description:
        "Decreases the likelihood of repeating the same line verbatim",
      default: "0",
      optional: true,
    },
    presencePenalty: {
      type: "string",
      label: "Presence Penalty",
      description: "Increases the likelihood of talking about new topics",
      default: "0",
      optional: true,
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description:
        "- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a [specific structure for the model's output using a JSON schema](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses).",
      options: Object.values(CHAT_RESPONSE_FORMAT),
      default: CHAT_RESPONSE_FORMAT.TEXT.value,
      optional: true,
    },
    jsonSchema: {
      type: "string",
      label: "JSON Schema",
      description:
        "If responseFormat is jsonSchema: Define the schema that the model's output must adhere to. [See the documentation here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).",
      optional: true,
    },
  },
  /**
   * Builds the chat `messages` array and request params from the props,
   * calls the OpenAI service, and returns:
   *   { message: <assistant text>, raw_response: <full API response> }
   */
  async run({ $ }) {
    const messages = []
    if (this.messages && this.messages.length > 0) {
      // Prior history supplied: per the `messages` prop description above,
      // System Instructions are ignored and only the history plus the new
      // user message are sent. Items arrive as strings (prop type
      // "string[]"); JSON-encoded message objects are parsed so the API
      // receives proper { role, content } objects — anything that is not a
      // JSON object is passed through unchanged.
      for (const item of this.messages) {
        let entry = item
        if (typeof item === "string") {
          try {
            const parsed = JSON.parse(item)
            if (parsed && typeof parsed === "object") entry = parsed
          } catch (err) {
            // Not JSON — keep the raw string as-is
          }
        }
        messages.push(entry)
      }
    } else if (this.systemInstructions) {
      messages.push({
        role: "system",
        content: this.systemInstructions,
      })
    }
    messages.push({
      role: "user",
      content: this.userMessage,
    })

    const params = {
      model: this.modelId,
      messages,
    }
    // Numeric tuning props are collected as strings; coerce before sending.
    if (this.temperature !== undefined) params.temperature = +this.temperature
    if (this.maxTokens !== undefined) params.max_tokens = this.maxTokens
    if (this.topP !== undefined) params.top_p = +this.topP
    if (this.frequencyPenalty !== undefined) {
      params.frequency_penalty = +this.frequencyPenalty
    }
    if (this.presencePenalty !== undefined) {
      params.presence_penalty = +this.presencePenalty
    }

    if (
      this.responseFormat === CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value &&
      this.jsonSchema
    ) {
      // Structured Outputs: the API expects `type: "json_schema"` with the
      // user schema nested under `json_schema.schema` and a required `name`.
      // (The previous `{ type: "json_object", schema }` shape is not part of
      // the API and the schema was silently ignored.)
      params.response_format = {
        type: "json_schema",
        json_schema: {
          name: "response",
          schema: JSON.parse(this.jsonSchema),
        },
      }
    } else if (this.responseFormat === CHAT_RESPONSE_FORMAT.JSON_OBJECT.value) {
      params.response_format = { type: "json_object" }
    }

    // NOTE(review): `$.services.openai` is an opaque passthrough client —
    // presumably `completions.create` proxies the Chat Completions
    // endpoint; confirm the call path against the service definition.
    const response = await $.services.openai.completions.create(params)
    const assistantMessage = response.choices[0].message.content
    $.export("$summary", "Generated response from OpenAI")
    return {
      message: assistantMessage,
      raw_response: response,
    }
  },
}

export default chatAction