import openai from "../../openai.app.mjs";
import common from "../common/common.mjs";
import constants from "../../common/constants.mjs";
export default {
  ...common,
  name: "Chat",
  version: "0.3.4",
  annotations: {
    destructiveHint: false,
    openWorldHint: true,
    readOnlyHint: false,
  },
  key: "openai-chat",
  description: "The Chat API, using the `gpt-3.5-turbo` or `gpt-4` model. [See the documentation](https://platform.openai.com/docs/api-reference/chat)",
  type: "action",
  props: {
    openai,
    alert: {
      type: "alert",
      alertType: "info",
      content: "Looking to chat with your tools? Check out our individual actions: [Chat using Web Search](https://pipedream.com/apps/openai/actions/chat-using-web-search), [Chat using File Search](https://pipedream.com/apps/openai/actions/chat-using-file-search), and [Chat using Functions](https://pipedream.com/apps/openai/actions/chat-using-functions).",
    },
    modelId: {
      propDefinition: [
        openai,
        "chatCompletionModelId",
      ],
    },
    userMessage: {
      label: "User Message",
      type: "string",
      description: "The user messages provide instructions to the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.",
    },
    ...common.props,
    systemInstructions: {
      label: "System Instructions",
      type: "string",
      description: "The system message helps set the behavior of the assistant. For example: \"You are a helpful assistant.\" [See these docs](https://platform.openai.com/docs/guides/chat/instructing-chat-models) for tips on writing good instructions.",
      optional: true,
    },
    messages: {
      label: "Prior Message History",
      type: "string[]",
      description: "_Advanced_. Because [the models have no memory of past chat requests](https://platform.openai.com/docs/guides/chat/introduction), all relevant information must be supplied via the conversation. You can provide [an array of messages](https://platform.openai.com/docs/guides/chat/introduction) from prior conversations here. If this param is set, the action ignores the values passed to **System Instructions** and **Assistant Response**, appends the new **User Message** to the end of this array, and sends it to the API.",
      optional: true,
    },
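    // Illustrative example of the shape expected for **Prior Message History**, based on
    // the Chat Completions `messages` array (each entry has a `role` and `content`); the
    // exact parsing of each entry is handled by the shared common module:
    // [
    //   { "role": "user", "content": "What is the capital of France?" },
    //   { "role": "assistant", "content": "The capital of France is Paris." }
    // ]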
    images: {
      label: "Images",
      type: "string[]",
      description: "Provide one or more images to [OpenAI's vision model](https://platform.openai.com/docs/guides/vision). Each entry should be either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.jpg`), or raw base64-encoded image data. Compatible with the `gpt4-vision-preview` model",
      optional: true,
    },
    audio: {
      type: "string",
      label: "Audio",
      description: "The audio file to upload. Provide either a file URL or a path to a file in the `/tmp` directory (for example, `/tmp/myFile.mp3`). For use with the `gpt-4o-audio-preview` model. Currently supports `wav` and `mp3` files.",
      optional: true,
    },
    responseFormat: {
      type: "string",
      label: "Response Format",
      description: "- **Text**: Returns unstructured text output.\n- **JSON Schema**: Enables you to define a [specific structure for the model's output using a JSON schema](https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses).",
      options: Object.values(constants.CHAT_RESPONSE_FORMAT),
      default: constants.CHAT_RESPONSE_FORMAT.TEXT.value,
      optional: true,
      reloadProps: true,
    },
    toolTypes: {
      type: "string[]",
      label: "Tool Types",
      description: "The types of tools to enable on the assistant",
      options: constants.TOOL_TYPES.filter((toolType) => toolType === "function"),
      optional: true,
      reloadProps: true,
    },
  },
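  // additionalProps() generates configuration-time props based on the selections above:
  // a JSON Schema input when **Response Format** is set to JSON Schema, and a
  // name/description/parameters trio for each function when the "function" tool type
  // is selected.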
  additionalProps() {
    const {
      responseFormat,
      toolTypes,
      numberOfFunctions,
    } = this;
    const props = {};
    if (responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      props.jsonSchema = {
        type: "string",
        label: "JSON Schema",
        description: "Define the schema that the model's output must adhere to. [See the documentation here](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas).",
      };
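      // Hypothetical example of a schema a user might supply here; whether a bare schema
      // or the full `json_schema` wrapper is expected depends on how the shared
      // _getChatArgs() helper builds `response_format`:
      // {
      //   "type": "object",
      //   "properties": {
      //     "city": { "type": "string" },
      //     "population": { "type": "integer" }
      //   },
      //   "required": ["city", "population"],
      //   "additionalProperties": false
      // }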
    }
    if (toolTypes?.includes("function")) {
      props.numberOfFunctions = {
        type: "integer",
        label: "Number of Functions",
        description: "The number of functions to define",
        optional: true,
        reloadProps: true,
        default: 1,
      };
      for (let i = 0; i < (numberOfFunctions || 1); i++) {
        props[`functionName_${i}`] = {
          type: "string",
          label: `Function Name ${i + 1}`,
          description: "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
        };
        props[`functionDescription_${i}`] = {
          type: "string",
          label: `Function Description ${i + 1}`,
          description: "A description of what the function does, used by the model to choose when and how to call the function.",
          optional: true,
        };
        props[`functionParameters_${i}`] = {
          type: "object",
          label: `Function Parameters ${i + 1}`,
          description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.",
          optional: true,
        };
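        // Illustrative example of a **Function Parameters** value, following the JSON
        // Schema format from OpenAI's function-calling guide:
        // {
        //   "type": "object",
        //   "properties": {
        //     "location": { "type": "string", "description": "City and state, e.g. San Francisco, CA" }
        //   },
        //   "required": ["location"]
        // }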
      }
    }
    return props;
  },
  methods: {
    ...common.methods,
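    // Assembles the `tools` array for the request: one entry per non-function tool type,
    // plus a `{ type: "function", function: { name, description, parameters } }` entry
    // for each configured function. Returns undefined when no tools are configured so
    // the parameter is omitted from the API call.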
    _buildTools() {
      const tools = this.toolTypes
        ?.filter((toolType) => toolType !== "function")
        .map((toolType) => ({
          type: toolType,
        })) || [];
      if (this.toolTypes?.includes("function")) {
        const numberOfFunctions = this.numberOfFunctions || 1;
        for (let i = 0; i < numberOfFunctions; i++) {
          tools.push({
            type: "function",
            function: {
              name: this[`functionName_${i}`],
              description: this[`functionDescription_${i}`],
              parameters: this[`functionParameters_${i}`],
            },
          });
        }
      }
      return tools.length
        ? tools
        : undefined;
    },
  },
  async run({ $ }) {
    const args = await this._getChatArgs();
    const response = await this.openai.createChatCompletion({
      $,
      data: {
        ...args,
        tools: this._buildTools(),
      },
    });
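    // When JSON Schema output is requested, the model returns the structured result as a
    // JSON string in `message.content`; parse it so downstream steps receive an object.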
    if (this.responseFormat === constants.CHAT_RESPONSE_FORMAT.JSON_SCHEMA.value) {
      for (const choice of response.choices) {
        try {
          choice.message.content = JSON.parse(choice.message.content);
        } catch {
          console.log(`Unable to parse JSON: ${choice.message.content}`);
        }
      }
    }
    if (response) {
      $.export("$summary", `Successfully sent chat with id ${response.id}`);
    }
    const { messages } = args;
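    // Return the original message history alongside a copy with the assistant's reply
    // appended, so a later step can pass it back in as **Prior Message History**.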
    return {
      original_messages: messages,
      original_messages_with_assistant_response: messages.concat(response.choices[0]?.message),
      ...response,
    };
  },
};