How to Improve an OpenAI Node.js Script with Exponential Backoff Logic?

This topic was automatically generated from Slack. You can find the original thread here.

Could you help me clean up this script to be used in an OpenAI Node.js script?

import { axios } from "@pipedream/platform";

const openai = import('openai');

export default defineComponent({
  async run({ steps, $ }) {
    openai.apiKey = 'OPENAI_API_KEY';

    async function getGptResponse(chunk) {
  const promptText = `

{{steps.chunking_script.$return_value}}

Please analyze the above document to look for the below items. // rest of prompt

  `;  // This is a truncated version of your prompt for brevity. Make sure to include the entire prompt.

  return await openai.Completion.create({
    model: 'gpt-4',
    prompt: promptText,
    temperature: 0.2,
    max_tokens: 600,
    // Add other parameters if needed
  });
}

// Define the exponential backoff logic
 async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) {
    let retries = 0;

    while (retries < maxRetries) {
        try {
            return await requestFunction();
        } catch (error) {
            if (error.response && error.response.status === 429) { // Check for rate-limit error
                const delay = Math.pow(2, retries) * 1000; // Exponential delay
                await new Promise(resolve => setTimeout(resolve, delay)); 
                retries++;
            } else {
                throw error; // If it's not a rate-limit error, re-throw
            }
        }
    }

    throw new Error('Max retries reached.');
}

try {
      const chunk = steps.chunking_script.$return_value; // Get the chunked content from the context
      const response = await requestWithExponentialBackoff(() => getGptResponse(chunk));
      return response.data.choices[0].text;  // Adjust based on the actual structure of the response.
    } catch (error) {
      console.error("Error:", error);
      throw error; // or handle it accordingly
    }
  }
});

I’ve cleaned up the script for you to use with OpenAI in a Pipedream Node.js component. Please replace 'OPENAI_API_KEY' with your actual API key: import { axios } from "@pipedream/platform"; import openai from "openai"; export default defineComponent({ async run({ steps, $ }) { openai.apiKey = 'OPENAI_API_KEY'; async function getGptResponse(chunk) { const promptText = ` {{steps.chunking_script.$return_value}} Please analyze the above document to look for the below items. // rest of prompt `; return await openai.Completion.create({ model: 'gpt-4', prompt: promptText, temperature: 0.2, max_tokens: 600, }); } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise(resolve => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error('Max retries reached.'); } try { const chunk = steps.chunking_script.$return_value; const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response.choices[0].text; } catch (error) { console.error("Error:", error); throw error; } }, }); Make sure to install the openai package in your Pipedream component by adding this line at the top of your code: ```
import openai from “openai”;

Thanks . I received this error:

TypeError
Cannot read properties of undefined (reading 'create')

DETAILS
    at null.getGptResponse (file:///tmp/__pdg__/dist/code/1945cbd5909f5d4d17b8f97d13bd630e4c29923645fe273e781a8632b020d957/component.mjs:14:38)
    at null.null (file:///tmp/__pdg__/dist/code/1945cbd5909f5d4d17b8f97d13bd630e4c29923645fe273e781a8632b020d957/component.mjs:44:66)
    at null.requestWithExponentialBackoff (file:///tmp/__pdg__/dist/code/1945cbd5909f5d4d17b8f97d13bd630e4c29923645fe273e781a8632b020d957/component.mjs:27:24)
    at Object.run (file:///tmp/__pdg__/dist/code/1945cbd5909f5d4d17b8f97d13bd630e4c29923645fe273e781a8632b020d957/component.mjs:44:30)
    at null.executeComponent (/var/task/launch_worker.js:229:53)
    at MessagePort.messageHandler (/var/task/launch_worker.js:726:28)

I apologize for the confusion. It seems there’s an issue with the openai package import. Since the openai package is not available in Pipedream components, you’ll need to use the axios package to make API calls to OpenAI. Here’s the updated code: import { axios } from "@pipedream/platform"; export default defineComponent({ async run({ steps, $ }) { async function getGptResponse(chunk) { const promptText = ` {{steps.chunking_script.$return_value}} Please analyze the above document to look for the below items. // rest of prompt `; const response = await axios(this, { method: "POST", url: "https://api.openai.com/v1/engines/davinci-codex/completions", headers: { "Content-Type": "application/json", "Authorization": `Bearer OPENAI_API_KEY`, }, data: { model: "gpt-4", prompt: promptText, temperature: 0.2, max_tokens: 600, }, }); return response.choices[0].text; } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise(resolve => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error("Max retries reached."); } try { const chunk = steps.chunking_script.$return_value; const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response; } catch (error) { console.error("Error:", error); throw error; } }, }); Make sure to replace 'OPENAI_API_KEY' with your actual API key.

Hey , is there a good reason that my environmental variables wouldn’t be working? The API call from OpenAI is set up correctly.

It’s possible that the environment variable is not being accessed correctly in your code. To use environment variables in Pipedream components, you should reference them using process.env.YOUR_VARIABLE_NAME. Make sure you’ve set the environment variable in the Pipedream UI and are using the correct variable name in your code. If you’re still having issues, please share the relevant part of your code where you’re using the environment variable, and I’ll help you troubleshoot the problem.

this is the relevant part of the code:

      const response = await axios(this, {
        method: "POST",
        url: "https://api.openai.com/v1/engines/davinci-codex/completions",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer OPENAI_API_KEY`,

It seems you’re using a hardcoded string “OPENAI_API_KEY” instead of referencing the actual API key from an environment variable. To fix this, you should store your OpenAI API key as an environment variable and then reference it in your code. Here’s the updated code snippet: const response = await axios(this, { method: "POST", url: "https://api.openai.com/v1/engines/davinci-codex/completions", headers: { "Content-Type": "application/json", "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`, }, // rest of the axios config }); Make sure to set the OPENAI_API_KEY environment variable in your Pipedream account.

I received this error:

Error - Request failed with status code 400
{“error”:{“message”:“Cannot specify both model and engine”,“type”:“invalid_request_error”,“param”:null,“code”:null}}
DETAILS

    at null.createError (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/createError.js:16:15)
    at null.settle (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/settle.js:17:12)
    at IncomingMessage.handleStreamEnd (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/adapters/http.js:269:11)
    at IncomingMessage.emit (node:events:525:35)
    at null.endReadableNT (node:internal/streams/readable:1359:12)
    at process.processTicksAndRejections (node:internal/process/task_queues:82:21)

8/22/2023, 7:58:50 AM
export: debug - { “status”: 400, “statusText”: “Bad Request”, “headers”: { “date”: “Tue, 22 Aug 2023 13:58:49 GMT”, “content-type”: “application/json; charset=utf-8”, “content-length”: “169”, “connection”: “close”, “vary”: “Origin”, “x-request-id”: “38b3a076f65901ca51d639bd9861730b”, “strict-transport-security”: “max-age=15724800; includeSubDomains”, “cf-cache-status”: “DYNAMIC”, “server”: “cloudflare”, “cf-ray”: “7faba1fe1d5982f3-IAD”, “alt-svc”: “h3=":443"; ma=86400” }, “config”: { “url”: "
https://api.openai.com/v1/engines/davinci-codex/completions
“, “method”: “post”, “data”: “{"model":"gpt-4","prompt":"\n {{steps.chunking_script.$return_value}}\n Please analyze the above document to look for the below items. // rest of prompt\n ","temperature":0.2,"max_tokens":600}”, “headers”: { “Accept”: “application/json, text/plain, /”, “Content-Type”: “application/json”, “Authorization”: “Bearer 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”, “User-Agent”: “axios/0.21.4”, “Content-Length”: 211 }, “transformRequest”: [ null ], “transformResponse”: [ null ], “timeout”: 0, “xsrfCookieName”: “XSRF-TOKEN”, “xsrfHeaderName”: “X-XSRF-TOKEN”, “maxContentLength”: -1, “maxBodyLength”: -1, “transitional”: { “silentJSONParsing”: true, “forcedJSONParsing”: true, “clarifyTimeoutError”: false } }, “data”: { “error”: { “message”: “Cannot specify both model and engine”, “type”: “invalid_request_error”, “param”: null, “code”: null } } }
8/22/2023, 7:58:50 AM
Error: at null.createError (/tmp/pdg/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/createError.js:16:15) at null.settle (/tmp/pdg/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/settle.js:17:12) at IncomingMessage.handleStreamEnd (/tmp/pdg/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/adapters/http.js:269:11) at IncomingMessage.emit (node:events:525:35) at null.endReadableNT (node:internal/streams/readable:1359:12) at process.processTicksAndRejections (node:internal/process/task_queues:82:21) { config: { url: ’
https://api.openai.com/v1/engines/davinci-codex/completions
', method: ‘post’, data: '{“model”:“gpt-4”,“prompt”:”\n {{steps.chunking_script.$return_value}}\n Please analyze the above document to look for the below items. // rest of prompt\n “,“temperature”:0.2,“max_tokens”:600}', headers: { Accept: ‘application/json, text/plain, /’, ‘Content-Type’: ‘application/json’, Authorization: ‘Bearer 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’, ‘User-Agent’: ‘axios/0.21.4’, ‘Content-Length’: 211 }, transformRequest: [ [Function: transformRequest] ], transformResponse: [ [Function: transformResponse] ], timeout: 0, adapter: [Function: httpAdapter], xsrfCookieName: ‘XSRF-TOKEN’, xsrfHeaderName: ‘X-XSRF-TOKEN’, maxContentLength: -1, maxBodyLength: -1, validateStatus: [Function: validateStatus], transitional: { silentJSONParsing: true, forcedJSONParsing: true, clarifyTimeoutError: false } }, request: <ref **1> ClientRequest { _events: [Object: null prototype] { abort: [Function (anonymous)], aborted: [Function (anonymous)], connect: [Function (anonymous)], error: [Function (anonymous)], socket: [Function (anonymous)], timeout: [Function (anonymous)], finish: [Function: requestOnFinish] }, _eventsCount: 7, _maxListeners: undefined, outputData: [], outputSize: 0, writable: true, destroyed: false, _last: true, chunkedEncoding: false, shouldKeepAlive: false, maxRequestsOnConnectionReached: false, _defaultKeepAlive: true, useChunkedEncodingByDefault: true, sendDate: false, _removedConnection: false, _removedContLen: false, _removedTE: false, strictContentLength: false, _contentLength: 211, _hasBody: true, _trailer: ‘’, finished: true, _headerSent: true, _closed: false, socket: TLSSocket { _tlsOptions: [Object], _secureEstablished: true, _securePending: false, _newSessionPending: false, _controlReleased: true, secureConnecting: false, _SNICallback: null, servername: ‘api.openai.com’, alpnProtocol: false, authorized: true, authorizationError: null, encrypted: true, _events: [Object: null prototype], _eventsCount: 10, connecting: false, _hadError: false, _parent: null, _host: ‘api.openai.com’, _closeAfterHandlingError: false, _readableState: [ReadableState], _maxListeners: undefined, _writableState: [WritableState], allowHalfOpen: false, _sockname: null, _pendingData: null, _pendingEncoding: ‘’, server: undefined, _server: null, ssl: [TLSWrap], _requestCert: true, _rejectUnauthorized: true, parser: null, _httpMessage: [Circular **1], [Symbol(res)]: [TLSWrap], [Symbol(verified)]: true, [Symbol(pendingSession)]: null, [Symbol(async_id_symbol)]: 19, [Symbol(kHandle)]: [TLSWrap], [Symbol(lastWriteQueueSize)]: 0, [Symbol(timeout)]: null, [Symbol(kBuffer)]: null, [Symbol(kBufferCb)]: null, [Symbol(kBufferGen)]: null, [Symbol(kCapture)]: false, [Symbol(kSetNoDelay)]: false, [Symbol(kSetKeepAlive)]: false, [Symbol(kSetKeepAliveInitialDelay)]: 0, [Symbol(kBytesRead)]: 0, [Symbol(kBytesWritten)]: 0, [Symbol(connect-options)]: [Object] }, _header: ‘POST /v1/engines/davinci-codex/completions HTTP/1.1\r\n’ + ‘Accept: application/json, text/plain, /\r\n’ + ‘Content-Type: application/json\r\n’ + ‘Authorization: Bearer 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\r\n’ + ‘User-Agent: axios/0.21.4\r\n’ + ‘Content-Length: 211\r\n’ + ‘Host: api.openai.com\r\n’ + ‘Connection: close\r\n’ + ‘\r\n’, _keepAliveTimeout: 0, _onPendingData: [Function: nop], agent: Agent { _events: [Object: null prototype], _eventsCount: 2, _maxListeners: undefined, defaultPort: 443, protocol: ‘https:’, options: [Object: null prototype], requests: [Object: null prototype] {}, sockets: [Object: null prototype], freeSockets: [Object: null prototype] {}, keepAliveMsecs: 1000, keepAlive: false, maxSockets: Infinity, maxFreeSockets: 256, scheduling: ‘lifo’, maxTotalSockets: Infinity, totalSocketCount: 1, maxCachedSessions: 100, _sessionCache: [Object], [Symbol(kCapture)]: false }, socketPath: undefined, method: ‘POST’, maxHeaderSize: undefined, insecureHTTPParser: undefined, joinDuplicateHeaders: undefined, path: ‘/v1/engines/davinci-codex/completions’, _ended: true, res: IncomingMessage { _readableState: [ReadableState], _events: [Object: null prototype], _eventsCount: 3, _maxListeners: undefined, socket: [TLSSocket], httpVersionMajor: 1, httpVersionMinor: 1, httpVersion: ‘1.1’, complete: true, rawHeaders: [Array], rawTrailers: [], joinDuplicateHeaders: undefined, aborted: false, upgrade: false, url: ‘’, method: null, statusCode: 400, statusMessage: ‘Bad Request’, client: [TLSSocket], _consuming: false, _dumped: false, req: [Circular **1], responseUrl: ‘https://api.openai.com/v1/engines/davinci-codex/completions’, redirects: [], [Symbol(kCapture)]: false, [Symbol(kHeaders)]: [Object], [Symbol(kHeadersCount)]: 22, [Symbol(kTrailers)]: null, [Symbol(kTrailersCount)]: 0 }, aborted: false, timeoutCb: null, upgradeOrConnect: false, parser: null, maxHeadersCount: null, reusedSocket: false, host: ‘api.openai.com’, protocol: ‘https:’, _redirectable: Writable { _writableState: [WritableState], _events: [Object: null prototype], _eventsCount: 2, _maxListeners: undefined, _options: [Object], _ended: true, _ending: true, _redirectCount: 0, _redirects: [], _requestBodyLength: 211, _requestBodyBuffers: [], _onNativeResponse: [Function (anonymous)], _currentRequest: [Circular **1], _currentUrl: ‘https://api.openai.com/v1/engines/davinci-codex/completions’, [Symbol(kCapture)]: false }, [Symbol(kCapture)]: false, [Symbol(kBytesWritten)]: 0, [Symbol(kNeedDrain)]: false, [Symbol(corked)]: 0, [Symbol(kOutHeaders)]: [Object: null prototype] { accept: [Array], ‘content-type’: [Array], authorization: [Array], ‘user-agent’: [Array], ‘content-length’: [Array], host: [Array] }, [Symbol(errored)]: null, [Symbol(kUniqueHeaders)]: null }, response: { status: 400, statusText: ‘Bad Request’, headers: { date: ‘Tue, 22 Aug 2023 13:58:49 GMT’, ‘content-type’: ‘application/json; charset=utf-8’, ‘content-length’: ‘169’, connection: ‘close’, vary: ‘Origin’, ‘x-request-id’: ‘38b3a076f65901ca51d639bd9861730b’, ‘strict-transport-security’: ‘max-age=15724800; includeSubDomains’, ‘cf-cache-status’: ‘DYNAMIC’, server: ‘cloudflare’, ‘cf-ray’: ‘7faba1fe1d5982f3-IAD’, ‘alt-svc’: 'h3=”:443"; ma=86400’ }, config: { url: ‘https://api.openai.com/v1/engines/davinci-codex/completions’, method: ‘post’, data: ‘{“model”:“gpt-4”,“prompt”:"\n {{steps.chunking_script.$return_value}}\n Please analyze the above document to look for the below items. // rest of prompt\n ",“temperature”:0.2,“max_tokens”:600}’, headers: [Object], transformRequest: [Array], transformResponse: [Array], timeout: 0, adapter: [Function: httpAdapter], xsrfCookieName: ‘XSRF-TOKEN’, xsrfHeaderName: ‘X-XSRF-TOKEN’, maxContentLength: -1, maxBodyLength: -1, validateStatus: [Function: validateStatus], transitional: [Object] }, data: { error: [Object] } }, isAxiosError: true, toJSON: [Function: toJSON], name: ‘Error - Request failed with status code 400’ }

chat_1

CONFIGURE

RESULTS

parsing_script

CODE

add_row

CONFIGURE

Sorry , I can format that better. I’m receiving this error:

Error - Request failed with status code 400
{"error":{"message":"Cannot specify both model and engine","type":"invalid_request_error","param":null,"code":null}}

DETAILS
    at null.createError (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/createError.js:16:15)
    at null.settle (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/settle.js:17:12)
    at IncomingMessage.handleStreamEnd (/tmp/__pdg__/dist/code/169aaff5681bf2ac5a95cb9dbc868f9b800ef9a05f40819a96bca94858a2d369/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/adapters/http.js:269:11)
    at IncomingMessage.emit (node:events:525:35)
    at null.endReadableNT (node:internal/streams/readable:1359:12)
    at process.processTicksAndRejections (node:internal/process/task_queues:82:21)

It looks like you’re encountering an error because both “model” and “engine” parameters are specified in the API request, which is not allowed. To resolve this issue, you should only specify either the “model” or the “engine” parameter in your request, not both. Please review your API request and remove one of the conflicting parameters. If you need further assistance, feel free to ask.

Could you clean up this script to ensure only the model parameter is being requested?

import { axios } from "@pipedream/platform";

export default defineComponent({
  async run({ steps, $ }) {
    async function getGptResponse(chunk) {
      const promptText = `
        {{steps.chunking_script.$return_value}}
        Please analyze the above document to look for the below items. // rest of prompt
      `;

const response = await axios(this, {
  method: "POST",
  url: "https://api.openai.com/v1/engines/davinci-codex/completions",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
  },
  data: {
    model: "gpt-4",
    prompt: promptText,
    temperature: 0.2,
    max_tokens: 600,
  }
});

      return response.choices[0].text;
    }

    async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) {
      let retries = 0;

      while (retries < maxRetries) {
        try {
          return await requestFunction();
        } catch (error) {
          if (error.response && error.response.status === 429) {
            const delay = Math.pow(2, retries) * 1000;
            await new Promise(resolve => setTimeout(resolve, delay));
            retries++;
          } else {
            throw error;
          }
        }
      }

      throw new Error("Max retries reached.");
    }

    try {
      const chunk = steps.chunking_script.$return_value;
      const response = await requestWithExponentialBackoff(() => getGptResponse(chunk));
      return response;
    } catch (error) {
      console.error("Error:", error);
      throw error;
    }
  },
});

I’ve updated the script to remove the “model” parameter since the “engine” parameter is already specified in the URL. Here’s the cleaned-up script: ```
import { axios } from “@pipedream/platform”; export default defineComponent({ async run({ steps, $ }) { async function getGptResponse(chunk) { const promptText = {{steps.chunking_script.$return_value}} Please analyze the above document to look for the below items. // rest of prompt ; const response = await axios(this, { method: “POST”, url: “https://api.openai.com/v1/engines/davinci-codex/completions”, headers: { “Content-Type”: “application/json”, “Authorization”: Bearer ${process.env.OPENAI_API_KEY}, }, data: { prompt: promptText, temperature: 0.2, max_tokens: 600, }, }); return response.choices[0].text; } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise(resolve => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error(“Max retries reached.”); } try { const chunk = steps.chunking_script.$return_value; const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response; } catch (error) { console.error(“Error:”, error); throw error; } }, });

I am now receiving this error:

Error - Request failed with status code 404
{"error":{"message":"The model `davinci-codex` does not exist or you do not have access to it.","type":"invalid_request_error","param":null,"code":"model_not_found"}}

DETAILS
    at null.createError (/tmp/__pdg__/dist/code/cb62037d21815bfc59ba9bcab0e61c99b5f1728f1307128bd12cfe32c8018825/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/createError.js:16:15)
    at null.settle (/tmp/__pdg__/dist/code/cb62037d21815bfc59ba9bcab0e61c99b5f1728f1307128bd12cfe32c8018825/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/settle.js:17:12)
    at IncomingMessage.handleStreamEnd (/tmp/__pdg__/dist/code/cb62037d21815bfc59ba9bcab0e61c99b5f1728f1307128bd12cfe32c8018825/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/adapters/http.js:269:11)
    at IncomingMessage.emit (node:events:525:35)
    at null.endReadableNT (node:internal/streams/readable:1359:12)
    at process.processTicksAndRejections (node:internal/process/task_queues:82:21)

I am receiving this error:

Error - Request failed with status code 404
{"error":{"message":"The model `davinci-codex` does not exist or you do not have access to it.","type":"invalid_request_error","param":null,"code":"model_not_found"}}

DETAILS
    at null.createError (/tmp/__pdg__/dist/code/23921202faa24fe394c81265b8278bb4d1777ac6d70b536034514dc1b7e471ce/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/createError.js:16:15)
    at null.settle (/tmp/__pdg__/dist/code/23921202faa24fe394c81265b8278bb4d1777ac6d70b536034514dc1b7e471ce/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/core/settle.js:17:12)
    at IncomingMessage.handleStreamEnd (/tmp/__pdg__/dist/code/23921202faa24fe394c81265b8278bb4d1777ac6d70b536034514dc1b7e471ce/node_modules/.pnpm/axios@0.21.4/node_modules/axios/lib/adapters/http.js:269:11)
    at IncomingMessage.emit (node:events:525:35)
    at null.endReadableNT (node:internal/streams/readable:1359:12)
    at process.processTicksAndRejections (node:internal/process/task_queues:82:21)

It looks like you’re trying to access a model called davinci-codex that either doesn’t exist or you don’t have access to. Please double-check the model name and ensure you have the correct access permissions. If you still encounter issues, please provide more information about the API you’re trying to access or the code you’re using, so I can help you better.

This is the code:

import { axios } from "@pipedream/platform";

export default defineComponent({
  async run({ steps, $ }) {
    async function getGptResponse(chunk) {
      const promptText = `
        {{steps.chunking_script.$return_value}}
        Please analyze the above document to look for the below items. // rest of prompt
      `;

      const response = await axios(this, {
        method: "POST",
        url: "https://api.openai.com/v1/engines/davinci-codex/completions",
        headers: {
          "Content-Type": "application/json",
          "Authorization": `Bearer ${process.env.CIM_SCRAPER_NEW}`,
        },
        data: {
          prompt: promptText,
          temperature: 0.2,
          max_tokens: 600,
        },
      });

      return response.choices[0].text;
    }

    async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) {
      let retries = 0;

      while (retries < maxRetries) {
        try {
          return await requestFunction();
        } catch (error) {
          if (error.response && error.response.status === 429) {
            const delay = Math.pow(2, retries) * 1000;
            await new Promise(resolve => setTimeout(resolve, delay));
            retries++;
          } else {
            throw error;
          }
        }
      }

      throw new Error("Max retries reached.");
    }

    try {
      const chunk = steps.chunking_script.$return_value;
      const response = await requestWithExponentialBackoff(() => getGptResponse(chunk));
      return response;
    } catch (error) {
      console.error("Error:", error);
      throw error;
    }
  },
});

It seems like you’re trying to use the OpenAI API with the davinci-codex model, which is causing the error. The correct model name for OpenAI’s Codex is text-davinci-002. Please update the URL in your axios request with the correct model name: ```
url: “https://api.openai.com/v1/engines/text-davinci-002/completions”,