How to Integrate a Code Snippet into a Pipedream Node.js Step and Directly Use `steps.chat_1.$return_value`?

This topic was automatically generated from Slack. You can find the original thread here.

Here is my code:

// Access the response from ChatGPT step
let chatgpt_response = steps.chat_1.$return_value;

// Split the response into segments based on commas
let segments = chatgpt_response.split(",");

// Extract values from each segment
let parsedData = segments.map(segment => segment.trim().split(":")[1].trim());

// Make parsedData available for next steps in Pipedream
this.parsedData = parsedData;

please take the code snippet shared above and integrate it into a Pipedream Node.js code step. can you tweak the Node.js code step to use steps.chat_1.$return_value directly?

Sure, here’s the code snippet integrated into a Pipedream Node.js code step: export default defineComponent({ async run({ steps, $ }) { // Access the response from ChatGPT step let chatgpt_response = steps.chat_1.$return_value; // Split the response into segments based on commas let segments = chatgpt_response.split(","); // Extract values from each segment let parsedData = segments.map(segment => segment.trim().split(":")[1].trim()); // Return parsedData for use in the next steps in Pipedream return parsedData; }, }); This code step uses steps.chat_1.$return_value directly and returns the parsedData for use in the next steps in your Pipedream workflow.

I received this error:

TypeError
chatgpt_response.split is not a function
DETAILS

    at Object.run (file:///tmp/__pdg__/dist/code/0b0a666e605ceb8a4154e156163bd6eae88dcccce76314fbaef962e02a9617c0/component.mjs:7:37)
    at null.executeComponent (/var/task/launch_worker.js:229:53)
    at MessagePort.messageHandler (/var/task/launch_worker.js:726:28)

Timestamp
8/9/2023, 9:07:31 PM
Duration
178ms

add_row

CONFIGURE

create_document

CONFIGURE

append_text

CONFIGURE

It seems like the chatgpt_response is not a string, which is causing the error when trying to use the split() function. To fix this issue, you can check the type of chatgpt_response and convert it to a string if necessary. Here’s the updated code: export default defineComponent({ async run({ steps }) { // Access the response from ChatGPT step let chatgpt_response = steps.chat_1.$return_value; // Check if chatgpt_response is not a string and convert it to a string if necessary if (typeof chatgpt_response !== "string") { chatgpt_response = JSON.stringify(chatgpt_response); } // Split the response into segments based on commas let segments = chatgpt_response.split(","); // Extract values from each segment let parsedData = segments.map(segment => segment.trim().split(":")[1].trim()); // Return parsedData for use in the next steps in Pipedream return parsedData; }, }); This code checks if chatgpt_response is not a string and converts it to a string using JSON.stringify() if necessary. Then it proceeds with the rest of the original code.

Posted thread to Discourse: How to Integrate a Given Code Snippet into a Pipedream Node.js Code Step and Modify It to Use `steps.chat_1.$return_value` Directly?

please take the code snippet shared here and integrate it into a Pipedream Node.js code step. can you tweak the Node.js code step to use steps.chat_1.$return_value directly?

import openai from 'openai';
console.log(openai);

// NOTE(review): the original code used `openai.apiKey = ...` and
// `openai.Completion.create(...)`, which are Python-SDK idioms — the Node
// `openai` default export has no `Completion` property, which is exactly the
// "Cannot read properties of undefined (reading 'create')" TypeError reported
// later in this thread. This version calls the REST endpoint with Node's
// global `fetch` instead. It also actually interpolates the `chunk` argument
// into the prompt: the original embedded the literal text
// `{{steps.chunking_script.$return_value}}`, which is never expanded inside a
// JS template literal, leaving the `chunk` parameter unused.
export default defineComponent({
  async run({ steps, $ }) {
    // TODO(review): move the key into an environment variable
    // (process.env.OPENAI_API_KEY) rather than hard-coding it in the step.
    const apiKey = 'OPENAI_API_KEY';

    /**
     * Send one chunk of the document to the OpenAI completions endpoint.
     * @param {string} chunk - text to embed in the prompt
     * @returns {Promise<object>} parsed JSON body of the API response
     * @throws {Error} with `error.response.status` set on non-2xx replies,
     *         so the backoff helper below can recognize 429 rate limits.
     */
    async function getGptResponse(chunk) {
      const promptText = `

${chunk}

Please analyze the above document to look for the below items. // rest of prompt

  `; // This is a truncated version of your prompt for brevity. Make sure to include the entire prompt.

      const res = await fetch('https://api.openai.com/v1/completions', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
          model: 'gpt-4',
          prompt: promptText,
          temperature: 0.2,
          max_tokens: 600,
          // Add other parameters if needed
        }),
      });

      if (!res.ok) {
        const err = new Error(`OpenAI request failed with status ${res.status}`);
        // Mirror the axios-style error shape the retry logic inspects.
        err.response = { status: res.status };
        throw err;
      }
      return await res.json();
    }

    /**
     * Run `requestFunction`, retrying with exponential backoff
     * (1s, 2s, 4s, ...) on HTTP 429 rate-limit errors only; any other
     * error is rethrown immediately.
     * @param {() => Promise<any>} requestFunction
     * @param {number} [maxRetries=5]
     */
    async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) {
      let retries = 0;

      while (retries < maxRetries) {
        try {
          return await requestFunction();
        } catch (error) {
          if (error.response && error.response.status === 429) { // Check for rate-limit error
            const delay = Math.pow(2, retries) * 1000; // Exponential delay
            await new Promise((resolve) => setTimeout(resolve, delay));
            retries++;
          } else {
            throw error; // If it's not a rate-limit error, re-throw
          }
        }
      }

      throw new Error('Max retries reached.');
    }

    try {
      const chunk = steps.chunking_script.$return_value; // Get the chunked content from the context
      const response = await requestWithExponentialBackoff(() => getGptResponse(chunk));
      // `fetch` + `res.json()` returns the body directly — there is no
      // axios-style `.data` wrapper as the original assumed.
      return response.choices[0].text; // Adjust based on the actual structure of the response.
    } catch (error) {
      console.error('Error:', error);
      throw error; // or handle it accordingly
    }
  },
});

Here’s the modified Pipedream Node.js code step that uses steps.chat_1.$return_value directly: import openai from "openai"; export default defineComponent({ async run({ steps, $ }) { openai.apiKey = "OPENAI_API_KEY"; async function getGptResponse(chunk) { const promptText = ` ${chunk} Please analyze the above document to look for the below items. // rest of prompt `; // Make sure to include the entire prompt. return await openai.Completion.create({ model: "gpt-4", prompt: promptText, temperature: 0.2, max_tokens: 600, // Add other parameters if needed }); } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise((resolve) => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error("Max retries reached."); } try { const chunk = steps.chat_1.$return_value; // Get the content from the chat_1 step const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response.data.choices[0].text; // Adjust based on the actual structure of the response. } catch (error) { console.error("Error:", error); throw error; // or handle it accordingly } }, }); This code uses steps.chat_1.$return_value as the input for the getGptResponse function and keeps the rest of the original code intact.

could you do the same script without the steps.chat_1.$return_value and replace it with steps.chunking_script.$return_value please?

Certainly! Here’s the modified Pipedream Node.js code step that uses steps.chunking_script.$return_value instead of steps.chat_1.$return_value: import openai from "openai"; export default defineComponent({ async run({ steps, $ }) { openai.apiKey = "OPENAI_API_KEY"; async function getGptResponse(chunk) { const promptText = ` ${chunk} Please analyze the above document to look for the below items. // rest of prompt `; // Make sure to include the entire prompt. return await openai.Completion.create({ model: "gpt-4", prompt: promptText, temperature: 0.2, max_tokens: 600, // Add other parameters if needed }); } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise((resolve) => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error("Max retries reached."); } try { const chunk = steps.chunking_script.$return_value; // Get the chunked content from the context const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response.data.choices[0].text; // Adjust based on the actual structure of the response. } catch (error) { console.error("Error:", error); throw error; // or handle it accordingly } }, }); This code uses steps.chunking_script.$return_value as the input for the getGptResponse function and keeps the rest of the original code intact.

Thanks - I received this error:

TypeError
Cannot read properties of undefined (reading 'create')

DETAILS
    at null.getGptResponse (file:///tmp/__pdg__/dist/code/9228f12cffd051ae99cc29562b41107c62c621eb19fa31d2389ce76936fe6731/component.mjs:60:38)
    at null.null (file:///tmp/__pdg__/dist/code/9228f12cffd051ae99cc29562b41107c62c621eb19fa31d2389ce76936fe6731/component.mjs:91:66)
    at null.requestWithExponentialBackoff (file:///tmp/__pdg__/dist/code/9228f12cffd051ae99cc29562b41107c62c621eb19fa31d2389ce76936fe6731/component.mjs:74:24)
    at Object.run (file:///tmp/__pdg__/dist/code/9228f12cffd051ae99cc29562b41107c62c621eb19fa31d2389ce76936fe6731/component.mjs:91:30)
    at null.executeComponent (/var/task/launch_worker.js:229:53)
    at MessagePort.messageHandler (/var/task/launch_worker.js:726:28)

I apologize for the confusion. It seems there’s an issue with the openai package usage in the provided code. Since Pipedream doesn’t support the openai package directly, you’ll need to make an HTTP request to the OpenAI API using the axios package. Here’s the modified code: import { axios } from "@pipedream/platform"; export default defineComponent({ async run({ steps, $ }) { const apiKey = "OPENAI_API_KEY"; const apiUrl = "https://api.openai.com/v1/engines/davinci-codex/completions"; async function getGptResponse(chunk) { const promptText = ` ${chunk} Please analyze the above document to look for the below items. // rest of prompt `; // Make sure to include the entire prompt. const config = { method: "POST", url: apiUrl, headers: { "Content-Type": "application/json", "Authorization": `Bearer ${apiKey}`, }, data: { model: "gpt-4", prompt: promptText, temperature: 0.2, max_tokens: 600, // Add other parameters if needed }, }; return await axios($, config); } async function requestWithExponentialBackoff(requestFunction, maxRetries = 5) { let retries = 0; while (retries < maxRetries) { try { return await requestFunction(); } catch (error) { if (error.response && error.response.status === 429) { const delay = Math.pow(2, retries) * 1000; await new Promise((resolve) => setTimeout(resolve, delay)); retries++; } else { throw error; } } } throw new Error("Max retries reached."); } try { const chunk = steps.chunking_script.$return_value; const response = await requestWithExponentialBackoff(() => getGptResponse(chunk)); return response.data.choices[0].text; // Adjust based on the actual structure of the response. } catch (error) { console.error("Error:", error); throw error; // or handle it accordingly } }, }); This code uses the axios package to make a POST request to the