AshishHansdah Here is a little code snippet that could shape this idea initially.
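// Your OpenAI API key (assumed here to be defined elsewhere, e.g. stored in Script Properties)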
var apiKey = API_KEY;
// OpenAI API endpoint for the model you wish to use
var apiURL = 'https://api.openai.com/v1/chat/completions';
// Define the headers to authenticate your API request
var headers = {
  "Authorization": "Bearer " + apiKey,
  "Content-Type": "application/json"
};
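// Note: props.instruction, props.messageToGPT, and the words variable are assumed to be supplied by the surrounding function/context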
// Define the data for your API request, including the model, prompt, and any other parameters
var payload = JSON.stringify({
  "model": "gpt-3.5-turbo-0125", // Specify the model you want to use
  "messages": [
    {"role": "system", "content": props.instruction},
    {"role": "user", "content": props.messageToGPT + " Here is the list of words: " + words}
  ],
  "temperature": 0.8,
  "max_tokens": 1600
});
// Define the options for the URL fetch call, including method, headers, and payload
var options = {
  "method": "post",
  "headers": headers,
  "payload": payload,
  "muteHttpExceptions": true // You might want to handle errors in production code
};
// Make the API request and log the response
try {
  var response = UrlFetchApp.fetch(apiURL, options);
  console.log("RESPONSE CODE: " + response.getResponseCode());
  var jsonResponse = JSON.parse(response.getContentText());
  // console.log(jsonResponse)
  var textResponse = jsonResponse.choices[0].message.content;
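  console.log(textResponse);
  // The lines below are only a sketch to close out the snippet: catch and log any request or parsing errors
} catch (e) {
  console.log("ERROR: " + e);
}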