Mirror of https://github.com/openai/openai-cookbook, synced 2024-11-08 01:10:29 +00:00

Commit 295b10461a: use ChatGPT API in nextjs example
Parent commit: a77ad9446c
FileQandAArea component: handle an explicit 500 from the search endpoint with a clearer error message.

@@ -55,6 +55,8 @@ function FileQandAArea(props: FileQandAAreaProps) {
     if (searchResultsResponse.status === 200) {
       results = searchResultsResponse.data.searchResults;
+    } else if (searchResultsResponse.status === 500) {
+      setAnswerError("Internal server error. Please try again later.");
     } else {
       setAnswerError("Sorry, something went wrong!");
     }
Same component: bail out early when the answer endpoint itself returns a 500, before trying to read the streamed body.

@@ -74,6 +76,13 @@ function FileQandAArea(props: FileQandAAreaProps) {
         fileChunks: results,
       }),
     });

+    if (res.status === 500) {
+      setAnswerError("Internal server error. Please try again later.");
+      setAnswerLoading(false);
+      return;
+    }
+
     const reader = res.body!.getReader();

     while (true) {
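The while (true) loop above drains the fetch response body chunk by chunk. As a point of reference, a minimal sketch of what that client-side read loop typically looks like is below; the TextDecoder and the setAnswer state setter are illustrative assumptions, not part of this commit:

const reader = res.body!.getReader();
const decoder = new TextDecoder();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Append each streamed piece of the answer to the UI as it arrives
  setAnswer((prev: string) => prev + decoder.decode(value));
}
setAnswerLoading(false);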
API route handler: drop the debug logging of the concatenated file extracts.

@@ -40,8 +40,6 @@ export default async function handler(
     .join("\n")
     .slice(0, MAX_FILES_LENGTH);

-  console.log(filesString);
-
   const prompt =
     `Given a question, try to answer it using the content of the file extracts below, and if you cannot answer, or find a relevant file, just output \"I couldn't find the answer to that question in your files.\".\n\n` +
     `If the answer is not contained in the files or if there are no file extracts, respond with \"I couldn't find the answer to that question in your files.\" If the question is not actually a question, respond with \"That's not a valid question.\"\n\n` +
Same handler: stop pinning the model here and rely on the service's new default instead.

@@ -53,7 +51,6 @@ export default async function handler(

   const stream = completionStream({
     prompt,
-    model: "text-davinci-003",
   });

   // Set the response headers for streaming
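For orientation, the section after that comment forwards the generator's output to the HTTP response. A rough sketch, assuming a standard Node.js response object (the header values are illustrative, not taken from this diff):

// Set the response headers for streaming
res.writeHead(200, {
  "Content-Type": "text/plain; charset=utf-8",
  "Transfer-Encoding": "chunked",
});

for await (const chunk of stream) {
  res.write(chunk); // forward each generated token to the client
}
res.end();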
OpenAI service module: import the chat-completion types alongside the old request type.

@@ -1,8 +1,9 @@
 import { IncomingMessage } from "http";
 import {
+  ChatCompletionRequestMessageRoleEnum,
   Configuration,
+  CreateChatCompletionResponse,
   CreateCompletionRequest,
-  CreateCompletionResponse,
   OpenAIApi,
 } from "openai";

completion(): switch from createCompletion with text-davinci-003 to createChatCompletion with gpt-3.5-turbo, wrapping the whole prompt in a single system message.

@@ -30,24 +31,30 @@ type EmbeddingOptions = {
 export async function completion({
   prompt,
   fallback,
-  max_tokens = 800,
+  max_tokens,
   temperature = 0,
-  model = "text-davinci-003",
-  ...otherOptions
+  model = "gpt-3.5-turbo", // use gpt-4 for better results
 }: CompletionOptions) {
   try {
-    const result = await openai.createCompletion({
-      prompt,
-      max_tokens,
-      temperature,
+    // Note: this is not the proper way to use the ChatGPT conversational format, but it works for now
+    const messages = [
+      {
+        role: ChatCompletionRequestMessageRoleEnum.System,
+        content: prompt ?? "",
+      },
+    ];
+
+    const result = await openai.createChatCompletion({
       model,
-      ...otherOptions,
+      messages,
+      temperature,
+      max_tokens: max_tokens ?? 800,
     });

-    if (!result.data.choices[0].text) {
-      throw new Error("No text returned from the completions endpoint.");
+    if (!result.data.choices[0].message) {
+      throw new Error("No text returned from completions endpoint");
     }
-    return result.data.choices[0].text;
+    return result.data.choices[0].message.content;
   } catch (error) {
     if (fallback) return fallback;
     else throw error;
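The call shape stays the same for callers, but two behaviors shift: max_tokens now defaults through max_tokens ?? 800 inside the request instead of in the parameter list, and the ...otherOptions passthrough is gone, so extra CreateCompletionRequest fields are no longer forwarded. A hedged usage sketch (the prompt text is illustrative):

const answer = await completion({
  prompt: "Summarize the file extracts above in one sentence.",
  temperature: 0,
  // model: "gpt-4", // use gpt-4 for better results
});
console.log(answer);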
completionStream(): the same chat-API switch, plus a rewritten SSE read loop that buffers partial lines across network chunks.

@@ -59,33 +66,65 @@ export async function* completionStream({
   fallback,
   max_tokens = 800,
   temperature = 0,
-  model = "text-davinci-003",
+  model = "gpt-3.5-turbo", // use gpt-4 for better results
 }: CompletionOptions) {
   try {
-    const result = await openai.createCompletion(
+    // Note: this is not the proper way to use the ChatGPT conversational format, but it works for now
+    const messages = [
+      {
+        role: ChatCompletionRequestMessageRoleEnum.System,
+        content: prompt ?? "",
+      },
+    ];
+
+    const result = await openai.createChatCompletion(
       {
-        prompt,
-        max_tokens,
-        temperature,
         model,
+        messages,
+        temperature,
+        max_tokens: max_tokens ?? 800,
         stream: true,
       },
-      { responseType: "stream" }
+      {
+        responseType: "stream",
+      }
     );

     const stream = result.data as any as IncomingMessage;

-    for await (const chunk of stream) {
-      const line = chunk.toString().trim();
-      const message = line.split("data: ")[1];
-
-      if (message === "[DONE]") {
-        break;
-      }
-
-      const data = JSON.parse(message) as CreateCompletionResponse;
-      yield data.choices[0].text;
+    let buffer = "";
+    const textDecoder = new TextDecoder();
+
+    for await (const chunk of stream) {
+      buffer += textDecoder.decode(chunk, { stream: true });
+      const lines = buffer.split("\n");
+
+      // Check if the last line is complete
+      if (buffer.endsWith("\n")) {
+        buffer = "";
+      } else {
+        buffer = lines.pop() || "";
+      }
+
+      for (const line of lines) {
+        const message = line.trim().split("data: ")[1];
+        if (message === "[DONE]") {
+          break;
+        }
+
+        // Check if the message is not undefined and a valid JSON string
+        if (message) {
+          try {
+            const data = JSON.parse(message) as CreateChatCompletionResponse;
+            // @ts-ignore
+            if (data.choices[0].delta?.content) {
+              // @ts-ignore
+              yield data.choices[0].delta?.content;
+            }
+          } catch (error) {
+            console.error("Error parsing JSON message:", error);
+          }
+        }
+      }
     }
   } catch (error) {
     if (fallback) yield fallback;
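The streaming rewrite fixes a real parsing hazard: the old loop assumed each network chunk was exactly one complete "data: ..." line, but chunk boundaries can split an SSE event mid-JSON, which made JSON.parse throw. The new code accumulates chunks in a buffer and only parses complete lines, holding any trailing partial line back for the next chunk. A self-contained sketch of that line-buffering technique (the names are illustrative, not from this commit):

function makeLineBuffer() {
  let buffer = "";
  return (chunk: string): string[] => {
    buffer += chunk;
    const lines = buffer.split("\n");
    // Hold back a trailing partial line (if any) until the next chunk
    buffer = buffer.endsWith("\n") ? "" : lines.pop() || "";
    return lines.filter((line) => line.trim().length > 0);
  };
}

const push = makeLineBuffer();
push('data: {"a":');        // [] : the incomplete line is held back
push('1}\ndata: [DONE]\n'); // ['data: {"a":1}', 'data: [DONE]']

Callers consume the generator directly, e.g. for await (const token of completionStream({ prompt })) { ... }, receiving the delta content token by token.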