@@ -1,8 +1,9 @@
 import { IncomingMessage } from "http";
 import {
+  ChatCompletionRequestMessageRoleEnum,
   Configuration,
+  CreateChatCompletionResponse,
   CreateCompletionRequest,
-  CreateCompletionResponse,
   OpenAIApi,
 } from "openai";
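For context, the `openai` client used in the hunks below is presumably built from these imports with the v3 openai-node SDK's `Configuration`/`OpenAIApi` pair, along these lines (a minimal sketch; the `OPENAI_API_KEY` variable name is an assumption, not something shown in the diff):

```ts
import { Configuration, OpenAIApi } from "openai";

// Assumed setup for the module's shared `openai` instance (v3 SDK).
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY, // assumed env var name
});
const openai = new OpenAIApi(configuration);
```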
@@ -30,24 +31,30 @@ type EmbeddingOptions = {
 export async function completion({
   prompt,
   fallback,
-  max_tokens = 800,
+  max_tokens,
   temperature = 0,
-  model = "text-davinci-003",
+  model = "gpt-3.5-turbo", // use gpt-4 for better results
   ...otherOptions
 }: CompletionOptions) {
   try {
-    const result = await openai.createCompletion({
-      prompt,
-      max_tokens,
-      temperature,
+    // Note: this is not the proper way to use the ChatGPT conversational format, but it works for now
+    const messages = [
+      {
+        role: ChatCompletionRequestMessageRoleEnum.System,
+        content: prompt ?? "",
+      },
+    ];
+    const result = await openai.createChatCompletion({
       model,
       ...otherOptions,
+      messages,
+      temperature,
+      max_tokens: max_tokens ?? 800,
     });
-    if (!result.data.choices[0].text) {
-      throw new Error("No text returned from the completions endpoint.");
+    if (!result.data.choices[0].message) {
+      throw new Error("No text returned from completions endpoint");
     }
-    return result.data.choices[0].text;
+    return result.data.choices[0].message.content;
   } catch (error) {
     if (fallback) return fallback;
     else throw error;
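The note in this hunk concedes that wrapping the entire prompt in a single system message is a shortcut. The conventional chat layout separates instructions from user input by role; a sketch of what that could look like here (the system text is a hypothetical placeholder, not part of the diff):

```ts
// Hypothetical "proper" message shape: system sets behavior, user carries the prompt.
const messages = [
  {
    role: ChatCompletionRequestMessageRoleEnum.System,
    content: "You are a helpful assistant.", // placeholder instruction
  },
  {
    role: ChatCompletionRequestMessageRoleEnum.User,
    content: prompt ?? "",
  },
];
```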
@@ -59,33 +66,65 @@ export async function* completionStream({
   fallback,
   max_tokens = 800,
   temperature = 0,
-  model = "text-davinci-003",
+  model = "gpt-3.5-turbo", // use gpt-4 for better results
 }: CompletionOptions) {
   try {
-    const result = await openai.createCompletion(
+    // Note: this is not the proper way to use the ChatGPT conversational format, but it works for now
+    const messages = [
+      {
+        role: ChatCompletionRequestMessageRoleEnum.System,
+        content: prompt ?? "",
+      },
+    ];
+    const result = await openai.createChatCompletion(
       {
-        prompt,
-        max_tokens,
-        temperature,
         model,
+        messages,
+        temperature,
+        max_tokens: max_tokens ?? 800,
         stream: true,
       },
-      { responseType: "stream" }
+      {
+        responseType: "stream",
+      }
     );
     const stream = result.data as any as IncomingMessage;
-    for await (const chunk of stream) {
-      const line = chunk.toString().trim();
-      const message = line.split("data: ")[1];
-      if (message === "[DONE]") {
-        break;
+    let buffer = "";
+    const textDecoder = new TextDecoder();
+    for await (const chunk of stream) {
+      buffer += textDecoder.decode(chunk, { stream: true });
+      const lines = buffer.split("\n");
+      // Check if the last line is complete
+      if (buffer.endsWith("\n")) {
+        buffer = "";
+      } else {
+        buffer = lines.pop() || "";
       }
-      const data = JSON.parse(message) as CreateCompletionResponse;
-      yield data.choices[0].text;
+      for (const line of lines) {
+        const message = line.trim().split("data: ")[1];
+        if (message === "[DONE]") {
+          break;
+        }
+        // Check if the message is not undefined and a valid JSON string
+        if (message) {
+          try {
+            const data = JSON.parse(message) as CreateChatCompletionResponse;
+            // @ts-ignore
+            if (data.choices[0].delta?.content) {
+              // @ts-ignore
+              yield data.choices[0].delta?.content;
+            }
+          } catch (error) {
+            console.error("Error parsing JSON message:", error);
+          }
+        }
+      }
+    }
   } catch (error) {
     if (fallback) yield fallback;
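The buffering introduced here exists because the API streams server-sent events and a network chunk can end mid-line: incomplete tail lines are carried over in `buffer` until the rest arrives, and only complete `data: ...` lines are parsed. The rewritten generator is consumed the same way as before; a usage sketch (the prompt and fallback values are hypothetical):

```ts
// Hypothetical consumer: print tokens to stdout as they stream in.
async function demo() {
  for await (const token of completionStream({
    prompt: "Write a haiku about TypeScript.", // placeholder prompt
    fallback: "Sorry, something went wrong.", // yielded if the request fails
  })) {
    process.stdout.write(token ?? "");
  }
}

demo().catch(console.error);
```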