diff --git a/go.mod b/go.mod
index 07c0f4e..8edde2c 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/verdverm/chatgpt
 go 1.18
 
 require (
-	github.com/sashabaranov/go-gpt3 v1.0.1
+	github.com/sashabaranov/go-openai v1.5.0
 	github.com/spf13/cobra v1.6.1
 )
 
diff --git a/go.sum b/go.sum
index 78ec907..1774fa5 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
 github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sashabaranov/go-gpt3 v1.0.1 h1:KHwY4uroFlX1qI1Hui7d31ZI6uzbNGL9zAkh1FkfhuM=
-github.com/sashabaranov/go-gpt3 v1.0.1/go.mod h1:BIZdbwdzxZbCrcKGMGH6u2eyGe1xFuX9Anmh3tCP8lQ=
+github.com/sashabaranov/go-openai v1.5.0 h1:4Gr/7g/KtVzW0ddn7TC2aUlyzvhZBIM+qRZ6Ae2kMa0=
+github.com/sashabaranov/go-openai v1.5.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
 github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
 github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
diff --git a/main.go b/main.go
index 23c4da4..929be47 100644
--- a/main.go
+++ b/main.go
@@ -11,7 +11,7 @@ import (
 	"strconv"
 	"strings"
 
-	gpt3 "github.com/sashabaranov/go-gpt3"
+	gpt3 "github.com/sashabaranov/go-openai"
 
 	"github.com/spf13/cobra"
 )
@@ -52,14 +52,18 @@ Examples:
 	chatgpt -c ...
 
 	# model options (https://platform.openai.com/docs/api-reference/completions/create)
-	chatgpt -T 4096 # set max tokens in reponse [0,4096]
-	chatgpt -C # clean whitespace before sending
-	chatgpt -E # echo back the prompt, useful for vim coding
-	chatgpt --temp # set the temperature param [0.0,2.0]
-	chatgpt --topp # set the TopP param [0.0,1.0]
-	chatgpt --pres # set the Presence Penalty [-2.0,2.0]
-	chatgpt --freq # set the Frequency Penalty [-2.0,2.0]
-
+	chatgpt -T 4096   # set max tokens in response [0,4096]
+	chatgpt -C        # clean whitespace before sending
+	chatgpt -E        # echo back the prompt, useful for vim coding
+	chatgpt --temp    # set the temperature param  [0.0,2.0]
+	chatgpt --topp    # set the TopP param         [0.0,1.0]
+	chatgpt --pres    # set the Presence Penalty   [-2.0,2.0]
+	chatgpt --freq    # set the Frequency Penalty  [-2.0,2.0]
+
+	# change model selection; available models are listed here:
+	# https://pkg.go.dev/github.com/sashabaranov/go-openai#Client.ListModels
+	chatgpt -m text-davinci-003   # set the model to text-davinci-003 (the default)
+	chatgpt -m text-ada-001       # set the model to text-ada-001
 `
 
 var interactiveHelp = `starting interactive session...
@@ -72,6 +76,7 @@ var interactiveHelp = `starting interactive session...
 	'topp'   set the TopP param          [0.0,1.0]
 	'pres'   set the Presence Penalty    [-2.0,2.0]
 	'freq'   set the Frequency Penalty   [-2.0,2.0]
+	'model'  change the selected model
 `
 
 //go:embed pretexts/*
@@ -99,36 +104,6 @@ var PresencePenalty float64
 var FrequencyPenalty float64
 var Model string
 
-// checkModel verifies that the selected mode id is one that go-gpt3 knows
-// about, producing an error if not.
-//
-// TODO: in future, this could probably leverage gpt3.Client.ListModels() to
-// support the user's fine-tuned models.
-func checkModel(m string) error {
-	knownModels := []string{
-		gpt3.GPT3TextDavinci003,
-		gpt3.GPT3TextDavinci002,
-		gpt3.GPT3TextCurie001,
-		gpt3.GPT3TextBabbage001,
-		gpt3.GPT3TextAda001,
-		gpt3.GPT3TextDavinci001,
-		gpt3.GPT3DavinciInstructBeta,
-		gpt3.GPT3Davinci,
-		gpt3.GPT3CurieInstructBeta,
-		gpt3.GPT3Curie,
-		gpt3.GPT3Ada,
-		gpt3.GPT3Babbage,
-	}
-
-	for _, v := range knownModels {
-		if m == v {
-			return nil
-		}
-	}
-
-	return fmt.Errorf("unknown model '%s', expected one of: %s", m, strings.Join(knownModels, ", "))
-}
-
 func GetCompletionResponse(client *gpt3.Client, ctx context.Context, question string) ([]string, error) {
 	if CleanPrompt {
 		question = strings.ReplaceAll(question, "\n", " ")
@@ -139,11 +114,6 @@ func GetCompletionResponse(client *gpt3.Client, ctx context.Context, question st
 		question += "\n"
 	}
 
-	err := checkModel(Model)
-	if err != nil {
-		return nil, err
-	}
-
 	req := gpt3.CompletionRequest{
 		Model:     Model,
 		MaxTokens: MaxTokens,
@@ -173,11 +143,6 @@ func GetEditsResponse(client *gpt3.Client, ctx context.Context, input, instructi
 		input = strings.ReplaceAll(input, "  ", " ")
 	}
 
-	err := checkModel(Model)
-	if err != nil {
-		return nil, err
-	}
-
 	m := Model
 	req := gpt3.EditsRequest{
 		Model: &m,
@@ -441,6 +406,16 @@ func RunPrompt(client *gpt3.Client) error {
 			}
 			continue
 
+		case "model":
+			if len(parts) == 1 {
+				fmt.Println("model is set to", Model)
+				continue
+			}
+
+			Model = parts[1]
+			fmt.Println("model is now", Model)
+			continue
+
 		case "tokens":
 			if len(parts) == 1 {
 				fmt.Println("tokens is set to", MaxTokens)
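
Note: because the import stays aliased as gpt3, the rename is a drop-in swap and no call
sites in main.go change. A minimal sketch of the client flow the patched code relies on,
assuming go-openai v1.5.0's NewClient/CreateCompletion signatures; the helper name ask
and the OPENAI_API_KEY env var are illustrative, not taken from this patch:

    // ask is an illustrative one-shot completion helper using the gpt3 alias
    // from the import block above; assumes os/context/fmt are also imported.
    func ask(prompt string) (string, error) {
        client := gpt3.NewClient(os.Getenv("OPENAI_API_KEY"))
        resp, err := client.CreateCompletion(context.Background(), gpt3.CompletionRequest{
            Model:     gpt3.GPT3TextDavinci003, // the default named in the help text
            MaxTokens: 256,
            Prompt:    prompt,
        })
        if err != nil {
            return "", err
        }
        return resp.Choices[0].Text, nil
    }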
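Note: the deleted checkModel only knew the hardcoded go-gpt3 constants, and its TODO
already pointed at gpt3.Client.ListModels(), the same API the new help text links to.
A minimal sketch of that follow-up against go-openai's ListModels/ModelsList types;
checkModelLive is a hypothetical name, not part of this patch, and it assumes main.go's
existing context, fmt, and strings imports:

    // checkModelLive asks the API which models exist instead of relying on a
    // hardcoded list, so the user's fine-tuned models would validate too.
    func checkModelLive(ctx context.Context, client *gpt3.Client, m string) error {
        list, err := client.ListModels(ctx)
        if err != nil {
            return fmt.Errorf("listing models: %w", err)
        }

        ids := make([]string, 0, len(list.Models))
        for _, model := range list.Models {
            if model.ID == m {
                return nil
            }
            ids = append(ids, model.ID)
        }

        return fmt.Errorf("unknown model '%s', expected one of: %s", m, strings.Join(ids, ", "))
    }

In RunPrompt's new "model" case, such a check could run before the assignment to Model,
turning a typo into an immediate error rather than a failed completion call later.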