@@ -54,6 +54,7 @@ Examples:
# model options (https://platform.openai.com/docs/api-reference/completions/create)
chatgpt -T 4096 # set max tokens in response [0,4096]
chatgpt -C # clean whitespace before sending
chatgpt -E # echo back the prompt, useful for vim coding
chatgpt --temp # set the temperature param [0.0,2.0]
chatgpt --topp # set the TopP param [0.0,1.0]
chatgpt --pres # set the Presence Penalty [-2.0,2.0]
@@ -72,6 +73,7 @@ var interactiveHelp = `starting interactive session...
	'pres' set the Presence Penalty [-2.0,2.0]
	'freq' set the Frequency Penalty [-2.0,2.0]
`

//go:embed pretexts/*
var predefined embed.FS

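For context between hunks: the `//go:embed pretexts/*` directive above bundles the pretext prompt files into the binary as an `embed.FS`. As a rough illustration (not part of this diff), reading one of those embedded files back looks roughly like the sketch below, assuming only the standard `embed` API; the file name is hypothetical.

```go
package main

import (
	"embed"
	"fmt"
)

//go:embed pretexts/*
var predefined embed.FS

func main() {
	// Paths are relative to the package directory, so the "pretexts/" prefix stays.
	// "pretexts/coding" is a made-up name purely for illustration.
	data, err := predefined.ReadFile("pretexts/coding")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```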
@@ -90,6 +92,7 @@ var PromptText string
// chatgpt vars
var MaxTokens int
var Count int
var Echo bool
var Temp float64
var TopP float64
var PresencePenalty float64
@@ -100,10 +103,16 @@ func GetCompletionResponse(client *gpt3.Client, ctx context.Context, question st
		question = strings.ReplaceAll(question, "\n", " ")
		question = strings.ReplaceAll(question, "  ", " ")
	}
	// insert newline at end to prevent completion of question
	if !strings.HasSuffix(question, "\n") {
		question += "\n"
	}

	req := gpt3.CompletionRequest{
		Model:       gpt3.GPT3TextDavinci003,
		MaxTokens:   MaxTokens,
		Prompt:      question,
		Echo:        Echo,
		N:           Count,
		Temperature: float32(Temp),
		TopP:        float32(TopP),
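The hunk above is cut off after the `TopP` field, before the request is actually sent. Below is a minimal sketch of how a `CompletionRequest` like this is typically dispatched, assuming the `sashabaranov/go-gpt3` client imported under the `gpt3` alias; the helper name, import path, and prompt values are assumptions for illustration, not part of the diff.

```go
package main

import (
	"context"
	"fmt"
	"os"

	gpt3 "github.com/sashabaranov/go-gpt3"
)

// completionTexts is a hypothetical helper: it sends a request like the one
// built in the hunk above and flattens the returned choices into plain strings.
func completionTexts(ctx context.Context, client *gpt3.Client, req gpt3.CompletionRequest) ([]string, error) {
	resp, err := client.CreateCompletion(ctx, req)
	if err != nil {
		return nil, err
	}
	out := make([]string, 0, len(resp.Choices))
	for _, choice := range resp.Choices {
		out = append(out, choice.Text)
	}
	return out, nil
}

func main() {
	client := gpt3.NewClient(os.Getenv("OPENAI_API_KEY"))
	req := gpt3.CompletionRequest{
		Model:     gpt3.GPT3TextDavinci003,
		MaxTokens: 256,
		Prompt:    "say hello\n", // trailing newline, as in the hunk above
	}
	texts, err := completionTexts(context.Background(), client, req)
	if err != nil {
		panic(err)
	}
	for _, t := range texts {
		fmt.Println(t)
	}
}
```

Each element of `resp.Choices` corresponds to one of the `N` (`--count`) completions requested.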
@@ -153,10 +162,16 @@ func GetCodeResponse(client *gpt3.Client, ctx context.Context, question string)
		question = strings.ReplaceAll(question, "\n", " ")
		question = strings.ReplaceAll(question, "  ", " ")
	}
	// insert newline at end to prevent completion of question
	if !strings.HasSuffix(question, "\n") {
		question += "\n"
	}

	req := gpt3.CompletionRequest{
		Model:       gpt3.CodexCodeDavinci002,
		MaxTokens:   MaxTokens,
		Prompt:      question,
		Echo:        Echo,
		N:           Count,
		Temperature: float32(Temp),
		TopP:        float32(TopP),
@@ -341,12 +356,12 @@ func main() {
	// params related
	rootCmd.Flags().IntVarP(&MaxTokens, "tokens", "T", 1024, "set the MaxTokens to generate per response")
	rootCmd.Flags().IntVarP(&Count, "count", "C", 1, "set the number of response options to create")
	rootCmd.Flags().BoolVarP(&Echo, "echo", "E", false, "Echo back the prompt, useful for vim coding")
	rootCmd.Flags().Float64VarP(&Temp, "temp", "", 1.0, "set the temperature parameter")
	rootCmd.Flags().Float64VarP(&TopP, "topp", "", 1.0, "set the TopP parameter")
	rootCmd.Flags().Float64VarP(&PresencePenalty, "pres", "", 0.0, "set the Presence Penalty parameter")
	rootCmd.Flags().Float64VarP(&FrequencyPenalty, "freq", "", 0.0, "set the Frequency Penalty parameter")

	// run the command
	rootCmd.Execute()
}
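The flag registrations above bind each option to a package-level variable before `rootCmd.Execute()` parses the command line. A self-contained sketch of that wiring, assuming only `spf13/cobra`; everything beyond the flag and variable names shown in the diff is illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Stand-ins for the package-level vars in the diff.
var (
	PresencePenalty  float64
	FrequencyPenalty float64
)

func main() {
	rootCmd := &cobra.Command{
		Use: "chatgpt",
		Run: func(cmd *cobra.Command, args []string) {
			// By the time Run fires, cobra has parsed --pres/--freq into the vars.
			fmt.Println("pres:", PresencePenalty, "freq:", FrequencyPenalty)
		},
	}
	// An empty shorthand ("") registers long-form-only flags, as in the diff.
	rootCmd.Flags().Float64VarP(&PresencePenalty, "pres", "", 0.0, "set the Presence Penalty parameter")
	rootCmd.Flags().Float64VarP(&FrequencyPenalty, "freq", "", 0.0, "set the Frequency Penalty parameter")
	rootCmd.Execute()
}
```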
@@ -464,7 +479,6 @@ func RunPrompt(client *gpt3.Client) error {
			FrequencyPenalty = f
			fmt.Println("freq is now", FrequencyPenalty)

		default:
			// add the question to the existing prompt text, to keep context
			PromptText += "\n> " + question
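The `default:` branch above keeps conversational context by appending each new question to `PromptText` rather than sending the question on its own. A toy sketch of that accumulation, using a local stand-in for the package-level variable; the seed text and questions are made up.

```go
package main

import "fmt"

func main() {
	// Local stand-in for the package-level PromptText in the diff.
	PromptText := "You are a helpful assistant."

	// Each interactive question is appended so earlier turns stay in the prompt
	// that would be sent with the next completion request.
	for _, question := range []string{"what is Go?", "show a hello world"} {
		PromptText += "\n> " + question
		fmt.Println(PromptText)
		fmt.Println("----")
	}
}
```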