an example from chatgpt
parent 17a8798b1b
commit 0f6351ab29
@@ -1,6 +1,7 @@
build:
	# reset
	GO111MODULE="off" go build -v -x
	GO111MODULE="off" go build -v -x complicated.go
	# GO111MODULE="off" go build -v -x

deps:
	go get github.com/openai/gpt-3
@@ -0,0 +1,84 @@
package main

/*
This was literally generated via chatgpt by asking it for a complicated golang example
*/

import (
	"fmt"
	"log"

	"github.com/openai/gpt-3"
)

func main() {
	client := gpt3.NewClient("API_KEY")

	// Use the "text" engine to generate text based on the given prompt.
	res, err := client.Evaluate(gpt3.EvaluateOpts{
		Engine: "text",
		Prompt: "Once upon a time in a land far, far away...",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Text)

	// Use the "davinci" engine to generate code for a simple web server.
	res, err = client.Evaluate(gpt3.EvaluateOpts{
		Engine: "davinci",
		Prompt: "Write a simple web server in Go that responds with 'Hello, world!' to any request.",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Text)

	// Use the "curie" engine to generate a natural language response to a question.
	res, err = client.Evaluate(gpt3.EvaluateOpts{
		Engine: "curie",
		Prompt: "What is the capital of France?",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Text)

	// Use the "babbage" engine to generate a mathematical expression.
	res, err = client.Evaluate(gpt3.EvaluateOpts{
		Engine: "babbage",
		Prompt: "Generate a formula to calculate the volume of a sphere.",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Text)

	// Use the "ada" engine to generate a completion for a given prompt.
	res, err = client.Evaluate(gpt3.EvaluateOpts{
		Engine:      "ada",
		Prompt:      "The quick brown fox jumps over the lazy dog. The fox is very smart and knows how to...",
		MaxTokens:   128, // Limit the response to 128 tokens.
		Temperature: 0.5, // Use a higher temperature for more varied responses.
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Text)

	// Use the "ada" engine to generate multiple completions for a given prompt.
	completions, err := client.Completions(gpt3.CompletionsOpts{
		Engine:      "ada",
		Prompt:      "The quick brown fox jumps over the lazy dog. The fox is very smart and knows how to...",
		MaxTokens:   128,  // Limit the response to 128 tokens.
		Temperature: 0.5,  // Use a higher temperature for more varied responses.
		N:           3,    // Generate 3 completions.
		Stop:        "\n", // Stop the completions when a newline is encountered.
	})
	if err != nil {
		panic(err)
	}
	for i, c := range completions {
		log.Println("i, c =", i, c)
	}
}
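Note: the github.com/openai/gpt-3 import path and its NewClient/Evaluate/Completions API are part of the ChatGPT-generated example and do not correspond to a Go client library I can verify; the file is kept as generated. For comparison, below is a minimal standalone sketch of making one completion request with only the standard library, assuming the OpenAI HTTP completions endpoint (POST https://api.openai.com/v1/completions, Bearer key taken from an OPENAI_API_KEY environment variable) and an assumed model name.

// A standalone sketch, not the client API used in the generated file above.
// Assumes the OpenAI completions endpoint and an OPENAI_API_KEY environment variable.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// completionRequest mirrors the JSON body the completions endpoint expects (assumed fields).
type completionRequest struct {
	Model       string  `json:"model"`
	Prompt      string  `json:"prompt"`
	MaxTokens   int     `json:"max_tokens"`
	Temperature float64 `json:"temperature"`
}

// completionResponse keeps only the fields read back here.
type completionResponse struct {
	Choices []struct {
		Text string `json:"text"`
	} `json:"choices"`
}

func main() {
	// Build the request body for a single completion.
	body, err := json.Marshal(completionRequest{
		Model:       "gpt-3.5-turbo-instruct", // assumed completions-capable model name
		Prompt:      "Once upon a time in a land far, far away...",
		MaxTokens:   128,
		Temperature: 0.5,
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("POST", "https://api.openai.com/v1/completions", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+os.Getenv("OPENAI_API_KEY"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the generated text choices and print them.
	var out completionResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for i, c := range out.Choices {
		fmt.Println(i, c.Text)
	}
}

Using only net/http and encoding/json keeps the sketch free of external dependencies, so it builds even with GO111MODULE="off" as in the Makefile above.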