diff --git a/README.md b/README.md index 37efa22..f3374ad 100644 --- a/README.md +++ b/README.md @@ -47,16 +47,16 @@ For OpenAI, Azure OpenAI or OpenAI API compatible endpoint, you can use the foll ```shell export OPENAI_API_KEY= -export OPENAI_DEPLOYMENT_NAME= +export OPENAI_DEPLOYMENT_NAME= export OPENAI_ENDPOINT= ``` If `OPENAI_ENDPOINT` variable is set, then it will use the endpoint. Otherwise, it will use OpenAI API. -Azure OpenAI service does not allow certain characters, such as `.`, in the deployment name. Consequently, `kubectl-ai` will automatically replace `gpt-3.5-turbo` to `gpt-35-turbo` for Azure. However, if you use an Azure OpenAI deployment name completely different from the model name, you can set `AZURE_OPENAI_MAP` environment variable to map the model name to the Azure OpenAI deployment name. For example: +If you use an Azure OpenAI deployment name completely different from the model name, you can set `AZURE_OPENAI_MAP` environment variable to map the model name to the Azure OpenAI deployment name. For example: ```shell -export AZURE_OPENAI_MAP="gpt-3.5-turbo=my-deployment" +export AZURE_OPENAI_MAP="gpt-4o-mini=my-deployment" ``` ### Set up a local OpenAI API-compatible endpoint @@ -74,7 +74,7 @@ After setting up the environment like above, you can use `kubectl-ai` as usual. ### Flags and environment variables -- `--require-confirmation` flag or `REQUIRE_CONFIRMATION` environment varible can be set to prompt the user for confirmation before applying the manifest. Defaults to true. +- `--require-confirmation` flag or `REQUIRE_CONFIRMATION` environment variable can be set to prompt the user for confirmation before applying the manifest. Defaults to true. - `--temperature` flag or `TEMPERATURE` environment variable can be set between 0 and 1. Higher temperature will result in more creative completions. Lower temperature will result in more deterministic completions. Defaults to 0. 
diff --git a/cmd/cli/root.go b/cmd/cli/root.go index d4fca8b..a699060 100644 --- a/cmd/cli/root.go +++ b/cmd/cli/root.go @@ -28,10 +28,10 @@ var ( version = "dev" kubernetesConfigFlags = genericclioptions.NewConfigFlags(false) - openAIDeploymentName = flag.String("openai-deployment-name", env.GetOr("OPENAI_DEPLOYMENT_NAME", env.String, "gpt-3.5-turbo-0301"), "The deployment name used for the model in OpenAI service.") + openAIDeploymentName = flag.String("openai-deployment-name", env.GetOr("OPENAI_DEPLOYMENT_NAME", env.String, "gpt-4o-mini-2024-07-18"), "The deployment name used for the model in OpenAI service.") openAIAPIKey = flag.String("openai-api-key", env.GetOr("OPENAI_API_KEY", env.String, ""), "The API key for the OpenAI service. This is required.") openAIEndpoint = flag.String("openai-endpoint", env.GetOr("OPENAI_ENDPOINT", env.String, openaiAPIURLv1), "The endpoint for OpenAI service. Defaults to"+openaiAPIURLv1+". Set this to your Local AI endpoint or Azure OpenAI Service, if needed.") - azureModelMap = flag.StringToString("azure-openai-map", env.GetOr("AZURE_OPENAI_MAP", env.Map(env.String, "=", env.String, ""), map[string]string{}), "The mapping from OpenAI model to Azure OpenAI deployment. Defaults to empty map. Example format: gpt-3.5-turbo=my-deployment.") + azureModelMap = flag.StringToString("azure-openai-map", env.GetOr("AZURE_OPENAI_MAP", env.Map(env.String, "=", env.String, ""), map[string]string{}), "The mapping from OpenAI model to Azure OpenAI deployment. Defaults to empty map. Example format: gpt-4o-mini=my-deployment.") requireConfirmation = flag.Bool("require-confirmation", env.GetOr("REQUIRE_CONFIRMATION", strconv.ParseBool, true), "Whether to require confirmation before executing the command. Defaults to true.") temperature = flag.Float64("temperature", env.GetOr("TEMPERATURE", env.WithBitSize(strconv.ParseFloat, 64), 0.0), "The temperature to use for the model. Range is between 0 and 1. 
Set closer to 0 if you want output to be more deterministic but less creative. Defaults to 0.0.") raw = flag.Bool("raw", false, "Prints the raw YAML output immediately. Defaults to false.")