diff --git a/.golangci.yaml b/.golangci.yaml index 0921930ff9..229d73d143 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -23,23 +23,8 @@ linters: - "-ST1020" - "-ST1021" - "-ST1022" - ##### TODO: fix and enable these - # 4 occurrences. - # Use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...)) https://staticcheck.dev/docs/checks#QF1012 - - "-QF1012" - # 3 occurrences. - # Apply De Morgan’s law https://staticcheck.dev/docs/checks#QF1001 - - "-QF1001" - # 9 occurrences. - # Convert if/else-if chain to tagged switch https://staticcheck.dev/docs/checks#QF1003 - - "-QF1003" - # 1 occurrence. - # could omit type *os.File from declaration; it will be inferred from the right-hand side - - "-QF1011" - ##### These have been vetted to be disabled. - # 19 occurrences. Omit embedded fields from selector expression https://staticcheck.dev/docs/checks#QF1008 - # Usefulness is questionable. - "-QF1008" + revive: enable-all-rules: true rules: @@ -150,23 +135,22 @@ linters: # - yodaStyleExpr # - typeUnparen - ##### TODO: fix and enable these # We enabled these and we pass - nilValReturn - # - weakCond # pkg/minikube/config/profile.go:61:9: weakCond: suspicious `cc.Nodes != nil && cc.Nodes[0].Name == node.Name`; nil check may not be enough, check for len (gocritic) + - weakCond - indexAlloc - rangeExprCopy - boolExprSimplify - commentedOutImport - # - docStub # pkg/minikube/tunnel/kic/service_tunnel.go:51:1: docStub: silencing go lint doc-comment warnings is unadvised (gocritic) + - docStub - emptyFallthrough - hexLiteral - typeAssertChain - unlabelStmt - # - builtinShadow # cmd/minikube/cmd/delete.go:89:7: builtinShadow: shadowing of predeclared identifier: error (gocritic) - # - importShadow # pkg/storage/storage_provisioner.go:60:2: importShadow: shadow of imported package 'path' (gocritic) + - builtinShadow + - importShadow - initClause - # - nestingReduce # pkg/minikube/tunnel/registry.go:94:3: nestingReduce: invert if cond, replace body with `continue`, move old body 
after the statement (gocritic) + - nestingReduce - unnecessaryBlock exclusions: @@ -181,7 +165,3 @@ linters: - path: '(.+)\.go$' text: "Error return value of `.*` is not checked" linter: errcheck - # This code is doubtful and I don't understand it. Location: Line 456 - - path: 'cmd/minikube/cmd/docker-env.go' - text: "useless-break: useless break in case clause" - linter: revive diff --git a/Makefile b/Makefile index b44f1767e0..8455fb517e 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ KIC_VERSION ?= $(shell grep -E "Version =" pkg/drivers/kic/types.go | cut -d \" HUGO_VERSION ?= $(shell grep -E "HUGO_VERSION = \"" netlify.toml | cut -d \" -f2) # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions -ISO_VERSION ?= v1.36.0 +ISO_VERSION ?= v1.36.0-1749153077-20895 # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta DEB_VERSION ?= $(subst -,~,$(RAW_VERSION)) @@ -103,7 +103,7 @@ $(shell mkdir -p $(BUILD_DIR)) CURRENT_GIT_BRANCH ?= $(shell git branch | grep \* | cut -d ' ' -f2) # Use system python if it exists, otherwise use Docker. 
-PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube:Z -w /minikube python python") +PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube -w /minikube python python") BUILD_OS := $(shell uname -s) SHA512SUM=$(shell command -v sha512sum || echo "shasum -a 512") @@ -189,7 +189,7 @@ endef # $(call DOCKER, image, command) define DOCKER - docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /app -v $(PWD):/app:Z -v $(GOPATH):/go --init $(1) /bin/bash -c '$(2)' + docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /app -v $(PWD):/app -v $(GOPATH):/go --init $(1) /bin/bash -c '$(2)' endef ifeq ($(BUILD_IN_DOCKER),y) @@ -341,13 +341,13 @@ out/minikube-%.iso: $(shell find "deploy/iso/minikube-iso" -type f) ifeq ($(IN_DOCKER),1) $(MAKE) minikube-iso-$* else - docker run --rm --workdir /mnt --volume $(CURDIR):/mnt:Z $(ISO_DOCKER_EXTRA_ARGS) \ + docker run --rm --workdir /mnt --volume $(CURDIR):/mnt $(ISO_DOCKER_EXTRA_ARGS) \ --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \ $(ISO_BUILD_IMAGE) /bin/bash -lc '/usr/bin/make minikube-iso-$*' endif iso_in_docker: - docker run -it --rm --workdir /mnt --volume $(CURDIR):/mnt:Z $(ISO_DOCKER_EXTRA_ARGS) \ + docker run -it --rm --workdir /mnt --volume $(CURDIR):/mnt $(ISO_DOCKER_EXTRA_ARGS) \ --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \ $(ISO_BUILD_IMAGE) /bin/bash @@ -523,7 +523,7 @@ out/linters/golangci-lint-$(GOLINT_VERSION): .PHONY: lint ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) lint: - docker run --rm -v `pwd`:/app:Z -w /app golangci/golangci-lint:$(GOLINT_VERSION) \ + docker run --rm -v `pwd`:/app -w /app golangci/golangci-lint:$(GOLINT_VERSION) \ golangci-lint run ${GOLINT_OPTIONS} ./..." 
# --skip-dirs "cmd/drivers/kvm|cmd/drivers/hyperkit|pkg/drivers/kvm|pkg/drivers/hyperkit" # The "--skip-dirs" parameter is no longer supported in the V2 version. If you need to skip the directory, @@ -657,7 +657,7 @@ out/docker-machine-driver-hyperkit: ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 \ --user $(shell id -u):$(shell id -g) -w /app \ - -v $(PWD):/app:Z -v $(GOPATH):/go:Z --init --entrypoint "" \ + -v $(PWD):/app -v $(GOPATH):/go --init --entrypoint "" \ $(HYPERKIT_BUILD_IMAGE) /bin/bash -c 'CC=o64-clang CXX=o64-clang++ /usr/bin/make $@' else $(if $(quiet),@echo " GO $@") diff --git a/OWNERS b/OWNERS index d436b77c90..50b5edab7b 100644 --- a/OWNERS +++ b/OWNERS @@ -3,7 +3,6 @@ reviewers: - medyagh - prezha - - spowelljr - comradeprogrammer approvers: - medyagh diff --git a/cmd/minikube/cmd/completion.go b/cmd/minikube/cmd/completion.go index ab8221b215..b85d4e9a59 100644 --- a/cmd/minikube/cmd/completion.go +++ b/cmd/minikube/cmd/completion.go @@ -162,7 +162,7 @@ func GenerateBashCompletion(w io.Writer, cmd *cobra.Command) error { } // GenerateZshCompletion generates the completion for the zsh shell -func GenerateZshCompletion(out io.Writer, cmd *cobra.Command) error { +func GenerateZshCompletion(w io.Writer, cmd *cobra.Command) error { zshAutoloadTag := `#compdef minikube ` @@ -300,17 +300,17 @@ __minikube_convert_bash_to_zsh() { <<'BASH_COMPLETION_EOF' ` - _, err := out.Write([]byte(zshAutoloadTag)) + _, err := w.Write([]byte(zshAutoloadTag)) if err != nil { return err } - _, err = out.Write([]byte(boilerPlate)) + _, err = w.Write([]byte(boilerPlate)) if err != nil { return err } - _, err = out.Write([]byte(zshInitialization)) + _, err = w.Write([]byte(zshInitialization)) if err != nil { return err } @@ -320,7 +320,7 @@ __minikube_convert_bash_to_zsh() { if err != nil { return errors.Wrap(err, "Error generating zsh completion") } - _, err = out.Write(buf.Bytes()) + _, err = w.Write(buf.Bytes()) if err != nil 
{ return err } @@ -330,7 +330,7 @@ BASH_COMPLETION_EOF } __minikube_bash_source <(__minikube_convert_bash_to_zsh) ` - _, err = out.Write([]byte(zshTail)) + _, err = w.Write([]byte(zshTail)) if err != nil { return err } diff --git a/cmd/minikube/cmd/config/configure.go b/cmd/minikube/cmd/config/configure.go index 93bcbdfa2f..045aafca55 100644 --- a/cmd/minikube/cmd/config/configure.go +++ b/cmd/minikube/cmd/config/configure.go @@ -133,7 +133,7 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) { type configFile struct { Addons addonConfig `json:"addons"` } - var config configFile + var cf configFile if configFilePath != "" { out.Ln("Reading %s configs from %s", addon, configFilePath) @@ -150,14 +150,14 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) { fmt.Sprintf("error opening config file: %s", configFilePath)) } - if err = json.Unmarshal(confData, &config); err != nil { + if err = json.Unmarshal(confData, &cf); err != nil { // err = errors2.Wrapf(err, "error reading config file (%s)", configFilePath) klog.Errorf("error reading config file (%s): %v", configFilePath, err) exit.Message(reason.Kind{ExitCode: reason.ExProgramConfig, Advice: "provide a valid config file"}, fmt.Sprintf("error reading config file: %v", err)) } - return &config.Addons + return &cf.Addons } return nil } diff --git a/cmd/minikube/cmd/config/configure_registry_creds.go b/cmd/minikube/cmd/config/configure_registry_creds.go index e6ddaf6230..8a6da86eab 100644 --- a/cmd/minikube/cmd/config/configure_registry_creds.go +++ b/cmd/minikube/cmd/config/configure_registry_creds.go @@ -92,7 +92,9 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) { regCredsConf := &ac.RegistryCreds awsEcrAction := regCredsConf.EnableAWSEcr // regCredsConf. 
"enableAWSEcr") - if awsEcrAction == "prompt" || awsEcrAction == "" { + + switch awsEcrAction { + case "prompt", "": enableAWSECR := AskForYesNoConfirmation("\nDo you want to enable AWS Elastic Container Registry?", posResponses, negResponses) if enableAWSECR { awsAccessID = AskForStaticValue("-- Enter AWS Access Key ID: ") @@ -102,7 +104,7 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) { awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID (Comma separated list): ") awsRole = AskForStaticValueOptional("-- (Optional) Enter ARN of AWS role to assume: ") } - } else if awsEcrAction == "enable" { + case "enable": out.Ln("Loading AWS ECR configs from: %s", addonConfigFile) // Then read the configs awsAccessID = regCredsConf.EcrConfigs.AccessID @@ -111,15 +113,17 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) { awsRegion = regCredsConf.EcrConfigs.Region awsAccount = regCredsConf.EcrConfigs.Account awsRole = regCredsConf.EcrConfigs.Role - } else if awsEcrAction == "disable" { + case "disable": out.Ln("Ignoring AWS ECR configs") - } else { + default: out.Ln("Disabling AWS ECR. Invalid value for enableAWSEcr (%s). Must be one of 'disable', 'enable' or 'prompt'", awsEcrAction) } gcrPath := "" gcrAction := regCredsConf.EnableGCR - if gcrAction == "prompt" || gcrAction == "" { + + switch gcrAction { + case "prompt", "": enableGCR := AskForYesNoConfirmation("\nDo you want to enable Google Container Registry?", posResponses, negResponses) if enableGCR { gcrPath = AskForStaticValue("-- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):") @@ -129,14 +133,14 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) { gcrURL = AskForStaticValue("-- Enter GCR URL (e.g. 
https://asia.gcr.io):") } } - } else if gcrAction == "enable" { + case "enable": out.Ln("Loading GCR configs from: %s", addonConfigFile) // Then read the configs gcrPath = regCredsConf.GcrConfigs.GcrPath gcrURL = regCredsConf.GcrConfigs.GcrURL - } else if gcrAction == "disable" { + case "disable": out.Ln("Ignoring GCR configs") - } else { + default: out.Ln("Disabling GCR. Invalid value for enableGCR (%s). Must be one of 'disable', 'enable' or 'prompt'", gcrAction) } @@ -152,40 +156,44 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) { } dockerRegistryAction := regCredsConf.EnableDockerRegistry - if dockerRegistryAction == "prompt" || dockerRegistryAction == "" { + + switch dockerRegistryAction { + case "prompt", "": enableDR := AskForYesNoConfirmation("\nDo you want to enable Docker Registry?", posResponses, negResponses) if enableDR { dockerServer = AskForStaticValue("-- Enter docker registry server url: ") dockerUser = AskForStaticValue("-- Enter docker registry username: ") dockerPass = AskForPasswordValue("-- Enter docker registry password: ") } - } else if dockerRegistryAction == "enable" { + case "enable": out.Ln("Loading Docker Registry configs from: %s", addonConfigFile) dockerServer = regCredsConf.DockerConfigs.DockerServer dockerUser = regCredsConf.DockerConfigs.DockerUser dockerPass = regCredsConf.DockerConfigs.DockerPass - } else if dockerRegistryAction == "disable" { + case "disable": out.Ln("Ignoring Docker Registry configs") - } else { + default: out.Ln("Disabling Docker Registry. Invalid value for enableDockerRegistry (%s). 
Must be one of 'disable', 'enable' or 'prompt'", dockerRegistryAction) } acrAction := regCredsConf.EnableACR - if acrAction == "prompt" || acrAction == "" { + + switch acrAction { + case "prompt", "": enableACR := AskForYesNoConfirmation("\nDo you want to enable Azure Container Registry?", posResponses, negResponses) if enableACR { acrURL = AskForStaticValue("-- Enter Azure Container Registry (ACR) URL: ") acrClientID = AskForStaticValue("-- Enter client ID (service principal ID) to access ACR: ") acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ") } - } else if acrAction == "enable" { + case "enable": out.Ln("Loading ACR configs from: ", addonConfigFile) acrURL = regCredsConf.AcrConfigs.AcrURL acrClientID = regCredsConf.AcrConfigs.AcrClientID acrPassword = regCredsConf.AcrConfigs.AcrPassword - } else if acrAction == "disable" { + case "disable": out.Ln("Ignoring ACR configs") - } else { + default: out.Stringf("Disabling ACR. Invalid value for enableACR (%s). 
Must be one of 'disable', 'enable' or 'prompt'", acrAction) } diff --git a/cmd/minikube/cmd/config/kubernetes_version.go b/cmd/minikube/cmd/config/kubernetes_version.go index 4f70266324..f58a7844d6 100644 --- a/cmd/minikube/cmd/config/kubernetes_version.go +++ b/cmd/minikube/cmd/config/kubernetes_version.go @@ -20,7 +20,7 @@ import ( "context" "net/http" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "golang.org/x/mod/semver" "k8s.io/minikube/pkg/minikube/constants" ) diff --git a/cmd/minikube/cmd/config/profile_list.go b/cmd/minikube/cmd/config/profile_list.go index 081b026995..30bf874460 100644 --- a/cmd/minikube/cmd/config/profile_list.go +++ b/cmd/minikube/cmd/config/profile_list.go @@ -40,8 +40,11 @@ import ( "k8s.io/klog/v2" ) -var profileOutput string -var isLight bool +var ( + profileOutput string + isLight bool + isDetailed bool +) var profileListCmd = &cobra.Command{ Use: "list", @@ -130,7 +133,13 @@ func profileStatus(p *config.Profile, api libmachine.API) cluster.State { func renderProfilesTable(ps [][]string) { table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status", "Nodes", "Active Profile", "Active Kubecontext"}) + if isDetailed { + table.SetHeader([]string{"Profile", "Driver", "Runtime", "IP", "Port", "Version", + "Status", "Nodes", "Active Profile", "Active Kubecontext"}) + } else { + table.SetHeader([]string{"Profile", "Driver", "Runtime", "IP", "Version", "Status", + "Nodes", "Active Profile", "Active Kubecontext"}) + } table.SetAutoFormatHeaders(false) table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true}) table.SetCenterSeparator("|") @@ -164,7 +173,13 @@ func profilesToTableData(profiles []*config.Profile) [][]string { if p.ActiveKubeContext { k = "*" } - data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cpIP, strconv.Itoa(cpPort), k8sVersion, 
p.Status, strconv.Itoa(len(p.Config.Nodes)), c, k}) + if isDetailed { + data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, + cpIP, strconv.Itoa(cpPort), k8sVersion, p.Status, strconv.Itoa(len(p.Config.Nodes)), c, k}) + } else { + data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, + cpIP, k8sVersion, p.Status, strconv.Itoa(len(p.Config.Nodes)), c, k}) + } } return data } @@ -213,5 +228,6 @@ func profilesOrDefault(profiles []*config.Profile) []*config.Profile { func init() { profileListCmd.Flags().StringVarP(&profileOutput, "output", "o", "table", "The output format. One of 'json', 'table'") profileListCmd.Flags().BoolVarP(&isLight, "light", "l", false, "If true, returns list of profiles faster by skipping validating the status of the cluster.") + profileListCmd.Flags().BoolVarP(&isDetailed, "detailed", "d", false, "If true, returns a detailed list of profiles.") ProfileCmd.AddCommand(profileListCmd) } diff --git a/cmd/minikube/cmd/dashboard.go b/cmd/minikube/cmd/dashboard.go index 52753fb36f..c7b1eabf7f 100644 --- a/cmd/minikube/cmd/dashboard.go +++ b/cmd/minikube/cmd/dashboard.go @@ -157,7 +157,7 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p klog.Infof("Waiting for kubectl to output host:port ...") reader := bufio.NewReader(stdoutPipe) - var out []byte + var outData []byte for { r, timedOut, err := readByteWithTimeout(reader, 5*time.Second) if err != nil { @@ -170,10 +170,10 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p klog.Infof("timed out waiting for input: possibly due to an old kubectl version.") break } - out = append(out, r) + outData = append(outData, r) } - klog.Infof("proxy stdout: %s", string(out)) - return cmd, hostPortRe.FindString(string(out)), nil + klog.Infof("proxy stdout: %s", string(outData)) + return cmd, hostPortRe.FindString(string(outData)), nil } // readByteWithTimeout 
returns a byte from a reader or an indicator that a timeout has occurred. @@ -203,9 +203,9 @@ func readByteWithTimeout(r io.ByteReader, timeout time.Duration) (byte, bool, er } // dashboardURL generates a URL for accessing the dashboard service -func dashboardURL(proxy string, ns string, svc string) string { +func dashboardURL(addr string, ns string, svc string) string { // Reference: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above - return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", proxy, ns, svc) + return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", addr, ns, svc) } // checkURL checks if a URL returns 200 HTTP OK diff --git a/cmd/minikube/cmd/delete.go b/cmd/minikube/cmd/delete.go index 1fa0862bac..2f56362460 100644 --- a/cmd/minikube/cmd/delete.go +++ b/cmd/minikube/cmd/delete.go @@ -86,8 +86,8 @@ type DeletionError struct { Errtype typeOfError } -func (error DeletionError) Error() string { - return error.Err.Error() +func (deletionError DeletionError) Error() string { + return deletionError.Err.Error() } var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error { @@ -527,11 +527,11 @@ func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.N } // HandleDeletionErrors handles deletion errors from DeleteProfiles -func HandleDeletionErrors(errors []error) { - if len(errors) == 1 { - handleSingleDeletionError(errors[0]) +func HandleDeletionErrors(errs []error) { + if len(errs) == 1 { + handleSingleDeletionError(errs[0]) } else { - handleMultipleDeletionErrors(errors) + handleMultipleDeletionErrors(errs) } } @@ -556,10 +556,10 @@ func handleSingleDeletionError(err error) { } } -func handleMultipleDeletionErrors(errors []error) { +func handleMultipleDeletionErrors(errs []error) { out.ErrT(style.Sad, "Multiple errors deleting profiles") - for _, err := range errors { + for _, err := range errs { deletionError, ok 
:= err.(DeletionError) if ok { @@ -706,14 +706,14 @@ var isMinikubeProcess = func(pid int) (bool, error) { // getPids opens the file at PATH and tries to read // one or more space separated pids func getPids(path string) ([]int, error) { - out, err := os.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return nil, errors.Wrap(err, "ReadFile") } - klog.Infof("pidfile contents: %s", out) + klog.Infof("pidfile contents: %s", data) pids := []int{} - strPids := strings.Fields(string(out)) + strPids := strings.Fields(string(data)) for _, p := range strPids { intPid, err := strconv.Atoi(p) if err != nil { diff --git a/cmd/minikube/cmd/docker-env.go b/cmd/minikube/cmd/docker-env.go index 8a376ea802..6b65b21bcd 100644 --- a/cmd/minikube/cmd/docker-env.go +++ b/cmd/minikube/cmd/docker-env.go @@ -462,7 +462,6 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error { switch outputFormat { case "": // shell "none" - break case "text": for k, v := range envVars { _, err := fmt.Fprintf(w, "%s=%s\n", k, v) @@ -472,11 +471,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error { } return nil case "json": - json, err := json.Marshal(envVars) + jsondata, err := json.Marshal(envVars) if err != nil { return err } - _, err = w.Write(json) + _, err = w.Write(jsondata) if err != nil { return err } @@ -486,11 +485,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error { } return nil case "yaml": - yaml, err := yaml.Marshal(envVars) + yamldata, err := yaml.Marshal(envVars) if err != nil { return err } - _, err = w.Write(yaml) + _, err = w.Write(yamldata) if err != nil { return err } @@ -509,7 +508,6 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error { switch outputFormat { case "": // shell "none" - break case "text": for _, n := range vars { _, err := fmt.Fprintf(w, "%s\n", n) @@ -519,11 +517,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error { } return nil case "json": - json, err := json.Marshal(vars) + jsondata, 
err := json.Marshal(vars) if err != nil { return err } - _, err = w.Write(json) + _, err = w.Write(jsondata) if err != nil { return err } @@ -533,11 +531,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error { } return nil case "yaml": - yaml, err := yaml.Marshal(vars) + yamldata, err := yaml.Marshal(vars) if err != nil { return err } - _, err = w.Write(yaml) + _, err = w.Write(yamldata) if err != nil { return err } diff --git a/cmd/minikube/cmd/kubectl.go b/cmd/minikube/cmd/kubectl.go index a796ceea22..01b10404bc 100644 --- a/cmd/minikube/cmd/kubectl.go +++ b/cmd/minikube/cmd/kubectl.go @@ -157,12 +157,12 @@ func KubectlCommand(version, binaryURL string, args ...string) (*exec.Cmd, error version = constants.DefaultKubernetesVersion } - path, err := node.CacheKubectlBinary(version, binaryURL) + binary, err := node.CacheKubectlBinary(version, binaryURL) if err != nil { return nil, err } - return exec.Command(path, args...), nil + return exec.Command(binary, args...), nil } func init() { diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index e05638b245..86a70e3c63 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -63,7 +63,7 @@ var logsCmd = &cobra.Command{ Short: "Returns logs to debug a local Kubernetes cluster", Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`, Run: func(_ *cobra.Command, _ []string) { - var logOutput *os.File = os.Stdout + logOutput := os.Stdout var err error if fileOutput != "" { diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 345e2adac7..f8b1ab80ad 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -313,7 +313,7 @@ func removePid(path string, pid string) error { // we found the correct file // we're reading the pids... 
- out, err := os.ReadFile(pidPath) + data, err := os.ReadFile(pidPath) if err != nil { return errors.Wrap(err, "readFile") } @@ -321,7 +321,7 @@ func removePid(path string, pid string) error { pids := []string{} // we're splitting the mount-pids file content into a slice of strings // so that we can compare each to the PID we're looking for - strPids := strings.Fields(string(out)) + strPids := strings.Fields(string(data)) for _, p := range strPids { // If we find the PID, we don't add it to the slice if p == pid { diff --git a/cmd/minikube/cmd/podman-env.go b/cmd/minikube/cmd/podman-env.go index f236e15a3f..e282000542 100644 --- a/cmd/minikube/cmd/podman-env.go +++ b/cmd/minikube/cmd/podman-env.go @@ -253,10 +253,10 @@ func podmanUnsetScript(ec PodmanEnvConfig, w io.Writer) error { // podmanBridge returns the command to use in a var for accessing the podman varlink bridge over ssh func podmanBridge(client *ssh.ExternalClient) string { - command := []string{client.BinaryPath} - command = append(command, client.BaseArgs...) - command = append(command, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge") - return strings.Join(command, " ") + cmd := []string{client.BinaryPath} + cmd = append(cmd, client.BaseArgs...) 
+ cmd = append(cmd, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge") + return strings.Join(cmd, " ") } // podmanURL returns the url to use in a var for accessing the podman socket over ssh diff --git a/cmd/minikube/cmd/service.go b/cmd/minikube/cmd/service.go index b0dfa558ec..32fe90c0ef 100644 --- a/cmd/minikube/cmd/service.go +++ b/cmd/minikube/cmd/service.go @@ -163,18 +163,20 @@ You may select another namespace by using 'minikube service {{.service}} -n 0 { out.WarningT("Services {{.svc_names}} have type \"ClusterIP\" not meant to be exposed, however for local development minikube allows you to access this !", out.V{"svc_names": noNodePortSvcNames}) } - if driver.NeedsPortForward(co.Config.Driver) && services != nil { - startKicServiceTunnel(services, cname, co.Config.Driver) + if driver.NeedsPortForward(co.Config.Driver) { + svcs := services + if len(svcs) == 0 && len(noNodePortServices) > 0 { + svcs = noNodePortServices + } + if len(svcs) > 0 { + startKicServiceTunnel(svcs, cname, co.Config.Driver) + } } else if !serviceURLMode { openURLs(data) - if len(noNodePortServices) != 0 { - startKicServiceTunnel(noNodePortServices, cname, co.Config.Driver) - } - } }, } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index cd6e786d23..82fb1386ab 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -282,7 +282,7 @@ func runStart(cmd *cobra.Command, _ []string) { } } - kubeconfig, err := startWithDriver(cmd, starter, existing) + configInfo, err := startWithDriver(cmd, starter, existing) if err != nil { node.ExitIfFatal(err, useForce) exit.Error(reason.GuestStart, "failed to start node", err) @@ -294,7 +294,7 @@ func runStart(cmd *cobra.Command, _ []string) { } } - if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil { + if err := showKubectlInfo(configInfo, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, 
starter.Cfg.Name); err != nil { klog.Errorf("kubectl info: %v", err) } } @@ -363,11 +363,11 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * } if driver.IsVM(driverName) && !driver.IsSSH(driverName) { - url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) + urlString, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) if err != nil { return node.Starter{}, errors.Wrap(err, "Failed to cache ISO") } - cc.MinikubeISO = url + cc.MinikubeISO = urlString } var existingAddons map[string]bool @@ -462,9 +462,9 @@ func imageMatchesBinaryVersion(imageVersion, binaryVersion string) bool { func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { // start primary control-plane node - kubeconfig, err := node.Start(starter) + configInfo, err := node.Start(starter) if err != nil { - kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) + configInfo, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) if err != nil { return nil, err } @@ -512,7 +512,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. 
pause.RemovePausedFile(starter.Runner) - return kubeconfig, nil + return configInfo, nil } func warnAboutMultiNodeCNI() { @@ -528,14 +528,14 @@ func updateDriver(driverName string) { } } -func displayVersion(version string) { +func displayVersion(ver string) { prefix := "" if ClusterFlagValue() != constants.DefaultClusterName { prefix = fmt.Sprintf("[%s] ", ClusterFlagValue()) } register.Reg.SetStep(register.InitialSetup) - out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) + out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": ver, "platform": platform()}) } // displayEnviron makes the user aware of environment variables that will affect how minikube operates @@ -631,7 +631,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co // Re-generate the cluster config, just in case the failure was related to an old config format cc := updateExistingConfigFromFlags(cmd, &existing) - var kubeconfig *kubeconfig.Settings + var configInfo *kubeconfig.Settings for _, n := range cc.Nodes { r, p, m, h, err := node.Provision(&cc, &n, false) s := node.Starter{ @@ -650,14 +650,14 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co k, err := node.Start(s) if n.ControlPlane { - kubeconfig = k + configInfo = k } if err != nil { // Ok we failed again, let's bail return nil, err } } - return kubeconfig, nil + return configInfo, nil } // Don't delete the cluster unless they ask return nil, originalErr @@ -902,12 +902,12 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) { // validateDriver validates that the selected driver appears sane, exits if not func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { - name := ds.Name - os := detect.RuntimeOS() + driverName := ds.Name + osName := detect.RuntimeOS() arch := detect.RuntimeArch() - 
klog.Infof("validating driver %q against %+v", name, existing) - if !driver.Supported(name) { - exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": os, "arch": arch}) + klog.Infof("validating driver %q against %+v", driverName, existing) + if !driver.Supported(driverName) { + exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": driverName, "os": osName, "arch": arch}) } // if we are only downloading artifacts for a driver, we can stop validation here @@ -916,7 +916,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { } st := ds.State - klog.Infof("status for %s: %+v", name, st) + klog.Infof("status for %s: %+v", driverName, st) if st.NeedsImprovement { out.Styled(style.Improvement, `For improved {{.driver}} performance, {{.fix}}`, out.V{"driver": driver.FullName(ds.Name), "fix": translate.T(st.Fix)}) @@ -924,7 +924,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { if ds.Priority == registry.Obsolete { exit.Message(reason.Kind{ - ID: fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(name)), + ID: fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(driverName)), Advice: translate.T(st.Fix), ExitCode: reason.ExProviderUnsupported, URL: st.Doc, @@ -943,23 +943,23 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { if !st.Installed { exit.Message(reason.Kind{ - ID: fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)), + ID: fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(driverName)), Advice: translate.T(st.Fix), ExitCode: reason.ExProviderNotFound, URL: st.Doc, Style: style.Shrug, - }, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error}) + }, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": driverName, "error": st.Error}) } id := st.Reason if id == "" 
{ - id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name)) + id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(driverName)) } code := reason.ExProviderUnavailable if !st.Running { - id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name)) + id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(driverName)) code = reason.ExProviderNotRunning } @@ -1098,8 +1098,8 @@ func suggestMemoryAllocation(sysLimit, containerLimit, nodes int) int { return mem } - const fallback = 2200 - maximum := 6000 + const fallback = 3072 + maximum := 6144 if sysLimit > 0 && fallback > sysLimit { return sysLimit @@ -1515,15 +1515,15 @@ func defaultRuntime() string { } // if container runtime is not docker, check that cni is not disabled -func validateCNI(cmd *cobra.Command, runtime string) { - if runtime == constants.Docker { +func validateCNI(cmd *cobra.Command, runtimeName string) { + if runtimeName == constants.Docker { return } if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" { if viper.GetBool(force) { - out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime}) + out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName}) } else { - exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime}) + exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName}) } } } @@ -2004,16 +2004,16 @@ func validateBareMetal(drvName string) { if err != nil { klog.Warningf("failed getting Kubernetes version: %v", err) } - version, _ := util.ParseKubernetesVersion(kubeVer) - if version.GTE(semver.MustParse("1.18.0-beta.1")) { + ver, _ := util.ParseKubernetesVersion(kubeVer) + if ver.GTE(semver.MustParse("1.18.0-beta.1")) { if _, err := exec.LookPath("conntrack"); err != nil { - 
exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) + exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": ver.String()}) } } // crictl is required starting with Kubernetes 1.24, for all runtimes since the removal of dockershim - if version.GTE(semver.MustParse("1.24.0-alpha.0")) { + if ver.GTE(semver.MustParse("1.24.0-alpha.0")) { if _, err := exec.LookPath("crictl"); err != nil { - exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": version.String()}) + exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": ver.String()}) } } } @@ -2062,24 +2062,24 @@ func startNerdctld() { runner := co.CP.Runner // and set 777 to these files - if out, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil { - exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", out.Output()), err) + if rest, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil { + exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", rest.Output()), err) } // sudo systemctl start nerdctld.socket - if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil { - exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", out.Output()), err) + if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil { + exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", 
rest.Output()), err) } // sudo systemctl start nerdctld.service - if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil { - exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", out.Output()), err) + if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil { + exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", rest.Output()), err) } // set up environment variable on remote machine. docker client uses 'non-login & non-interactive shell' therefore the only way is to modify .bashrc file of user 'docker' // insert this at 4th line envSetupCommand := exec.Command("/bin/bash", "-c", "sed -i '4i export DOCKER_HOST=unix:///run/nerdctld.sock' .bashrc") - if out, err := runner.RunCmd(envSetupCommand); err != nil { - exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", out.Output()), err) + if rest, err := runner.RunCmd(envSetupCommand); err != nil { + exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", rest.Output()), err) } } diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index cb4cfc4093..0028d50310 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -61,8 +61,8 @@ const ( hostOnlyCIDR = "host-only-cidr" containerRuntime = "container-runtime" criSocket = "cri-socket" - networkPlugin = "network-plugin" - enableDefaultCNI = "enable-default-cni" + networkPlugin = "network-plugin" // deprecated, use --cni instead + enableDefaultCNI = "enable-default-cni" // deprecated, use --cni=bridge instead cniFlag = "cni" hypervVirtualSwitch = "hyperv-virtual-switch" hypervUseExternalSwitch = "hyperv-use-external-switch" @@ -163,7 +163,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(dryRun, false, "dry-run mode. 
Validates configuration, but does not mutate system state") startCmd.Flags().String(cpus, "2", fmt.Sprintf("Number of CPUs allocated to Kubernetes. Use %q to use the maximum number of CPUs. Use %q to not specify a limit (Docker/Podman only)", constants.MaxResources, constants.NoLimit)) - startCmd.Flags().String(memory, "", fmt.Sprintf("Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). Use %q to use the maximum amount of memory. Use %q to not specify a limit (Docker/Podman only)", constants.MaxResources, constants.NoLimit)) + startCmd.Flags().StringP(memory, "m", "", fmt.Sprintf("Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). Use %q to use the maximum amount of memory. Use %q to not specify a limit (Docker/Podman only)", constants.MaxResources, constants.NoLimit)) startCmd.Flags().String(humanReadableDiskSize, defaultDiskSize, "Disk size allocated to the minikube VM (format: [], where unit = b, k, m or g).") startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.") startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.") @@ -182,7 +182,7 @@ func initMinikubeFlags() { startCmd.Flags().Uint16(mountPortFlag, defaultMountPort, mountPortDescription) startCmd.Flags().String(mountTypeFlag, defaultMountType, mountTypeDescription) startCmd.Flags().String(mountUID, defaultMountUID, mountUIDDescription) - startCmd.Flags().StringSlice(config.AddonListFlag, nil, "Enable addons. see `minikube addons list` for a list of valid addon names.") + startCmd.Flags().StringSlice(config.AddonListFlag, nil, "Enable one or more addons, in a comma-separated format. 
See `minikube addons list` for a list of valid addon names.") startCmd.Flags().String(criSocket, "", "The cri socket path to be used.") startCmd.Flags().String(networkPlugin, "", "DEPRECATED: Replaced by --cni") startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=bridge") @@ -232,6 +232,20 @@ func initKubernetesFlags() { func initDriverFlags() { startCmd.Flags().StringP("driver", "d", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers())) startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.") + // Hide the deprecated vm-driver flag from help text + if err := startCmd.Flags().MarkHidden("vm-driver"); err != nil { + klog.Warningf("Failed to hide vm-driver flag: %v\n", err) + } + // Hide the deprecated flag from help text so new users dont use it (still will be processed) + if err := startCmd.Flags().MarkHidden(enableDefaultCNI); err != nil { + klog.Warningf("Failed to hide %s flag: %v\n", enableDefaultCNI, err) + } + + // Hide the deprecated flag from help text so new users dont use it (still will be processed) + if err := startCmd.Flags().MarkHidden(networkPlugin); err != nil { + klog.Warningf("Failed to hide %s flag: %v\n", networkPlugin, err) + } + startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors") startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers") diff --git a/cmd/minikube/cmd/start_test.go b/cmd/minikube/cmd/start_test.go index 6f468b6326..a4786ee171 100644 --- a/cmd/minikube/cmd/start_test.go +++ b/cmd/minikube/cmd/start_test.go @@ -277,26 +277,26 @@ func TestSuggestMemoryAllocation(t *testing.T) { nodes int want int }{ - {"128GB sys", 128000, 0, 1, 6000}, - {"64GB sys", 64000, 0, 1, 6000}, - {"32GB sys", 32768, 0, 1, 6000}, + {"128GB sys", 128000, 0, 1, 6144}, + {"64GB sys", 64000, 0, 1, 6144}, + {"32GB sys", 32768, 0, 1, 6144}, {"16GB sys", 16384, 0, 1, 4000}, {"odd sys", 14567, 0, 1, 
3600}, - {"4GB sys", 4096, 0, 1, 2200}, + {"4GB sys", 4096, 0, 1, 3072}, {"2GB sys", 2048, 0, 1, 2048}, - {"Unable to poll sys", 0, 0, 1, 2200}, + {"Unable to poll sys", 0, 0, 1, 3072}, {"128GB sys, 16GB container", 128000, 16384, 1, 16336}, {"64GB sys, 16GB container", 64000, 16384, 1, 16000}, {"16GB sys, 4GB container", 16384, 4096, 1, 4000}, {"4GB sys, 3.5GB container", 16384, 3500, 1, 3452}, {"16GB sys, 2GB container", 16384, 2048, 1, 2048}, {"16GB sys, unable to poll container", 16384, 0, 1, 4000}, - {"128GB sys 2 nodes", 128000, 0, 2, 6000}, - {"8GB sys 3 nodes", 8192, 0, 3, 2200}, - {"16GB sys 2 nodes", 16384, 0, 2, 2200}, + {"128GB sys 2 nodes", 128000, 0, 2, 6144}, + {"8GB sys 3 nodes", 8192, 0, 3, 3072}, + {"16GB sys 2 nodes", 16384, 0, 2, 3072}, {"32GB sys 2 nodes", 32768, 0, 2, 4050}, - {"odd sys 2 nodes", 14567, 0, 2, 2200}, - {"4GB sys 2 nodes", 4096, 0, 2, 2200}, + {"odd sys 2 nodes", 14567, 0, 2, 3072}, + {"4GB sys 2 nodes", 4096, 0, 2, 3072}, {"2GB sys 3 nodes", 2048, 0, 3, 2048}, } for _, test := range tests { diff --git a/cmd/minikube/main.go b/cmd/minikube/main.go index af9a29c924..eb3fa841e2 100644 --- a/cmd/minikube/main.go +++ b/cmd/minikube/main.go @@ -156,13 +156,13 @@ func checkLogFileMaxSize(file string, maxSizeKB int64) bool { // logFileName generates a default logfile name in the form minikube___.log from args func logFileName(dir string, logIdx int64) string { h := sha1.New() - user, err := user.Current() + userInfo, err := user.Current() if err != nil { klog.Warningf("Unable to get username to add to log filename hash: %v", err) } else { - _, err := h.Write([]byte(user.Username)) + _, err := h.Write([]byte(userInfo.Username)) if err != nil { - klog.Warningf("Unable to add username %s to log filename hash: %v", user.Username, err) + klog.Warningf("Unable to add username %s to log filename hash: %v", userInfo.Username, err) } } for _, s := range pflag.Args() { diff --git a/deploy/addons/assets.go b/deploy/addons/assets.go index 
f1db9821c5..b2e6fbdee0 100644 --- a/deploy/addons/assets.go +++ b/deploy/addons/assets.go @@ -178,4 +178,8 @@ var ( // YakdAssets assets for yakd addon //go:embed yakd/*.yaml yakd/*.tmpl YakdAssets embed.FS + + // Kubetail assets for kubetail addon + //go:embed kubetail/*.yaml kubetail/*.tmpl + KubetailAssets embed.FS ) diff --git a/deploy/addons/inspektor-gadget/ig-deployment.yaml.tmpl b/deploy/addons/inspektor-gadget/ig-deployment.yaml.tmpl index 575cc81cfe..6b17117255 100644 --- a/deploy/addons/inspektor-gadget/ig-deployment.yaml.tmpl +++ b/deploy/addons/inspektor-gadget/ig-deployment.yaml.tmpl @@ -28,8 +28,6 @@ data: docker-socketpath: /run/docker.sock podman-socketpath: /run/podman/podman.sock operator: - ebpf: - enable-bpfstats: false oci: verify-image: true public-keys: @@ -180,6 +178,13 @@ spec: - -liveness periodSeconds: 5 timeoutSeconds: 2 + startupProbe: + exec: + command: + - /bin/gadgettracermanager + - -liveness + failureThreshold: 12 + periodSeconds: 5 env: - name: NODE_NAME valueFrom: diff --git a/deploy/addons/kubetail/kubetail-cli.yaml b/deploy/addons/kubetail/kubetail-cli.yaml new file mode 100644 index 0000000000..7b1f917455 --- /dev/null +++ b/deploy/addons/kubetail/kubetail-cli.yaml @@ -0,0 +1,54 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kubetail-cli + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cli +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-cli + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cli +rules: +- apiGroups: [""] + resources: [nodes] + verbs: [get, list, 
watch] +- apiGroups: ["", apps, batch] + resources: [cronjobs, daemonsets, deployments, jobs, pods, replicasets, statefulsets] + verbs: [get, list, watch] +- apiGroups: [""] + resources: [pods/log] + verbs: [list, watch] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-cli + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cli +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubetail-cli +subjects: +- kind: ServiceAccount + name: kubetail-cli + namespace: kubetail-system diff --git a/deploy/addons/kubetail/kubetail-cluster-agent.yaml.tmpl b/deploy/addons/kubetail/kubetail-cluster-agent.yaml.tmpl new file mode 100644 index 0000000000..d493079e81 --- /dev/null +++ b/deploy/addons/kubetail/kubetail-cluster-agent.yaml.tmpl @@ -0,0 +1,175 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubetail-cluster-agent + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +data: + config.yaml: | + cluster-agent: + addr: :50051 + logging: + enabled: true + format: json + level: info + tls: + cert-file: null + enabled: false + key-file: null +--- +kind: ServiceAccount +apiVersion: v1 +automountServiceAccountToken: true +metadata: + name: kubetail-cluster-agent + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +--- +kind: DaemonSet +apiVersion: apps/v1 
+metadata: + name: kubetail-cluster-agent + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +spec: + selector: + matchLabels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent + template: + metadata: + labels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent + spec: + automountServiceAccountToken: true + serviceAccountName: kubetail-cluster-agent + securityContext: + fsGroup: 0 + containers: + - name: kubetail-cluster-agent + image: {{.CustomRegistries.Kubetail | default .ImageRepository | default .Registries.Kubetail }}{{.Images.KubetailClusterAgent}} + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - DAC_READ_SEARCH + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsUser: 1000 + imagePullPolicy: IfNotPresent + args: + - --config=/etc/kubetail/config.yaml + ports: + - name: grpc + protocol: TCP + containerPort: 50051 + livenessProbe: + grpc: + port: 50051 + initialDelaySeconds: 5 + timeoutSeconds: 30 + periodSeconds: 3 + failureThreshold: 3 + readinessProbe: + grpc: + port: 50051 + initialDelaySeconds: 5 + timeoutSeconds: 30 + periodSeconds: 3 + failureThreshold: 3 + volumeMounts: + - name: config + mountPath: /etc/kubetail + readOnly: true + - name: varlog + mountPath: /var/log + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + volumes: + - name: config + configMap: + name: kubetail-cluster-agent + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + tolerations: + - effect: 
NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists +--- +kind: Service +apiVersion: v1 +metadata: + name: kubetail-cluster-agent + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +spec: + clusterIP: None + selector: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: kubetail-cluster-agent + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-agent + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api diff --git a/deploy/addons/kubetail/kubetail-cluster-api.yaml.tmpl b/deploy/addons/kubetail/kubetail-cluster-api.yaml.tmpl new file mode 100644 index 0000000000..ae7609bdd6 --- /dev/null +++ b/deploy/addons/kubetail/kubetail-cluster-api.yaml.tmpl @@ -0,0 +1,238 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubetail-cluster-api + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: 
cluster-api +data: + config.yaml: | + cluster-api: + addr: :8080 + cluster-agent-dispatch-url: "kubernetes://kubetail-cluster-agent:50051" + base-path: / + csrf: + cookie: + domain: null + http-only: true + max-age: 43200 + name: kubetail_cluster_api_csrf + path: / + same-site: strict + secure: false + enabled: true + field-name: csrf_token + secret: ${KUBETAIL_CLUSTER_API_CSRF_SECRET} + gin-mode: release + logging: + access-log: + enabled: true + hide-health-checks: true + enabled: true + format: json + level: info + tls: + cert-file: null + enabled: false + key-file: null +--- +kind: ServiceAccount +apiVersion: v1 +automountServiceAccountToken: true +metadata: + name: kubetail-cluster-api + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: "kubetail" + app.kubernetes.io/component: "cluster-api" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-cluster-api + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +rules: +- apiGroups: [""] + resources: [nodes] + verbs: [get, list, watch] +- apiGroups: ["", apps, batch] + resources: [cronjobs, daemonsets, deployments, jobs, pods, replicasets, statefulsets] + verbs: [get, list, watch] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-cluster-api + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole 
+ name: kubetail-cluster-api +subjects: +- kind: ServiceAccount + name: kubetail-cluster-api + namespace: kubetail-system +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kubetail-system + name: kubetail-cluster-api + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +rules: +- apiGroups: [discovery.k8s.io] + resources: [endpointslices] + verbs: [list, watch] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kubetail-system + name: kubetail-cluster-api + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubetail-cluster-api +subjects: +- kind: ServiceAccount + name: kubetail-cluster-api + namespace: kubetail-system +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kubetail-cluster-api + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +spec: + replicas: 1 + revisionHistoryLimit: 5 + selector: + matchLabels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api + strategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api + spec: + automountServiceAccountToken: true + 
serviceAccountName: kubetail-cluster-api + containers: + - name: kubetail-cluster-api + image: {{.CustomRegistries.Kubetail | default .ImageRepository | default .Registries.Kubetail }}{{.Images.KubetailClusterAPI}} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsUser: 1000 + imagePullPolicy: IfNotPresent + env: + - name: KUBETAIL_CLUSTER_API_CSRF_SECRET + value: "DUMMY" + args: + - --config=/etc/kubetail/config.yaml + ports: + - name: http + protocol: TCP + containerPort: 8080 + livenessProbe: + httpGet: + scheme: HTTP + path: /healthz + port: http + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 10 + failureThreshold: 3 + readinessProbe: + httpGet: + scheme: HTTP + path: /healthz + port: http + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 10 + failureThreshold: 3 + volumeMounts: + - name: config + mountPath: /etc/kubetail + readOnly: true + volumes: + - name: config + configMap: + name: kubetail-cluster-api +--- +kind: Service +apiVersion: v1 +metadata: + name: kubetail-cluster-api + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: cluster-api + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: http + appProtocol: http diff --git a/deploy/addons/kubetail/kubetail-dashboard.yaml.tmpl b/deploy/addons/kubetail/kubetail-dashboard.yaml.tmpl new file mode 100644 index 0000000000..73220dba91 --- /dev/null +++ b/deploy/addons/kubetail/kubetail-dashboard.yaml.tmpl @@ -0,0 +1,254 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubetail-dashboard + namespace: 
kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +data: + config.yaml: | + dashboard: + addr: :8080 + auth-mode: auto + cluster-api-endpoint: "http://kubetail-cluster-api:8080" + environment: cluster + ui: + cluster-api-enabled: true + base-path: / + csrf: + cookie: + domain: null + http-only: true + max-age: 43200 + name: kubetail_dashhboard_csrf + path: / + same-site: strict + secure: false + enabled: true + field-name: csrf_token + secret: ${KUBETAIL_DASHBOARD_CSRF_SECRET} + gin-mode: release + logging: + access-log: + enabled: true + hide-health-checks: true + enabled: true + format: json + level: info + session: + cookie: + domain: null + http-only: true + max-age: 1092000 + name: kubetail_dashboard_session + path: / + same-site: lax + secure: false + secret: ${KUBETAIL_DASHBOARD_SESSION_SECRET} + tls: + cert-file: null + enabled: false + key-file: null +--- +kind: ServiceAccount +apiVersion: v1 +automountServiceAccountToken: true +metadata: + name: kubetail-dashboard + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-dashboard + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +rules: +- apiGroups: [""] + resources: [namespaces, nodes] + verbs: [get, list, watch] +- apiGroups: ["", apps, batch] + resources: [cronjobs, daemonsets, 
deployments, jobs, pods, pods/log, replicasets, statefulsets] + verbs: [get, list, watch] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubetail-dashboard + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubetail-dashboard +subjects: +- kind: ServiceAccount + name: kubetail-dashboard + namespace: kubetail-system +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kubetail-system + name: kubetail-dashboard + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +rules: +- apiGroups: [discovery.k8s.io] + resources: [endpointslices] + verbs: [list, watch] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kubetail-system + name: kubetail-dashboard + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubetail-dashboard +subjects: +- kind: ServiceAccount + name: kubetail-dashboard + namespace: kubetail-system +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: kubetail-dashboard + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + 
app.kubernetes.io/component: dashboard +spec: + replicas: 1 + revisionHistoryLimit: 5 + selector: + matchLabels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard + strategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard + spec: + automountServiceAccountToken: true + serviceAccountName: kubetail-dashboard + containers: + - name: kubetail-dashboard + image: {{.CustomRegistries.Kubetail | default .ImageRepository | default .Registries.Kubetail }}{{.Images.KubetailDashboard}} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsUser: 1000 + imagePullPolicy: IfNotPresent + env: + - name: KUBETAIL_DASHBOARD_CSRF_SECRET + value: "DUMMY" + - name: KUBETAIL_DASHBOARD_SESSION_SECRET + value: "DUMMY" + args: + - --config=/etc/kubetail/config.yaml + ports: + - name: http + protocol: TCP + containerPort: 8080 + livenessProbe: + httpGet: + scheme: HTTP + path: /healthz + port: http + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 10 + failureThreshold: 3 + readinessProbe: + httpGet: + scheme: HTTP + path: /healthz + port: http + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 10 + failureThreshold: 3 + volumeMounts: + - name: config + mountPath: /etc/kubetail + readOnly: true + volumes: + - name: config + configMap: + name: kubetail-dashboard +--- +kind: Service +apiVersion: v1 +metadata: + name: kubetail-dashboard + namespace: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/name: kubetail + app.kubernetes.io/version: "0.11.5" + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard +spec: + type: ClusterIP + selector: + 
app.kubernetes.io/name: kubetail + app.kubernetes.io/instance: kubetail + app.kubernetes.io/component: dashboard + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: http + appProtocol: http diff --git a/deploy/addons/kubetail/kubetail-namespace.yaml b/deploy/addons/kubetail/kubetail-namespace.yaml new file mode 100644 index 0000000000..296ce9846a --- /dev/null +++ b/deploy/addons/kubetail/kubetail-namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kubetail-system + labels: + kubernetes.io/minikube-addons: kubetail + addonmanager.kubernetes.io/mode: Reconcile diff --git a/deploy/addons/volcano/volcano-development.yaml.tmpl b/deploy/addons/volcano/volcano-development.yaml.tmpl index e6889550db..ce450f4241 100644 --- a/deploy/addons/volcano/volcano-development.yaml.tmpl +++ b/deploy/addons/volcano/volcano-development.yaml.tmpl @@ -140,7 +140,7 @@ spec: priorityClassName: system-cluster-critical containers: - args: - - --enabled-admission=/jobs/mutate,/jobs/validate,/podgroups/mutate,/pods/validate,/pods/mutate,/queues/mutate,/queues/validate + - --enabled-admission=/jobs/mutate,/jobs/validate,/podgroups/validate,/queues/mutate,/queues/validate,/hypernodes/validate - --tls-cert-file=/admission.local.config/certificates/tls.crt - --tls-private-key-file=/admission.local.config/certificates/tls.key - --ca-cert-file=/admission.local.config/certificates/ca.crt @@ -269,7 +269,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: jobs.batch.volcano.sh spec: group: batch.volcano.sh @@ -323,6 +323,18 @@ spec: format: int32 minimum: 1 type: integer + networkTopology: + properties: + highestTierAllowed: + default: 1 + type: integer + mode: + default: hard + enum: + - hard + - soft + type: string + type: object plugins: additionalProperties: items: @@ -3083,6 +3095,39 @@ spec: 
x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object restartPolicy: type: string runtimeClassName: @@ -3125,6 +3170,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -4264,7 +4311,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: commands.bus.volcano.sh spec: group: bus.volcano.sh @@ -4361,6 +4408,15 @@ metadata: namespace: volcano-system --- # Source: volcano/templates/controllers.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: volcano-controller-configmap + namespace: volcano-system +data: + volcano-controller.conf: | +--- +# Source: volcano/templates/controllers.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -4415,10 +4471,10 @@ rules: resources: ["networkpolicies"] verbs: ["get", "create", "delete"] - apiGroups: ["apps"] - resources: ["daemonsets", "statefulsets"] + resources: ["daemonsets"] verbs: ["get"] - apiGroups: ["apps"] - resources: ["replicasets"] + resources: ["replicasets", "statefulsets"] verbs: ["get", "list", "watch"] - apiGroups: ["batch"] 
resources: ["jobs"] @@ -4426,6 +4482,9 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "create", "update", "watch"] + - apiGroups: ["topology.volcano.sh"] + resources: ["hypernodes", "hypernodes/status"] + verbs: ["list", "watch", "get", "create", "delete", "update", "patch"] --- # Source: volcano/templates/controllers.yaml kind: ClusterRoleBinding @@ -4513,6 +4572,11 @@ spec: - ALL runAsNonRoot: true runAsUser: 1000 + env: + - name: KUBE_POD_NAMESPACE + value: volcano-system + - name: HELM_RELEASE_NAME + value: volcano --- # Source: volcano/templates/scheduler.yaml apiVersion: v1 @@ -4601,13 +4665,16 @@ rules: verbs: ["get", "list", "watch", "create", "delete", "update"] - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.volcano.sh"] resources: ["queues/status"] - verbs: ["patch"] + verbs: ["update"] - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.volcano.sh"] resources: ["podgroups"] verbs: ["list", "watch", "update"] - apiGroups: ["nodeinfo.volcano.sh"] resources: ["numatopologies"] verbs: ["get", "list", "watch", "delete"] + - apiGroups: ["topology.volcano.sh"] + resources: ["hypernodes", "hypernodes/status"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["configmaps"] verbs: ["get", "create", "delete", "update"] @@ -4617,6 +4684,15 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "create", "update", "watch"] + - apiGroups: ["resource.k8s.io"] + resources: ["resourceclaims"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["resource.k8s.io"] + resources: ["resourceclaims/status"] + verbs: ["update"] + - apiGroups: ["resource.k8s.io"] + resources: ["deviceclasses","resourceslices"] + verbs: ["get", "list", "watch", "create"] --- # Source: volcano/templates/scheduler.yaml kind: ClusterRoleBinding @@ -4717,15 +4793,14 @@ spec: configMap: name: volcano-scheduler-configmap - name: klog-sock - hostPath: - path: /tmp/klog-socks + 
emptyDir: {} --- # Source: volcano/templates/scheduling_v1beta1_podgroup.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: podgroups.scheduling.volcano.sh spec: group: scheduling.volcano.sh @@ -4811,6 +4886,24 @@ spec: if there's not enough resources to start each task, the scheduler will not start anyone. type: object + networkTopology: + description: NetworkTopology defines the NetworkTopology config, this + field works in conjunction with network topology feature and hyperNode + CRD. + properties: + highestTierAllowed: + default: 1 + description: HighestTierAllowed specifies the highest tier that + a job allowed to cross when scheduling. + type: integer + mode: + default: hard + description: Mode specifies the mode of the network topology constrain. + enum: + - hard + - soft + type: string + type: object priorityClassName: description: |- If specified, indicates the PodGroup's priority. "system-node-critical" and @@ -4821,6 +4914,7 @@ spec: default. type: string queue: + default: default description: |- Queue defines the queue to allocate resource for PodGroup; if queue does not exist, the PodGroup will not be scheduled. Defaults to `default` Queue with the lowest weight. @@ -4887,7 +4981,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: queues.scheduling.volcano.sh spec: group: scheduling.volcano.sh @@ -4901,7 +4995,11 @@ spec: singular: queue scope: Cluster versions: - - name: v1beta1 + - additionalPrinterColumns: + - jsonPath: .spec.parent + name: PARENT + type: string + name: v1beta1 schema: openAPIV3Schema: description: Queue is a queue of PodGroup. 
@@ -5033,7 +5131,10 @@ spec: description: Type define the type of queue type: string weight: + default: 1 format: int32 + maximum: 65535 + minimum: 1 type: integer type: object status: @@ -5103,7 +5204,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: numatopologies.nodeinfo.volcano.sh spec: group: nodeinfo.volcano.sh @@ -5190,44 +5291,231 @@ spec: served: true storage: true --- -# Source: volcano/templates/webhooks.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration +# Source: volcano/templates/topology_v1alpha1_hypernodes.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: volcano-admission-service-pods-mutate -webhooks: - - admissionReviewVersions: - - v1 - clientConfig: - service: - name: volcano-admission-service - namespace: volcano-system - path: /pods/mutate - port: 443 - failurePolicy: Fail - matchPolicy: Equivalent - name: mutatepod.volcano.sh - namespaceSelector: - matchExpressions: - - key: kubernetes.io/metadata.name - operator: NotIn - values: - - volcano-system - - kube-system - objectSelector: {} - reinvocationPolicy: Never - rules: - - apiGroups: - - "" - apiVersions: - - v1 - operations: - - CREATE - resources: - - pods - scope: '*' - sideEffects: NoneOnDryRun - timeoutSeconds: 10 + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: hypernodes.topology.volcano.sh +spec: + group: topology.volcano.sh + names: + kind: HyperNode + listKind: HyperNodeList + plural: hypernodes + shortNames: + - hn + singular: hypernode + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.tier + name: Tier + type: string + - jsonPath: .status.nodeCount + name: NodeCount + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + 
description: HyperNode represents a collection of nodes sharing similar network + topology or performance characteristics. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the desired configuration of the HyperNode. + properties: + members: + description: Members defines a list of node groups or individual nodes + included in the HyperNode. + items: + description: MemberSpec represents a specific node or a hyperNodes + in the hyperNode. + properties: + selector: + description: Selector defines the selection rules for this member. + properties: + exactMatch: + description: ExactMatch defines the exact match criteria. + properties: + name: + description: Name specifies the exact name of the node + to match. + type: string + type: object + labelMatch: + description: LabelMatch defines the labels match criteria + (only take effect when Member Type is "Node"). + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + regexMatch: + description: RegexMatch defines the regex match criteria. + properties: + pattern: + description: Pattern defines the regex pattern to match + node names. + type: string + type: object + type: object + x-kubernetes-validations: + - message: Either ExactMatch or RegexMatch or LabelMatch must + be specified + rule: has(self.exactMatch) || has(self.regexMatch) || has(self.labelMatch) + - message: Only one of ExactMatch, RegexMatch, or LabelMatch + can be specified + rule: '(has(self.exactMatch) ? 1 : 0) + (has(self.regexMatch) + ? 1 : 0) + (has(self.labelMatch) ? 1 : 0) <= 1' + type: + description: Type specifies the member type. + enum: + - Node + - HyperNode + type: string + required: + - type + type: object + type: array + tier: + description: Tier categorizes the performance level of the HyperNode. + type: integer + required: + - tier + type: object + status: + description: Status provides the current state of the HyperNode. 
+ properties: + conditions: + description: Conditions provide details about the current state of + the HyperNode. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + nodeCount: + description: NodeCount is the total number of nodes currently in the + HyperNode. + format: int64 + minimum: 0 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} --- # Source: volcano/templates/webhooks.yaml apiVersion: admissionregistration.k8s.io/v1 @@ -5272,7 +5560,7 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - name: volcano-admission-service-podgroups-mutate + name: volcano-admission-service-jobs-mutate webhooks: - admissionReviewVersions: - v1 @@ -5280,11 +5568,11 @@ webhooks: service: name: volcano-admission-service namespace: volcano-system - path: /podgroups/mutate + path: /jobs/mutate port: 443 failurePolicy: Fail matchPolicy: Equivalent - name: mutatepodgroup.volcano.sh + name: mutatejob.volcano.sh namespaceSelector: matchExpressions: - key: kubernetes.io/metadata.name @@ -5296,22 +5584,22 @@ webhooks: reinvocationPolicy: Never rules: - apiGroups: - - scheduling.volcano.sh + - batch.volcano.sh apiVersions: - - v1beta1 + - v1alpha1 operations: - CREATE resources: - - podgroups + - jobs scope: '*' sideEffects: NoneOnDryRun timeoutSeconds: 10 --- # Source: volcano/templates/webhooks.yaml apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration +kind: ValidatingWebhookConfiguration metadata: - name: volcano-admission-service-jobs-mutate + name: volcano-admission-service-jobs-validate webhooks: - admissionReviewVersions: - v1 @@ -5319,11 +5607,11 @@ webhooks: service: name: volcano-admission-service namespace: volcano-system - path: /jobs/mutate + path: /jobs/validate port: 443 failurePolicy: Fail matchPolicy: Equivalent - name: mutatejob.volcano.sh + 
name: validatejob.volcano.sh namespaceSelector: matchExpressions: - key: kubernetes.io/metadata.name @@ -5332,7 +5620,6 @@ webhooks: - volcano-system - kube-system objectSelector: {} - reinvocationPolicy: Never rules: - apiGroups: - batch.volcano.sh @@ -5340,6 +5627,7 @@ webhooks: - v1alpha1 operations: - CREATE + - UPDATE resources: - jobs scope: '*' @@ -5350,7 +5638,7 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: volcano-admission-service-jobs-validate + name: volcano-admission-service-queues-validate webhooks: - admissionReviewVersions: - v1 @@ -5358,11 +5646,11 @@ webhooks: service: name: volcano-admission-service namespace: volcano-system - path: /jobs/validate + path: /queues/validate port: 443 failurePolicy: Fail matchPolicy: Equivalent - name: validatejob.volcano.sh + name: validatequeue.volcano.sh namespaceSelector: matchExpressions: - key: kubernetes.io/metadata.name @@ -5373,14 +5661,15 @@ webhooks: objectSelector: {} rules: - apiGroups: - - batch.volcano.sh + - scheduling.volcano.sh apiVersions: - - v1alpha1 + - v1beta1 operations: - CREATE - UPDATE + - DELETE resources: - - jobs + - queues scope: '*' sideEffects: NoneOnDryRun timeoutSeconds: 10 @@ -5389,7 +5678,7 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: volcano-admission-service-pods-validate + name: volcano-admission-service-podgroups-validate webhooks: - admissionReviewVersions: - v1 @@ -5397,11 +5686,11 @@ webhooks: service: name: volcano-admission-service namespace: volcano-system - path: /pods/validate + path: /podgroups/validate port: 443 failurePolicy: Fail matchPolicy: Equivalent - name: validatepod.volcano.sh + name: validatepodgroup.volcano.sh namespaceSelector: matchExpressions: - key: kubernetes.io/metadata.name @@ -5412,13 +5701,13 @@ webhooks: objectSelector: {} rules: - apiGroups: - - "" + - scheduling.volcano.sh apiVersions: - - v1 + - v1beta1 
operations: - CREATE resources: - - pods + - podgroups scope: '*' sideEffects: NoneOnDryRun timeoutSeconds: 10 @@ -5427,7 +5716,7 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - name: volcano-admission-service-queues-validate + name: volcano-admission-service-hypernodes-validate webhooks: - admissionReviewVersions: - v1 @@ -5435,32 +5724,22 @@ webhooks: service: name: volcano-admission-service namespace: volcano-system - path: /queues/validate + path: /hypernodes/validate port: 443 failurePolicy: Fail matchPolicy: Equivalent - name: validatequeue.volcano.sh - namespaceSelector: - matchExpressions: - - key: kubernetes.io/metadata.name - operator: NotIn - values: - - volcano-system - - kube-system - objectSelector: {} + name: validatehypernodes.volcano.sh rules: - apiGroups: - - scheduling.volcano.sh + - topology.volcano.sh apiVersions: - - v1beta1 + - v1alpha1 operations: - CREATE - UPDATE - - DELETE resources: - - queues - scope: '*' - sideEffects: NoneOnDryRun + - hypernodes + sideEffects: None timeoutSeconds: 10 --- # Source: jobflow/templates/flow_v1alpha1_jobflows.yaml @@ -5468,7 +5747,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: jobtemplates.flow.volcano.sh spec: group: flow.volcano.sh @@ -5504,6 +5783,18 @@ spec: format: int32 minimum: 1 type: integer + networkTopology: + properties: + highestTierAllowed: + default: 1 + type: integer + mode: + default: hard + enum: + - hard + - soft + type: string + type: object plugins: additionalProperties: items: @@ -8264,6 +8555,39 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + 
x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object restartPolicy: type: string runtimeClassName: @@ -8306,6 +8630,8 @@ spec: runAsUser: format: int64 type: integer + seLinuxChangePolicy: + type: string seLinuxOptions: properties: level: @@ -9382,7 +9708,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.17.0 name: jobflows.flow.volcano.sh spec: group: flow.volcano.sh diff --git a/deploy/iso/minikube-iso/board/minikube/aarch64/grub.cfg b/deploy/iso/minikube-iso/board/minikube/aarch64/grub.cfg index e06366b9ab..90b7df2595 100644 --- a/deploy/iso/minikube-iso/board/minikube/aarch64/grub.cfg +++ b/deploy/iso/minikube-iso/board/minikube/aarch64/grub.cfg @@ -1,7 +1,10 @@ set default="0" -set timeout="5" +set timeout="0" menuentry "Buildroot" { - linux /boot/bzimage console=ttyAMA0 # kernel + # The console depends on the driver: + # qemu: console=ttyAMA0 + # vfkit,krunkit: console=hvc0 + linux /boot/bzimage console=ttyAMA0 console=hvc0 initrd /boot/initrd # rootfs } diff --git a/deploy/iso/minikube-iso/board/minikube/x86_64/grub.cfg b/deploy/iso/minikube-iso/board/minikube/x86_64/grub.cfg index 009b345539..9f630e596e 100644 --- a/deploy/iso/minikube-iso/board/minikube/x86_64/grub.cfg +++ b/deploy/iso/minikube-iso/board/minikube/x86_64/grub.cfg @@ -1,5 +1,5 @@ set default="0" -set timeout="5" +set timeout="0" menuentry "Buildroot" { linux 
/boot/bzimage console=tty0 rw # kernel diff --git a/deploy/iso/minikube-iso/go.hash b/deploy/iso/minikube-iso/go.hash index 10d429ba08..2cb1e75ff3 100644 --- a/deploy/iso/minikube-iso/go.hash +++ b/deploy/iso/minikube-iso/go.hash @@ -35,4 +35,4 @@ sha256 36930162a93df417d90bd22c6e14daff4705baac2b02418edda671cdfa9cd07f go1.23 sha256 8d6a77332487557c6afa2421131b50f83db4ae3c579c3bc72e670ee1f6968599 go1.23.3.src.tar.gz sha256 ad345ac421e90814293a9699cca19dd5238251c3f687980bbcae28495b263531 go1.23.4.src.tar.gz sha256 d14120614acb29d12bcab72bd689f257eb4be9e0b6f88a8fb7e41ac65f8556e5 go1.24.0.src.tar.gz -sha256 6924efde5de86fe277676e929dc9917d466efa02fb934197bc2eba35d5680971 go1.23.4.linux-amd64.tar.gz \ No newline at end of file +sha256 6924efde5de86fe277676e929dc9917d466efa02fb934197bc2eba35d5680971 go1.23.4.linux-amd64.tar.gz diff --git a/deploy/minikube/releases-v2.json b/deploy/minikube/releases-v2.json index f33c780813..825bfc7a9b 100644 --- a/deploy/minikube/releases-v2.json +++ b/deploy/minikube/releases-v2.json @@ -1,4 +1,30 @@ [ + { + "checksums": { + "amd64": { + "darwin": "a7e3da0db4041b2f845ca37af592424a9cbe34087ac922220b1e3abc4e1976ea", + "linux": "cddeab5ab86ab98e4900afac9d62384dae0941498dfbe712ae0c8868250bc3d7", + "windows": "c7504d574a416a4dd4c948e8bab9c2c2028e12c06d038046d8728c96c7cf4730" + }, + "arm": { + "linux": "6b5de419c665c5b3afa513c4d0a4387e973a1048a335f0ce879410bda3d3315f" + }, + "arm64": { + "darwin": "a9f06bc9634c87800e772354872c6641ef0e02699187d5118225a86b79c99348", + "linux": "6fe9adf0c40c75346a0528e609b3d4119ab192e2506d0401cc89adee051a48ea" + }, + "ppc64le": { + "linux": "db7eb5bfe583b5a1a7caf0c3b74b2733e29f26988970d687ea5fe4d10f60946b" + }, + "s390x": { + "linux": "f2659c51ba66374c34ee4c818227c870044b8ef1db9a5521a5206002b4a69234" + }, + "darwin": "a7e3da0db4041b2f845ca37af592424a9cbe34087ac922220b1e3abc4e1976ea", + "linux": "cddeab5ab86ab98e4900afac9d62384dae0941498dfbe712ae0c8868250bc3d7", + "windows": 
"c7504d574a416a4dd4c948e8bab9c2c2028e12c06d038046d8728c96c7cf4730" + }, + "name": "v1.36.0" + }, { "checksums": { "amd64": { diff --git a/deploy/minikube/releases.json b/deploy/minikube/releases.json index 0ce02387f6..4ecc2b280d 100644 --- a/deploy/minikube/releases.json +++ b/deploy/minikube/releases.json @@ -1,4 +1,12 @@ [ + { + "checksums": { + "darwin": "a7e3da0db4041b2f845ca37af592424a9cbe34087ac922220b1e3abc4e1976ea", + "linux": "cddeab5ab86ab98e4900afac9d62384dae0941498dfbe712ae0c8868250bc3d7", + "windows": "c7504d574a416a4dd4c948e8bab9c2c2028e12c06d038046d8728c96c7cf4730" + }, + "name": "v1.36.0" + }, { "checksums": { "darwin": "ba5ab2789ee0c40cefde30762656f3aa0bf47b15ee0bd808f3d7523cc54d75df", diff --git a/go.mod b/go.mod index 58a07ae9f4..4895af817d 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/cheggaaa/pb/v3 v3.1.7 github.com/cloudevents/sdk-go/v2 v2.16.0 github.com/distribution/reference v0.6.0 - github.com/docker/cli v28.1.1+incompatible + github.com/docker/cli v28.2.2+incompatible github.com/docker/docker v27.5.0+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 @@ -28,7 +28,7 @@ require ( github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/google/go-cmp v0.7.0 github.com/google/go-containerregistry v0.20.3 - github.com/google/go-github/v72 v72.0.0 + github.com/google/go-github/v73 v73.0.0 github.com/google/slowjam v1.1.2 github.com/google/uuid v1.6.0 github.com/hashicorp/go-getter v1.7.8 @@ -64,9 +64,9 @@ require ( github.com/spf13/viper v1.20.1 github.com/zchee/go-vmnet v0.0.0-20161021174912-97ebf9174097 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.35.0 + go.opentelemetry.io/otel v1.36.0 go.opentelemetry.io/otel/sdk v1.35.0 - go.opentelemetry.io/otel/trace v1.35.0 + go.opentelemetry.io/otel/trace v1.36.0 golang.org/x/build v0.0.0-20190927031335-2835ba2e683f golang.org/x/crypto v0.38.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c @@ 
-77,17 +77,17 @@ require ( golang.org/x/term v0.32.0 golang.org/x/text v0.25.0 gonum.org/v1/plot v0.16.0 - google.golang.org/api v0.233.0 + google.golang.org/api v0.234.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.33.1 k8s.io/apimachinery v0.33.1 k8s.io/client-go v0.33.1 k8s.io/cluster-bootstrap v0.0.0 - k8s.io/component-base v0.32.2 + k8s.io/component-base v0.33.1 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.32.2 + k8s.io/kubectl v0.33.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - libvirt.org/go/libvirt v1.11001.0 + libvirt.org/go/libvirt v1.11004.0 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) @@ -96,10 +96,10 @@ require ( cloud.google.com/go v0.121.0 // indirect cloud.google.com/go/auth v0.16.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect - cloud.google.com/go/trace v1.11.3 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + cloud.google.com/go/trace v1.11.6 // indirect codeberg.org/go-fonts/liberation v0.5.0 // indirect codeberg.org/go-latex/latex v0.1.0 // indirect codeberg.org/go-pdf/fpdf v0.10.0 // indirect @@ -121,6 +121,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -157,7 +158,7 @@ require ( github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect 
+ github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gookit/color v1.5.2 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -171,7 +172,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -201,9 +202,9 @@ require ( github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/prometheus v0.35.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -229,25 +230,23 @@ require ( go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap 
v1.27.0 // indirect golang.org/x/image v0.25.0 // indirect golang.org/x/net v0.40.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.29.0 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/grpc v1.72.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect + google.golang.org/grpc v1.72.1 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cli-runtime v0.32.2 // indirect + k8s.io/cli-runtime v0.33.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/randfill v1.0.0 // indirect diff --git a/go.sum b/go.sum index 2859967c86..f9c89ce8e2 100644 --- a/go.sum +++ b/go.sum @@ -187,8 +187,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= 
cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -384,8 +384,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -570,8 +570,8 @@ cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= 
+cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -909,6 +909,8 @@ github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -1016,8 +1018,8 @@ github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= -github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A= 
+github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -1333,8 +1335,8 @@ github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYV github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= -github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= +github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= +github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= @@ -1398,8 +1400,8 @@ github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqE github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= 
-github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -1582,8 +1584,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1605,6 +1607,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -1884,8 +1887,8 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1906,8 +1909,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= 
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= @@ -2163,8 +2166,8 @@ go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzox go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= @@ -2183,8 +2186,8 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= 
-go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= @@ -2199,8 +2202,8 @@ go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16g go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.12.1/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2859,8 +2862,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.233.0 
h1:iGZfjXAJiUFSSaekVB7LzXl6tRfEKhUN7FkZN++07tI= -google.golang.org/api v0.233.0/go.mod h1:TCIVLLlcwunlMpZIhIp7Ltk77W+vUSdUKAAIlbxY44c= +google.golang.org/api v0.234.0 h1:d3sAmYq3E9gdr2mpmiWGbm9pHsA/KJmyiLkwKfHBqU4= +google.golang.org/api v0.234.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3007,12 +3010,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 h1:vPV0tzlsK6EzEDHNNH5sa7Hs9bd7iXR7B1tSiPepkV0= google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:pKLAc5OolXC3ViWGI62vvC0n10CpwAtRcTNCFwTKBEw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 h1:IqsN8hx+lWLqlN+Sc3DoMy/watjofWiU8sRFgQ8fhKM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2/go.mod 
h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 h1:IkAfh6J/yllPtpYFU0zZN1hUPYdT0ogkBT/9hMxHjvg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -3059,8 +3062,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3162,8 +3165,8 @@ k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= -k8s.io/cli-runtime v0.32.2 
h1:aKQR4foh9qeyckKRkNXUccP9moxzffyndZAvr+IXMks= -k8s.io/cli-runtime v0.32.2/go.mod h1:a/JpeMztz3xDa7GCyyShcwe55p8pbcCVQxvqZnIwXN8= +k8s.io/cli-runtime v0.33.1 h1:TvpjEtF71ViFmPeYMj1baZMJR4iWUEplklsUQ7D3quA= +k8s.io/cli-runtime v0.33.1/go.mod h1:9dz5Q4Uh8io4OWCLiEf/217DXwqNgiTS/IOuza99VZE= k8s.io/client-go v0.19.1/go.mod h1:AZOIVSI9UUtQPeJD3zJFp15CEhSjRgAuQP5PWRJrCIQ= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= @@ -3179,8 +3182,8 @@ k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeY k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= -k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU= -k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0= +k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI= +k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -3207,8 +3210,8 @@ k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us= -k8s.io/kubectl v0.32.2/go.mod 
h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8= +k8s.io/kubectl v0.33.1 h1:OJUXa6FV5bap6iRy345ezEjU9dTLxqv1zFTVqmeHb6A= +k8s.io/kubectl v0.33.1/go.mod h1:Z07pGqXoP4NgITlPRrnmiM3qnoo1QrK1zjw85Aiz8J0= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -3218,8 +3221,8 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -libvirt.org/go/libvirt v1.11001.0 h1:QJgpslxY7qkpXZIDxdMHpkDl7FfhgQJwqRTGBbg/S8E= -libvirt.org/go/libvirt v1.11001.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= +libvirt.org/go/libvirt v1.11004.0 h1:8iWbiTJzrqQoS+opyowkDeJAWImDx8jb/jGQjo++upM= +libvirt.org/go/libvirt v1.11004.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= diff --git a/hack/benchmark/cpu_usage/auto_pause/chart.go b/hack/benchmark/cpu_usage/auto_pause/chart.go index 295ba645a1..86c618136b 100644 --- a/hack/benchmark/cpu_usage/auto_pause/chart.go +++ b/hack/benchmark/cpu_usage/auto_pause/chart.go @@ -63,11 +63,13 @@ func execute() error { p := plot.New() // Set view options - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. 
Non Auto Pause (less is better)" - } else if runtime.GOOS == "linux" { + case "linux": p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)" } + p.Y.Label.Text = "CPU overhead%" // Open non-autopause csv file of benchmark summary @@ -158,9 +160,10 @@ func execute() error { p.Legend.Top = true // Add x-lay names - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind") - } else if runtime.GOOS == "linux" { + case "linux": p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind") } @@ -223,16 +226,18 @@ func execute() error { p.Add(napl, apl) // Output bar graph - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil { return errors.Wrap(err, "Failed to create bar graph png") } log.Printf("Generated graph png to %s/mac.png", FOLDER) - } else if runtime.GOOS == "linux" { + case "linux": if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil { return errors.Wrap(err, "Failed to create bar graph png") } log.Printf("Generated graph png to %s/linux.png", FOLDER) } + return nil } diff --git a/hack/benchmark/cpu_usage/idle_only/chart.go b/hack/benchmark/cpu_usage/idle_only/chart.go index f3f4e1f759..aaf466b67c 100644 --- a/hack/benchmark/cpu_usage/idle_only/chart.go +++ b/hack/benchmark/cpu_usage/idle_only/chart.go @@ -62,11 +62,13 @@ func execute() error { p := plot.New() // Set view options - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on macOS (less is better)" - } else if runtime.GOOS == "linux" { + case "linux": p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on Linux (less is better)" } + p.Y.Label.Text = "CPU overhead%" // Open csv 
file of benchmark summary @@ -114,9 +116,10 @@ func execute() error { p.Legend.Top = true // Add x-lay names - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind") - } else if runtime.GOOS == "linux" { + case "linux": p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind") } @@ -151,16 +154,18 @@ func execute() error { p.Add(cl) // Output bar graph - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil { return errors.Wrap(err, "Failed to create bar graph png") } log.Printf("Generated graph png to %s/mac.png", FOLDER) - } else if runtime.GOOS == "linux" { + case "linux": if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil { return errors.Wrap(err, "Failed to create bar graph png") } log.Printf("Generated graph png to %s/linux.png", FOLDER) } + return nil } diff --git a/hack/jenkins/common.ps1 b/hack/jenkins/common.ps1 index fdda3f626f..c9e0563700 100644 --- a/hack/jenkins/common.ps1 +++ b/hack/jenkins/common.ps1 @@ -69,7 +69,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/installers/check # Download gopogh and gotestsum go install github.com/medyagh/gopogh/cmd/gopogh@v0.29.0 -go install gotest.tools/gotestsum@v1.12.2 +go install gotest.tools/gotestsum@v1.12.3 # temporary: remove the old install of gopogh & gotestsum as it's taking priority over our current install, preventing updating if (Test-Path "C:\Go") { Remove-Item "C:\Go" -Recurse -Force diff --git a/hack/jenkins/installers/check_install_gh.sh b/hack/jenkins/installers/check_install_gh.sh index 8adb29c062..68aadef82d 100755 --- a/hack/jenkins/installers/check_install_gh.sh +++ b/hack/jenkins/installers/check_install_gh.sh @@ -16,7 +16,7 @@ set -eux -o pipefail -GH_VERSION="2.72.0" 
+GH_VERSION="2.74.2" echo "Installing latest version of gh" curl -qLO "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_amd64.tar.gz" diff --git a/hack/jenkins/installers/check_install_gotestsum.sh b/hack/jenkins/installers/check_install_gotestsum.sh index e666d59422..52b1b35d6a 100755 --- a/hack/jenkins/installers/check_install_gotestsum.sh +++ b/hack/jenkins/installers/check_install_gotestsum.sh @@ -18,7 +18,7 @@ set -eux -o pipefail function install_gotestsum() { rm -f $(which gotestsum) - GOBIN="$GOROOT/bin" go install gotest.tools/gotestsum@v1.12.2 + GOBIN="$GOROOT/bin" go install gotest.tools/gotestsum@v1.12.3 } which gotestsum || install_gotestsum diff --git a/hack/metrics/metrics.go b/hack/metrics/metrics.go index 83f61d4e8f..868d6c6324 100644 --- a/hack/metrics/metrics.go +++ b/hack/metrics/metrics.go @@ -145,7 +145,7 @@ func getLabels(containerRuntime string) *stackdriver.Labels { func minikubeStartTime(ctx context.Context, projectID, minikubePath, containerRuntime string) (float64, error) { defer deleteMinikube(ctx, minikubePath) - cmd := exec.CommandContext(ctx, minikubePath, "start", "--driver=docker", "-p", profile, "--memory=2048", "--trace=gcp", fmt.Sprintf("--container-runtime=%s", containerRuntime)) + cmd := exec.CommandContext(ctx, minikubePath, "start", "--driver=docker", "-p", profile, "--memory=3072", "--trace=gcp", fmt.Sprintf("--container-runtime=%s", containerRuntime)) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", pkgtrace.ProjectEnvVar, projectID)) cmd.Stdout = os.Stderr cmd.Stderr = os.Stderr diff --git a/hack/preload-images/kubernetes.go b/hack/preload-images/kubernetes.go index be59927f55..ef5e29186e 100644 --- a/hack/preload-images/kubernetes.go +++ b/hack/preload-images/kubernetes.go @@ -20,7 +20,7 @@ import ( "context" "strings" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "k8s.io/klog/v2" ) diff --git 
a/hack/update/buildkit_version/update_buildkit_version.go b/hack/update/buildkit_version/update_buildkit_version.go index cfb98dcd69..92a1f2afd9 100644 --- a/hack/update/buildkit_version/update_buildkit_version.go +++ b/hack/update/buildkit_version/update_buildkit_version.go @@ -113,7 +113,7 @@ func updateHashFile(version, arch, filePath string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/cni_plugins_version/update_cni_plugins_version.go b/hack/update/cni_plugins_version/update_cni_plugins_version.go index ad3fc4b7a7..c8f3d86a22 100644 --- a/hack/update/cni_plugins_version/update_cni_plugins_version.go +++ b/hack/update/cni_plugins_version/update_cni_plugins_version.go @@ -117,7 +117,7 @@ func updateHashFile(version, arch, packagePath string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/containerd_version/update_containerd_version.go b/hack/update/containerd_version/update_containerd_version.go index bfdceed62f..8af4c8a711 100644 --- a/hack/update/containerd_version/update_containerd_version.go +++ b/hack/update/containerd_version/update_containerd_version.go @@ -107,7 +107,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := 
f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/cri-o_version/update_cri-o_version.go b/hack/update/cri-o_version/update_cri-o_version.go index e842881ada..abdb86a81e 100644 --- a/hack/update/cri-o_version/update_cri-o_version.go +++ b/hack/update/cri-o_version/update_cri-o_version.go @@ -107,7 +107,7 @@ func updateHashFile(version string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/cri_dockerd_version/update_cri_dockerd_version.go b/hack/update/cri_dockerd_version/update_cri_dockerd_version.go index f76f685911..7423172752 100644 --- a/hack/update/cri_dockerd_version/update_cri_dockerd_version.go +++ b/hack/update/cri_dockerd_version/update_cri_dockerd_version.go @@ -135,7 +135,7 @@ func updateHashFile(filePath, commit string, shaSum [sha256.Size]byte) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, commit)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, commit); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/crictl_version/update_crictl_version.go b/hack/update/crictl_version/update_crictl_version.go index 17a07a91e1..72dd12a3a7 100644 --- a/hack/update/crictl_version/update_crictl_version.go +++ b/hack/update/crictl_version/update_crictl_version.go @@ -111,7 +111,7 @@ func updateHashFile(version, arch, packagePath string) error { return 
fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/crun_version/update_crun_version.go b/hack/update/crun_version/update_crun_version.go index c04f0e24be..039738a4b3 100644 --- a/hack/update/crun_version/update_crun_version.go +++ b/hack/update/crun_version/update_crun_version.go @@ -92,7 +92,7 @@ func updateHashFiles(version string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x crun-%s.tar.gz\n", sum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x crun-%s.tar.gz\n", sum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/docker_buildx_version/update_docker_buildx_version.go b/hack/update/docker_buildx_version/update_docker_buildx_version.go index b484736a3a..7ca6062b36 100644 --- a/hack/update/docker_buildx_version/update_docker_buildx_version.go +++ b/hack/update/docker_buildx_version/update_docker_buildx_version.go @@ -105,7 +105,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/docker_version/update_docker_version.go b/hack/update/docker_version/update_docker_version.go index 704902e4d8..1dca891e56 100644 --- a/hack/update/docker_version/update_docker_version.go +++ 
b/hack/update/docker_version/update_docker_version.go @@ -95,7 +95,7 @@ func updateHashFile(version, arch, folderSuffix string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x docker-%s.tgz\n", sum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x docker-%s.tgz\n", sum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/github.go b/hack/update/github.go index 3a26586e3c..cac43c9b9b 100644 --- a/hack/update/github.go +++ b/hack/update/github.go @@ -23,7 +23,7 @@ import ( "golang.org/x/mod/semver" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" ) const ( diff --git a/hack/update/golang_version/update_golang_version.go b/hack/update/golang_version/update_golang_version.go index 7096324f15..8f672218b9 100644 --- a/hack/update/golang_version/update_golang_version.go +++ b/hack/update/golang_version/update_golang_version.go @@ -154,7 +154,7 @@ func updateGoHashFile(version string) error { return fmt.Errorf("failed to open go.hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %s go%s.src.tar.gz\n", sha, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %s go%s.src.tar.gz\n", sha, version); err != nil { return fmt.Errorf("failed to write to go.hash file: %v", err) } return nil diff --git a/hack/update/ingress_version/update_ingress_version.go b/hack/update/ingress_version/update_ingress_version.go index f6cbcc2d0f..5fd321e0cc 100644 --- a/hack/update/ingress_version/update_ingress_version.go +++ b/hack/update/ingress_version/update_ingress_version.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" diff --git a/hack/update/kubeadm_constants/update_kubeadm_constants.go 
b/hack/update/kubeadm_constants/update_kubeadm_constants.go index c5dfee6acb..67d9e93bc8 100644 --- a/hack/update/kubeadm_constants/update_kubeadm_constants.go +++ b/hack/update/kubeadm_constants/update_kubeadm_constants.go @@ -29,7 +29,7 @@ import ( "text/template" "time" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go b/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go index 3f40290097..bc7532432c 100644 --- a/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go +++ b/hack/update/kubernetes_versions_list/update_kubernetes_versions_list.go @@ -23,7 +23,7 @@ import ( "sort" "time" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/hack/update/nerdctl_version/update_nerdctl_version.go b/hack/update/nerdctl_version/update_nerdctl_version.go index 5f196255b0..01d2a3a83a 100644 --- a/hack/update/nerdctl_version/update_nerdctl_version.go +++ b/hack/update/nerdctl_version/update_nerdctl_version.go @@ -105,7 +105,7 @@ func updateHashFile(version, arch, packagePath string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/runc_version/update_runc_version.go b/hack/update/runc_version/update_runc_version.go index 27c4263d69..ef89e79ed3 100644 --- a/hack/update/runc_version/update_runc_version.go +++ b/hack/update/runc_version/update_runc_version.go @@ -91,7 +91,7 @@ func 
updateHashFiles(version string) error { return fmt.Errorf("failed to open hash file: %v", err) } defer f.Close() - if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil { + if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil { return fmt.Errorf("failed to write to hash file: %v", err) } return nil diff --git a/hack/update/site_node_version/update_site_node_version.go b/hack/update/site_node_version/update_site_node_version.go index c1214482e5..0fbc8f2bc0 100644 --- a/hack/update/site_node_version/update_site_node_version.go +++ b/hack/update/site_node_version/update_site_node_version.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "golang.org/x/mod/semver" "k8s.io/klog/v2" "k8s.io/minikube/hack/update" diff --git a/installers/linux/kvm/Dockerfile.amd64 b/installers/linux/kvm/Dockerfile.amd64 index c55868f317..0d78193790 100644 --- a/installers/linux/kvm/Dockerfile.amd64 +++ b/installers/linux/kvm/Dockerfile.amd64 @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM gcr.io/gcp-runtimes/ubuntu_20_0_4 +FROM ubuntu:22.04 -RUN apt-get update && apt-get install -y --no-install-recommends \ +RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ gcc \ libc6-dev \ make \ diff --git a/installers/linux/kvm/Dockerfile.arm64 b/installers/linux/kvm/Dockerfile.arm64 index 323188a97f..5594e2c066 100644 --- a/installers/linux/kvm/Dockerfile.arm64 +++ b/installers/linux/kvm/Dockerfile.arm64 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM ubuntu:20.04 +FROM ubuntu:22.04 RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ diff --git a/netlify.toml b/netlify.toml index 90d21a0b8d..c49060fe8c 100644 --- a/netlify.toml +++ b/netlify.toml @@ -4,8 +4,8 @@ publish = "site/public/" command = "pwd && cd themes/docsy && npm install && git submodule update -f --init && cd ../.. && hugo" [build.environment] -NODE_VERSION = "20.19.2" -HUGO_VERSION = "v0.147.2" +NODE_VERSION = "20.19.3" +HUGO_VERSION = "v0.147.8" [context.production.environment] HUGO_ENV = "production" diff --git a/pkg/addons/addons_storage_classes.go b/pkg/addons/addons_storage_classes.go index a921294343..45e8648593 100644 --- a/pkg/addons/addons_storage_classes.go +++ b/pkg/addons/addons_storage_classes.go @@ -37,9 +37,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st } class := defaultStorageClassProvisioner - if name == "storage-provisioner-gluster" { + switch name { + case "storage-provisioner-gluster": class = "glusterfile" - } else if name == "storage-provisioner-rancher" { + case "storage-provisioner-rancher": class = "local-path" } diff --git a/pkg/addons/config.go b/pkg/addons/config.go index 5ca855a6bd..dafb8ad723 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -104,6 +104,11 @@ var Addons = []*Addon{ set: SetBool, callbacks: []setFn{EnableOrDisableAddon}, }, + { + name: "kubetail", + set: SetBool, + callbacks: []setFn{EnableOrDisableAddon}, + }, { name: "kubevirt", set: SetBool, diff --git a/pkg/drivers/kic/kic.go b/pkg/drivers/kic/kic.go index 9fae7a457c..6be2547ea8 100644 --- a/pkg/drivers/kic/kic.go +++ b/pkg/drivers/kic/kic.go @@ -456,20 +456,20 @@ func (d *Driver) Stop() error { } } - runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec}) + crMgr, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec}) if err != nil { // 
won't return error because: // even though we can't stop the cotainers inside, we still wanna stop the minikube container itself klog.Errorf("unable to get container runtime: %v", err) } else { - containers, err := runtime.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces}) + containers, err := crMgr.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces}) if err != nil { klog.Infof("unable list containers : %v", err) } if len(containers) > 0 { - if err := runtime.StopContainers(containers); err != nil { + if err := crMgr.StopContainers(containers); err != nil { klog.Infof("unable to stop containers : %v", err) } - if err := runtime.KillContainers(containers); err != nil { + if err := crMgr.KillContainers(containers); err != nil { klog.Errorf("unable to kill containers : %v", err) } } diff --git a/pkg/drivers/kic/oci/network_create.go b/pkg/drivers/kic/oci/network_create.go index cbdc1076e6..cd38dc785c 100644 --- a/pkg/drivers/kic/oci/network_create.go +++ b/pkg/drivers/kic/oci/network_create.go @@ -109,7 +109,7 @@ func CreateNetwork(ociBin, networkName, subnet, staticIP string) (net.IP, error) return info.gateway, nil } // don't retry if error is not address is taken - if !(errors.Is(err, ErrNetworkSubnetTaken) || errors.Is(err, ErrNetworkGatewayTaken)) { + if !errors.Is(err, ErrNetworkSubnetTaken) && !errors.Is(err, ErrNetworkGatewayTaken) { klog.Errorf("error while trying to create %s network %s %s: %v", ociBin, networkName, subnet.CIDR, err) return nil, fmt.Errorf("un-retryable: %w", err) } diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index 5e761db724..b02fb22397 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -191,9 +191,10 @@ func CreateContainerNode(p CreateParams) error { //nolint to suppress cyclomatic runArgs = append(runArgs, "--ip", p.IP) } - if p.GPUs == "all" || p.GPUs == "nvidia" { + switch p.GPUs { + case "all", "nvidia": runArgs = 
append(runArgs, "--gpus", "all", "--env", "NVIDIA_DRIVER_CAPABILITIES=all") - } else if p.GPUs == "amd" { + case "amd": /* https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html * "--security-opt seccomp=unconfined" is also required but included above. */ diff --git a/pkg/drivers/kvm/domain.go b/pkg/drivers/kvm/domain.go index 15af5a2ec1..6918db5fc4 100644 --- a/pkg/drivers/kvm/domain.go +++ b/pkg/drivers/kvm/domain.go @@ -31,12 +31,12 @@ import ( func (d *Driver) getDomain() (*libvirt.Domain, *libvirt.Connect, error) { conn, err := getConnection(d.ConnectionURI) if err != nil { - return nil, nil, errors.Wrap(err, "getting libvirt connection") + return nil, nil, fmt.Errorf("failed opening libvirt connection: %w", err) } dom, err := conn.LookupDomainByName(d.MachineName) if err != nil { - return nil, nil, errors.Wrap(err, "looking up domain") + return nil, nil, fmt.Errorf("failed looking up domain: %w", lvErr(err)) } return dom, conn, nil @@ -45,13 +45,17 @@ func (d *Driver) getDomain() (*libvirt.Domain, *libvirt.Connect, error) { func getConnection(connectionURI string) (*libvirt.Connect, error) { conn, err := libvirt.NewConnect(connectionURI) if err != nil { - return nil, errors.Wrap(err, "connecting to libvirt socket") + return nil, fmt.Errorf("failed connecting to libvirt socket: %w", lvErr(err)) } return conn, nil } func closeDomain(dom *libvirt.Domain, conn *libvirt.Connect) error { + if dom == nil { + return fmt.Errorf("nil domain, cannot close") + } + if err := dom.Free(); err != nil { return err } @@ -62,25 +66,31 @@ func closeDomain(dom *libvirt.Domain, conn *libvirt.Connect) error { return err } -func (d *Driver) createDomain() (*libvirt.Domain, error) { - // create the XML for the domain using our domainTmpl template +// defineDomain defines the XML for the domain using our domainTmpl template +func (d *Driver) defineDomain() (*libvirt.Domain, error) { tmpl := template.Must(template.New("domain").Parse(domainTmpl)) var domainXML 
bytes.Buffer - if err := tmpl.Execute(&domainXML, d); err != nil { + dlog := struct { + Driver + ConsoleLogPath string + }{ + Driver: *d, + ConsoleLogPath: consoleLogPath(*d), + } + if err := tmpl.Execute(&domainXML, dlog); err != nil { return nil, errors.Wrap(err, "executing domain xml") } conn, err := getConnection(d.ConnectionURI) if err != nil { - return nil, errors.Wrap(err, "getting libvirt connection") + return nil, fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() - log.Infof("define libvirt domain using xml: %v", domainXML.String()) - // define the domain in libvirt using the generated XML + log.Infof("defining domain using XML: %v", domainXML.String()) dom, err := conn.DomainDefineXML(domainXML.String()) if err != nil { return nil, errors.Wrapf(err, "error defining domain xml: %s", domainXML.String()) diff --git a/pkg/drivers/kvm/domain_definition_arm64.go b/pkg/drivers/kvm/domain_definition_arm64.go index 4d02acb9d9..b9ba0a27c4 100644 --- a/pkg/drivers/kvm/domain_definition_arm64.go +++ b/pkg/drivers/kvm/domain_definition_arm64.go @@ -27,16 +27,16 @@ const domainTmpl = ` - {{if .Hidden}} + {{- if .Hidden}} - {{end}} + {{- end}} - {{if gt .NUMANodeCount 1}} + {{- if gt .NUMANodeCount 1}} {{.NUMANodeXML}} - {{end}} + {{- end}} hvm @@ -75,12 +75,12 @@ const domainTmpl = ` /dev/random - {{if .GPU}} + {{- if .GPU}} {{.DevicesXML}} - {{end}} - {{if gt .ExtraDisks 0}} + {{- end}} + {{- if gt .ExtraDisks 0}} {{.ExtraDisksXML}} - {{end}} + {{- end}} ` diff --git a/pkg/drivers/kvm/domain_definition_x86.go b/pkg/drivers/kvm/domain_definition_x86.go index 4557cf9055..388ef1c403 100644 --- a/pkg/drivers/kvm/domain_definition_x86.go +++ b/pkg/drivers/kvm/domain_definition_x86.go @@ -27,16 +27,16 @@ const domainTmpl = ` - {{if .Hidden}} + {{- if .Hidden}} - {{end}} + {{- end}} 
- {{if gt .NUMANodeCount 1}} + {{- if gt .NUMANodeCount 1}} {{.NUMANodeXML}} - {{end}} + {{- end}} hvm @@ -55,6 +55,7 @@ const domainTmpl = ` + @@ -65,19 +66,23 @@ const domainTmpl = ` + + + + /dev/random - {{if .GPU}} + {{- if .GPU}} {{.DevicesXML}} - {{end}} - {{if gt .ExtraDisks 0}} + {{- end}} + {{- if gt .ExtraDisks 0}} {{.ExtraDisksXML}} - {{end}} + {{- end}} ` diff --git a/pkg/drivers/kvm/kvm.go b/pkg/drivers/kvm/kvm.go index 63c6286499..4b3785a553 100644 --- a/pkg/drivers/kvm/kvm.go +++ b/pkg/drivers/kvm/kvm.go @@ -141,11 +141,11 @@ func (d *Driver) GetURL() (string, error) { func (d *Driver) PreCommandCheck() error { conn, err := getConnection(d.ConnectionURI) if err != nil { - return errors.Wrap(err, "getting libvirt connection") + return fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() @@ -167,7 +167,7 @@ func (d *Driver) GetState() (state.State, error) { } defer func() { if err := closeDomain(dom, conn); err != nil { - log.Errorf("unable to close domain: %v", err) + log.Errorf("failed closing domain: %v", err) } }() @@ -225,11 +225,11 @@ func (d *Driver) GetIP() (string, error) { conn, err := getConnection(d.ConnectionURI) if err != nil { - return "", errors.Wrap(err, "getting libvirt connection") + return "", fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() @@ -265,7 +265,7 @@ func (d *Driver) Kill() error { } defer func() { if err := closeDomain(dom, conn); err != nil { - log.Errorf("unable to close domain: %v", err) + log.Errorf("failed closing domain: %v", err) } }() @@ -303,17 +303,47 @@ func (d *Driver) Start() error { } defer func() { if err := 
closeDomain(dom, conn); err != nil { - log.Errorf("unable to close domain: %v", err) + log.Errorf("failed closing domain: %v", err) + } + }() + + domXML, err := dom.GetXMLDesc(libvirt.DOMAIN_XML_SECURE) + if err != nil { + log.Debugf("failed to get domain XML: %v", lvErr(err)) + } else { + log.Debugf("starting domain XML:\n%s", domXML) + } + + // libvirt/qemu creates a console log file owned by root:root and permissions 0600, + // so we pre-create it (and close it immediately), just to be able to read it later + logPath := consoleLogPath(*d) + f, err := os.Create(logPath) + if err != nil { + log.Debugf("failed to create console log file %q: %v", logPath, err) + } else { + f.Close() + } + // ensure console log file is cleaned up + defer func() { + if _, err := os.Stat(logPath); err == nil { + if err := os.Remove(logPath); err != nil { + log.Debugf("failed removing console log file %q: %v", logPath, err) + } } }() - log.Info("creating domain...") if err := dom.Create(); err != nil { return errors.Wrap(err, "creating domain") } + log.Info("waiting for domain to start...") + if err := d.waitForDomainState(state.Running, 30*time.Second); err != nil { + return errors.Wrap(err, "waiting for domain to start") + } + log.Info("domain is now running") + log.Info("waiting for IP...") - if err := d.waitForStaticIP(conn); err != nil { + if err := d.waitForStaticIP(conn, 90*time.Second); err != nil { return errors.Wrap(err, "waiting for IP") } @@ -325,8 +355,51 @@ func (d *Driver) Start() error { return nil } +// consoleLogPath returns the path to the console log file for the given machine name. +func consoleLogPath(d Driver) string { + // return fmt.Sprintf("%s-console.log", machineName) + return d.ResolveStorePath("console.log") +} + +// waitForDomainState waits maxTime for the domain to reach a target state. 
+func (d *Driver) waitForDomainState(targetState state.State, maxTime time.Duration) error { + query := func() error { + currentState, err := d.GetState() + if err != nil { + return fmt.Errorf("failed getting domain state: %w", err) + } + + if currentState == targetState { + return nil + } + + log.Debugf("current domain state is %q, will retry", currentState.String()) + return fmt.Errorf("last domain state: %q", currentState.String()) + } + if err := retry.Local(query, maxTime); err != nil { + dumpConsoleLogs(consoleLogPath(*d)) + return fmt.Errorf("timed out waiting %v for domain to reach %q state: %w", maxTime, targetState.String(), err) + } + return nil +} + +// dumpConsoleLogs prints out the console log. +func dumpConsoleLogs(logPath string) { + if _, err := os.Stat(logPath); err != nil { + log.Debugf("failed checking console log file %q: %v", logPath, err) + return + } + + data, err := os.ReadFile(logPath) + if err != nil { + log.Debugf("failed dumping console log file %q: %v", logPath, err) + return + } + log.Debugf("console log:\n%s", data) +} + // waitForStaticIP waits for IP address of domain that has been created & starting and then makes that IP static. 
-func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error { +func (d *Driver) waitForStaticIP(conn *libvirt.Connect, maxTime time.Duration) error { query := func() error { sip, err := ipFromAPI(conn, d.MachineName, d.PrivateNetwork) if err != nil { @@ -342,8 +415,9 @@ func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error { return nil } - if err := retry.Local(query, 1*time.Minute); err != nil { - return fmt.Errorf("domain %s didn't return IP after 1 minute", d.MachineName) + if err := retry.Local(query, maxTime); err != nil { + dumpConsoleLogs(consoleLogPath(*d)) + return fmt.Errorf("domain %s didn't return IP after %v", d.MachineName, maxTime) } log.Info("reserving static IP address...") @@ -358,7 +432,7 @@ func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error { // Create a host using the driver's config func (d *Driver) Create() error { - log.Info("creating KVM machine...") + log.Info("creating domain...") log.Info("creating network...") if err := d.createNetwork(); err != nil { @@ -418,15 +492,16 @@ func (d *Driver) Create() error { log.Errorf("unable to ensure permissions on %s: %v", store, err) } - log.Info("creating domain...") - - dom, err := d.createDomain() + log.Info("defining domain...") + dom, err := d.defineDomain() if err != nil { - return errors.Wrap(err, "creating domain") + return errors.Wrap(err, "defining domain") } defer func() { - if err := dom.Free(); err != nil { - log.Errorf("unable to free domain: %v", err) + if dom == nil { + log.Warnf("nil domain, cannot free") + } else if err := dom.Free(); err != nil { + log.Errorf("failed freeing %s domain: %v", d.MachineName, lvErr(err)) } }() @@ -434,7 +509,7 @@ func (d *Driver) Create() error { return errors.Wrap(err, "starting domain") } - log.Infof("KVM machine creation complete") + log.Infof("domain creation complete") return nil } @@ -470,28 +545,29 @@ func ensureDirPermissions(store string) error { // Stop a host gracefully or forcefully otherwise. 
func (d *Driver) Stop() error { + log.Info("stopping domain...") + s, err := d.GetState() if err != nil { - return errors.Wrap(err, "getting domain state") + return fmt.Errorf("getting domain state: %w", err) } if s == state.Stopped { + log.Info("domain already stopped, nothing to do") return nil } - log.Info("stopping domain...") - dom, conn, err := d.getDomain() if err != nil { - return errors.Wrap(err, "getting domain") + return fmt.Errorf("getting domain: %w", err) } defer func() { if err := closeDomain(dom, conn); err != nil { - log.Errorf("unable to close domain: %v", err) + log.Errorf("failed closing domain: %v", err) } }() - log.Info("gracefully shutting down domain...") + log.Info("gracefully shutting domain down...") // ref: https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainShutdownFlags // note: "The order in which the hypervisor tries each shutdown method is undefined, and a hypervisor is not required to support all methods." @@ -508,52 +584,25 @@ func (d *Driver) Stop() error { } if err := dom.Shutdown(); err != nil { - return errors.Wrap(err, "gracefully shutting down domain") + return fmt.Errorf("gracefully shutting domain down: %w", err) } - if s, err = d.waitForStopState(90, "graceful shutdown"); err == nil { - log.Info("domain gracefully shutdown") + if err = d.waitForDomainState(state.Stopped, 90*time.Second); err == nil { + log.Info("domain gracefully shut down") return nil } - // could not get domain state - if s == state.None { - return err - } + log.Warn("failed graceful domain shut down, will try to force-stop") - // at this point shutdown failed, so we try with a little bit of force - log.Warn("waiting for domain graceful shutdown failed, will try to force-stop") if err := d.Kill(); err != nil { - log.Warnf("force-stopping domain request failed: %v", err) - } - - if s, err := d.waitForStopState(30, "force-stop"); err != nil { - return fmt.Errorf("unable to stop domain %s, current state is %q", d.MachineName, s.String()) + return 
fmt.Errorf("force-stopping domain request failed: %w", err) } - log.Info("domain force-stopped") - - return nil -} - -// waitForStopState waits maxsec for the domain to reach a stopped state. -func (d *Driver) waitForStopState(maxsec int, method string) (state.State, error) { - var s state.State - var err error - for i := 0; i < maxsec; i++ { - if s, err = d.GetState(); err != nil { - return s, errors.Wrap(err, "getting domain state") - } - - if s == state.Stopped { - return state.Stopped, nil - } - - log.Infof("waiting for domain %s %d/%d", method, i, maxsec) - time.Sleep(1 * time.Second) + if err = d.waitForDomainState(state.Stopped, 30*time.Second); err == nil { + log.Info("domain force-stopped") + return nil } - - return s, fmt.Errorf("timed out waiting for domain %s, current state is %q", method, s) + return fmt.Errorf("unable to stop domain: %w", err) } // Remove a host @@ -562,11 +611,11 @@ func (d *Driver) Remove() error { conn, err := getConnection(d.ConnectionURI) if err != nil { - return errors.Wrap(err, "getting libvirt connection") + return fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() diff --git a/pkg/drivers/kvm/network.go b/pkg/drivers/kvm/network.go index 66e947ed5f..6e407c2dee 100644 --- a/pkg/drivers/kvm/network.go +++ b/pkg/drivers/kvm/network.go @@ -39,13 +39,13 @@ const networkTmpl = ` {{.Name}} - {{with .Parameters}} + {{- with .Parameters}} - {{end}} + {{- end}} ` @@ -84,9 +84,15 @@ const firstSubnetAddr = "192.168.39.0" func setupNetwork(conn *libvirt.Connect, name string) error { n, err := conn.LookupNetworkByName(name) if err != nil { - return errors.Wrapf(err, "checking network %s", name) + return fmt.Errorf("failed looking up network %s: %w", name, lvErr(err)) } - defer func() { _ = n.Free() }() + defer func() { + if n == nil { + 
log.Warnf("nil network, cannot free") + } else if err := n.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", name, lvErr(err)) + } + }() // always ensure autostart is set on the network autostart, err := n.GetAutostart() @@ -104,7 +110,9 @@ func setupNetwork(conn *libvirt.Connect, name string) error { if err != nil { return errors.Wrapf(err, "checking network status for %s", name) } + if !active { + log.Debugf("network %s is not active, trying to start it...", name) if err := n.Create(); err != nil { return errors.Wrapf(err, "starting network %s", name) } @@ -116,11 +124,11 @@ func setupNetwork(conn *libvirt.Connect, name string) error { func (d *Driver) ensureNetwork() error { conn, err := getConnection(d.ConnectionURI) if err != nil { - return errors.Wrap(err, "getting libvirt connection") + return fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() @@ -164,11 +172,11 @@ func (d *Driver) createNetwork() error { conn, err := getConnection(d.ConnectionURI) if err != nil { - return errors.Wrap(err, "getting libvirt connection") + return fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() @@ -176,23 +184,34 @@ func (d *Driver) createNetwork() error { // It is assumed that the libvirt/kvm installation has already created this network netd, err := conn.LookupNetworkByName(d.Network) if err != nil { - return errors.Wrapf(err, "%s KVM network doesn't exist", d.Network) + return fmt.Errorf("failed looking up network %s, cannot continue: %w", d.Network, lvErr(err)) } - log.Debugf("found existing %s KVM network", d.Network) - if netd != nil { - _ = netd.Free() + 
log.Debugf("found existing %s network", d.Network) + + if netdXML, err := netd.GetXMLDesc(0); err != nil { + log.Debugf("failed getting %s network XML: %v", d.Network, lvErr(err)) + } else { + log.Debug(netdXML) + } + + if err := netd.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", d.Network, lvErr(err)) } // network: private // Only create the private network if it does not already exist - netp, err := conn.LookupNetworkByName(d.PrivateNetwork) - defer func() { - if netp != nil { - _ = netp.Free() + if netp, err := conn.LookupNetworkByName(d.PrivateNetwork); err == nil { + log.Warnf("found existing %s private network, skipping creation", d.PrivateNetwork) + + if netpXML, err := netp.GetXMLDesc(0); err != nil { + log.Debugf("failed getting %s private network XML: %v", d.PrivateNetwork, lvErr(err)) + } else { + log.Debug(netpXML) + } + + if err := netp.Free(); err != nil { + log.Errorf("failed freeing %s private network: %v", d.PrivateNetwork, lvErr(err)) } - }() - if err == nil { - log.Debugf("found existing private KVM network %s", d.PrivateNetwork) return nil } @@ -203,7 +222,7 @@ func (d *Driver) createNetwork() error { var subnet *network.Parameters subnet, err = network.FreeSubnet(subnetAddr, 11, 20) if err != nil { - log.Debugf("failed to find free subnet for private KVM network %s after %d attempts: %v", d.PrivateNetwork, 20, err) + log.Debugf("failed finding free subnet for private network %s after %d attempts: %v", d.PrivateNetwork, 20, err) return fmt.Errorf("un-retryable: %w", err) } @@ -220,37 +239,42 @@ func (d *Driver) createNetwork() error { tmpl := template.Must(template.New("network").Parse(networkTmpl)) var networkXML bytes.Buffer if err = tmpl.Execute(&networkXML, tryNet); err != nil { - return fmt.Errorf("executing private KVM network template: %w", err) + return fmt.Errorf("executing private network template: %w", err) } - log.Debugf("created network xml: %s", networkXML.String()) // define the network using our template - 
var network *libvirt.Network - network, err = conn.NetworkDefineXML(networkXML.String()) + log.Debugf("defining private network:\n%s", networkXML.String()) + libvirtNet, err := conn.NetworkDefineXML(networkXML.String()) if err != nil { - return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err) + return fmt.Errorf("defining private network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err) } // and finally create & start it - log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR) - if err = network.Create(); err == nil { - log.Debugf("private KVM network %s %s created", d.PrivateNetwork, subnet.CIDR) + log.Debugf("creating private network %s %s...", d.PrivateNetwork, subnet.CIDR) + if err = libvirtNet.Create(); err == nil { + log.Debugf("private network %s %s created", d.PrivateNetwork, subnet.CIDR) + if netpXML, err := libvirtNet.GetXMLDesc(0); err != nil { + log.Debugf("failed getting %s private network XML: %v", d.PrivateNetwork, lvErr(err)) + } else { + log.Debug(netpXML) + } + return nil } - log.Debugf("failed to create private KVM network %s %s, will retry: %v", d.PrivateNetwork, subnet.CIDR, err) + log.Debugf("failed creating private network %s %s, will retry: %v", d.PrivateNetwork, subnet.CIDR, err) subnetAddr = subnet.IP } - return fmt.Errorf("failed to create private KVM network %s: %w", d.PrivateNetwork, err) + return fmt.Errorf("failed creating private network %s: %w", d.PrivateNetwork, err) } func (d *Driver) deleteNetwork() error { conn, err := getConnection(d.ConnectionURI) if err != nil { - return errors.Wrap(err, "getting libvirt connection") + return fmt.Errorf("failed opening libvirt connection: %w", err) } defer func() { if _, err := conn.Close(); err != nil { - log.Errorf("unable to close libvirt connection: %v", err) + log.Errorf("failed closing libvirt connection: %v", lvErr(err)) } }() @@ -263,7 +287,7 @@ 
func (d *Driver) deleteNetwork() error { // network: private log.Debugf("Checking if network %s exists...", d.PrivateNetwork) - network, err := conn.LookupNetworkByName(d.PrivateNetwork) + libvirtNet, err := conn.LookupNetworkByName(d.PrivateNetwork) if err != nil { if lvErr(err).Code == libvirt.ERR_NO_NETWORK { log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork) @@ -271,7 +295,14 @@ func (d *Driver) deleteNetwork() error { } return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork) } - defer func() { _ = network.Free() }() + defer func() { + if libvirtNet == nil { + log.Warnf("nil network, cannot free") + } else if err := libvirtNet.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", d.PrivateNetwork, lvErr(err)) + } + }() + log.Debugf("Network %s exists", d.PrivateNetwork) err = d.checkDomains(conn) @@ -283,18 +314,18 @@ func (d *Driver) deleteNetwork() error { log.Debugf("Trying to delete network %s...", d.PrivateNetwork) deleteFunc := func() error { - active, err := network.IsActive() + active, err := libvirtNet.IsActive() if err != nil { return err } if active { log.Debugf("Destroying active network %s", d.PrivateNetwork) - if err := network.Destroy(); err != nil { + if err := libvirtNet.Destroy(); err != nil { return err } } log.Debugf("Undefining inactive network %s", d.PrivateNetwork) - return network.Undefine() + return libvirtNet.Undefine() } if err := retry.Local(deleteFunc, 10*time.Second); err != nil { return errors.Wrap(err, "deleting network") @@ -391,23 +422,29 @@ func (d *Driver) checkDomains(conn *libvirt.Connect) error { // addStaticIP appends new host's name, MAC and static IP address record to list of network DHCP leases. // It will return nil if host record already exists. 
-func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error { - l, err := dhcpLease(conn, network, hostname, mac, ip) +func addStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error { + l, err := dhcpLease(conn, networkName, hostname, mac, ip) if err != nil { - return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err) + return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err) } if l != nil { - log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip) + log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip) return nil } - net, err := conn.LookupNetworkByName(network) + libvirtNet, err := conn.LookupNetworkByName(networkName) if err != nil { - return fmt.Errorf("failed looking up network %s: %w", network, err) + return fmt.Errorf("failed looking up network %s: %w", networkName, err) } - defer func() { _ = net.Free() }() + defer func() { + if libvirtNet == nil { + log.Warnf("nil network, cannot free") + } else if err := libvirtNet.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", networkName, lvErr(err)) + } + }() - return net.Update( + return libvirtNet.Update( libvirt.NETWORK_UPDATE_COMMAND_ADD_LAST, libvirt.NETWORK_SECTION_IP_DHCP_HOST, -1, @@ -417,23 +454,29 @@ func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error // delStaticIP deletes static IP address record that matches given combination of host's name, MAC and IP from list of network DHCP leases. // It will return nil if record doesn't exist. 
-func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error { - l, err := dhcpLease(conn, network, hostname, mac, ip) +func delStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error { + l, err := dhcpLease(conn, networkName, hostname, mac, ip) if err != nil { - return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err) + return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err) } if l == nil { - log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip) + log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip) return nil } - net, err := conn.LookupNetworkByName(network) + libvirtNet, err := conn.LookupNetworkByName(networkName) if err != nil { - return fmt.Errorf("failed looking up network %s: %w", network, err) + return fmt.Errorf("failed looking up network %s: %w", networkName, err) } - defer func() { _ = net.Free() }() + defer func() { + if libvirtNet == nil { + log.Warnf("nil network, cannot free") + } else if err := libvirtNet.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", networkName, lvErr(err)) + } + }() - return net.Update( + return libvirtNet.Update( libvirt.NETWORK_UPDATE_COMMAND_DELETE, libvirt.NETWORK_SECTION_IP_DHCP_HOST, -1, @@ -442,56 +485,62 @@ func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error } // dhcpLease returns network DHCP lease that matches given combination of host's name, MAC and IP. 
-func dhcpLease(conn *libvirt.Connect, network, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) { +func dhcpLease(conn *libvirt.Connect, networkName, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) { if hostname == "" && mac == "" && ip == "" { return nil, nil } - net, err := conn.LookupNetworkByName(network) + libvirtNet, err := conn.LookupNetworkByName(networkName) if err != nil { - return nil, fmt.Errorf("failed looking up network %s: %w", network, err) + return nil, fmt.Errorf("failed looking up network %s: %w", networkName, err) } - defer func() { _ = net.Free() }() + defer func() { + if libvirtNet == nil { + log.Warnf("nil network, cannot free") + } else if err := libvirtNet.Free(); err != nil { + log.Errorf("failed freeing %s network: %v", networkName, lvErr(err)) + } + }() - leases, err := net.GetDHCPLeases() + leases, err := libvirtNet.GetDHCPLeases() if err != nil { return nil, fmt.Errorf("failed getting host DHCP leases: %w", err) } for _, l := range leases { if (hostname == "" || hostname == l.Hostname) && (mac == "" || mac == l.Mac) && (ip == "" || ip == l.IPaddr) { - log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, network, l) + log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, networkName, l) return &l, nil } } - log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, network) + log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, networkName) return nil, nil } // ipFromAPI returns current primary IP address of domain interface in network. 
-func ipFromAPI(conn *libvirt.Connect, domain, network string) (string, error) { - mac, err := macFromXML(conn, domain, network) +func ipFromAPI(conn *libvirt.Connect, domain, networkName string) (string, error) { + mac, err := macFromXML(conn, domain, networkName) if err != nil { return "", fmt.Errorf("failed getting MAC address: %w", err) } ifaces, err := ifListFromAPI(conn, domain) if err != nil { - return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", network, domain, err) + return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", networkName, domain, err) } for _, i := range ifaces { if i.Hwaddr == mac { if i.Addrs != nil { - log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, network) + log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, networkName) return i.Addrs[0].Addr, nil } - log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, network, i) + log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, networkName, i) return "", nil } } - log.Debugf("unable to find current IP address of domain %s in network %s", domain, network) + log.Debugf("unable to find current IP address of domain %s in network %s (interfaces detected: %+v)", domain, networkName, ifaces) return "", nil } @@ -499,22 +548,27 @@ func ipFromAPI(conn *libvirt.Connect, domain, network string) (string, error) { func ifListFromAPI(conn *libvirt.Connect, domain string) ([]libvirt.DomainInterface, error) { dom, err := conn.LookupDomainByName(domain) if err != nil { - return nil, fmt.Errorf("failed looking up domain %s: %w", domain, err) + return nil, fmt.Errorf("failed looking up domain %s: %w", domain, lvErr(err)) } - defer func() { _ = dom.Free() }() + defer func() { + if dom == nil { + 
log.Warnf("nil domain, cannot free") + } else if err := dom.Free(); err != nil { + log.Errorf("failed freeing %s domain: %v", domain, lvErr(err)) + } + }() - ifs, err := dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_ARP) - if ifs == nil { + ifs, err := dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE) + if len(ifs) == 0 { if err != nil { - log.Debugf("failed listing network interface addresses of domain %s(source=arp): %w", domain, err) + log.Debugf("failed listing network interface addresses of domain %s (source=lease): %v", domain, lvErr(err)) } else { - log.Debugf("No network interface addresses found for domain %s(source=arp)", domain) + log.Debugf("no network interface addresses found for domain %s (source=lease)", domain) } - log.Debugf("trying to list again with source=lease") + log.Debugf("trying to list again with source=arp") - ifs, err = dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE) - if err != nil { - return nil, fmt.Errorf("failed listing network interface addresses of domain %s(source=lease): %w", domain, err) + if ifs, err = dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_ARP); err != nil { + return nil, fmt.Errorf("failed listing network interface addresses of domain %s (source=arp): %w", domain, lvErr(err)) } } @@ -522,40 +576,40 @@ func ifListFromAPI(conn *libvirt.Connect, domain string) ([]libvirt.DomainInterf } // ipFromXML returns defined IP address of interface in network. 
-func ipFromXML(conn *libvirt.Connect, domain, network string) (string, error) { - mac, err := macFromXML(conn, domain, network) +func ipFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) { + mac, err := macFromXML(conn, domain, networkName) if err != nil { return "", fmt.Errorf("failed getting MAC address: %w", err) } - lease, err := dhcpLease(conn, network, "", mac, "") + lease, err := dhcpLease(conn, networkName, "", mac, "") if err != nil { - return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: , mac: %q, ip: }: %w", network, mac, err) + return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: , mac: %q, ip: }: %w", networkName, mac, err) } if lease == nil { - log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", network, mac) + log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", networkName, mac) return "", nil } - log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, network) + log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, networkName) return lease.IPaddr, nil } // macFromXML returns defined MAC address of interface in network from domain XML. 
-func macFromXML(conn *libvirt.Connect, domain, network string) (string, error) { +func macFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) { domIfs, err := ifListFromXML(conn, domain) if err != nil { - return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", network, domain, err) + return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", networkName, domain, err) } for _, i := range domIfs { - if i.Source.Network == network { - log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, network) + if i.Source.Network == networkName { + log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, networkName) return i.Mac.Address, nil } } - return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", network, domain, network) + return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", networkName, domain, networkName) } // ifListFromXML returns defined domain interfaces from domain XML. 
@@ -564,7 +618,13 @@ func ifListFromXML(conn *libvirt.Connect, domain string) ([]kvmIface, error) { if err != nil { return nil, fmt.Errorf("failed looking up domain %s: %w", domain, err) } - defer func() { _ = dom.Free() }() + defer func() { + if dom == nil { + log.Warnf("nil domain, cannot free") + } else if err := dom.Free(); err != nil { + log.Errorf("failed freeing %s domain: %v", domain, lvErr(err)) + } + }() domXML, err := dom.GetXMLDesc(0) if err != nil { diff --git a/pkg/drivers/qemu/qemu.go b/pkg/drivers/qemu/qemu.go index 08b6ff1ad2..141f1f9356 100644 --- a/pkg/drivers/qemu/qemu.go +++ b/pkg/drivers/qemu/qemu.go @@ -54,6 +54,7 @@ import ( const ( isoFilename = "boot2docker.iso" + serialFileName = "serial.log" privateNetworkName = "docker-machines" defaultSSHUser = "docker" @@ -464,6 +465,10 @@ func (d *Driver) Start() error { "virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=config-2") } + serialPath := d.ResolveStorePath(serialFileName) + startCmd = append(startCmd, + "-serial", fmt.Sprintf("file:%s", serialPath)) + for i := 0; i < d.ExtraDisks; i++ { // use a higher index for extra disks to reduce ID collision with current or future // low-indexed devices (e.g., firmware, ISO CDROM, cloud config, and network device) diff --git a/pkg/drivers/vfkit/vfkit.go b/pkg/drivers/vfkit/vfkit.go index e37408ba06..9c7e4fb551 100644 --- a/pkg/drivers/vfkit/vfkit.go +++ b/pkg/drivers/vfkit/vfkit.go @@ -53,10 +53,12 @@ import ( ) const ( - isoFilename = "boot2docker.iso" - pidFileName = "vfkit.pid" - sockFilename = "vfkit.sock" - defaultSSHUser = "docker" + isoFilename = "boot2docker.iso" + pidFileName = "vfkit.pid" + sockFilename = "vfkit.sock" + serialFileName = "serial.log" + efiVarsFileName = "vfkit.efivars" + defaultSSHUser = "docker" ) // Driver is the machine driver for vfkit (Virtualization.framework) @@ -67,7 +69,6 @@ type Driver struct { DiskSize int CPU int Memory int - Cmdline string ExtraDisks int Network string // "", "nat", "vmnet-shared" MACAddress string // 
For network=nat, network="" @@ -189,12 +190,6 @@ func (d *Driver) Create() error { if err := b2dutils.CopyIsoToMachineDir(d.Boot2DockerURL, d.MachineName); err != nil { return err } - isoPath := d.ResolveStorePath(isoFilename) - - log.Info("Extracting Kernel...") - if err := d.extractKernel(isoPath); err != nil { - return err - } log.Info("Creating SSH key...") if err := ssh.GenerateSSHKey(d.sshKeyPath()); err != nil { @@ -256,9 +251,10 @@ func (d *Driver) startVfkit(socketPath string) error { "--memory", fmt.Sprintf("%d", d.Memory), "--cpus", fmt.Sprintf("%d", d.CPU), "--restful-uri", fmt.Sprintf("unix://%s", d.sockfilePath())) - var isoPath = filepath.Join(machineDir, isoFilename) + + efiVarsPath := d.ResolveStorePath(efiVarsFileName) startCmd = append(startCmd, - "--device", fmt.Sprintf("virtio-blk,path=%s", isoPath)) + "--bootloader", fmt.Sprintf("efi,variable-store=%s,create", efiVarsPath)) if socketPath != "" { // The guest will be able to access other guests in the vmnet network. 
@@ -273,20 +269,21 @@ func (d *Driver) startVfkit(socketPath string) error { startCmd = append(startCmd, "--device", "virtio-rng") + var isoPath = filepath.Join(machineDir, isoFilename) startCmd = append(startCmd, - "--kernel", d.ResolveStorePath("bzimage")) - startCmd = append(startCmd, - "--kernel-cmdline", d.Cmdline) + "--device", fmt.Sprintf("virtio-blk,path=%s", isoPath)) + startCmd = append(startCmd, - "--initrd", d.ResolveStorePath("initrd")) + "--device", fmt.Sprintf("virtio-blk,path=%s", d.diskPath())) for i := 0; i < d.ExtraDisks; i++ { startCmd = append(startCmd, "--device", fmt.Sprintf("virtio-blk,path=%s", pkgdrivers.ExtraDiskPath(d.BaseDriver, i))) } + serialPath := d.ResolveStorePath(serialFileName) startCmd = append(startCmd, - "--device", fmt.Sprintf("virtio-blk,path=%s", d.diskPath())) + "--device", fmt.Sprintf("virtio-serial,logFilePath=%s", serialPath)) log.Debugf("executing: vfkit %s", strings.Join(startCmd, " ")) os.Remove(d.sockfilePath()) @@ -410,22 +407,6 @@ func (d *Driver) Restart() error { return d.Start() } -func (d *Driver) extractKernel(isoPath string) error { - for _, f := range []struct { - pathInIso string - destPath string - }{ - {"/boot/bzimage", "bzimage"}, - {"/boot/initrd", "initrd"}, - } { - fullDestPath := d.ResolveStorePath(f.destPath) - if err := pkgdrivers.ExtractFile(isoPath, f.pathInIso, fullDestPath); err != nil { - return err - } - } - return nil -} - func (d *Driver) killVfkit() error { if err := d.SetVFKitState("HardStop"); err != nil { // Typically fails with EOF due to https://github.com/crc-org/vfkit/issues/277. diff --git a/pkg/drivers/vmnet/vmnet.go b/pkg/drivers/vmnet/vmnet.go index 0bfe654304..eb0f8c44bc 100644 --- a/pkg/drivers/vmnet/vmnet.go +++ b/pkg/drivers/vmnet/vmnet.go @@ -55,6 +55,11 @@ type Helper struct { // will obtain the same MAC address from vmnet. InterfaceID string + // Offloading is required for krunkit, does not work with vfkit. 
+ // We must use this until libkrun add support for disabling offloading: + // https://github.com/containers/libkrun/issues/264 + Offloading bool + // Set when vmnet interface is started. macAddress string } @@ -115,13 +120,18 @@ func ValidateHelper() error { // machine. The helper will create a unix datagram socket at the specfied path. // The client (e.g. vfkit) will connect to this socket. func (h *Helper) Start(socketPath string) error { - cmd := exec.Command( - "sudo", + args := []string{ "--non-interactive", executablePath, "--socket", socketPath, "--interface-id", h.InterfaceID, - ) + } + + if h.Offloading { + args = append(args, "--enable-tso", "--enable-checksum-offload") + } + + cmd := exec.Command("sudo", args...) // Create vmnet-helper in a new process group so it is not harmed when // terminating the minikube process group. diff --git a/pkg/generate/docs.go b/pkg/generate/docs.go index 42230c7540..bdfac319a7 100644 --- a/pkg/generate/docs.go +++ b/pkg/generate/docs.go @@ -97,17 +97,17 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer) error { buf.WriteString(long + "\n\n") if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```shell\n%s\n```\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "```shell\n%s\n```\n\n", cmd.UseLine()) } if len(cmd.Aliases) > 0 { buf.WriteString("### Aliases\n\n") - buf.WriteString(fmt.Sprintf("%s\n\n", cmd.Aliases)) + fmt.Fprintf(buf, "%s\n\n", cmd.Aliases) } if len(cmd.Example) > 0 { buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example) } if err := printOptions(buf, cmd); err != nil { diff --git a/pkg/generate/errorcodes.go b/pkg/generate/errorcodes.go index a81decdce4..145d92d47d 100644 --- a/pkg/generate/errorcodes.go +++ b/pkg/generate/errorcodes.go @@ -83,7 +83,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error { // This is the numeric code of the error, e.g. 
80 for ExGuest Error code := s.Value - buf.WriteString(fmt.Sprintf("%s: %s \n", code, currentError)) + fmt.Fprintf(buf, "%s: %s \n", code, currentError) } return true }) @@ -100,7 +100,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error { currentNode = id.Name if strings.HasPrefix(currentNode, "Ex") && currentNode != "ExitCode" { // We have all the info we're going to get on this error, print it out - buf.WriteString(fmt.Sprintf("%s (Exit code %v) \n", currentID, currentNode)) + fmt.Fprintf(buf, "%s (Exit code %v) \n", currentID, currentNode) if currentComment != "" { buf.WriteString(currentComment + " \n") } diff --git a/pkg/kapi/kapi.go b/pkg/kapi/kapi.go index c7f2786017..4b34297c37 100644 --- a/pkg/kapi/kapi.go +++ b/pkg/kapi/kapi.go @@ -48,21 +48,21 @@ var ( ) // ClientConfig returns the client configuration for a kubectl context -func ClientConfig(context string) (*rest.Config, error) { +func ClientConfig(ctx string) (*rest.Config, error) { loader := clientcmd.NewDefaultClientConfigLoadingRules() - cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: context}) + cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: ctx}) c, err := cc.ClientConfig() if err != nil { return nil, fmt.Errorf("client config: %v", err) } c = proxy.UpdateTransport(c) - klog.V(1).Infof("client config for %s: %+v", context, c) + klog.V(1).Infof("client config for %s: %+v", ctx, c) return c, nil } // Client gets the Kubernetes client for a kubectl context name -func Client(context string) (*kubernetes.Clientset, error) { - c, err := ClientConfig(context) +func Client(ctx string) (*kubernetes.Clientset, error) { + c, err := ClientConfig(ctx) if err != nil { return nil, err } diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index 54400b4d71..60a632a58d 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ 
-277,11 +277,11 @@ var Addons = map[string]*Addon{ "0640"), }, false, "ingress", "Kubernetes", "", "https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/", map[string]string{ // https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L445 - "IngressController": "ingress-nginx/controller:v1.12.2@sha256:03497ee984628e95eca9b2279e3f3a3c1685dd48635479e627d219f00c8eefa9", + "IngressController": "ingress-nginx/controller:v1.12.3@sha256:ac444cd9515af325ba577b596fe4f27a34be1aa330538e8b317ad9d6c8fb94ee", // https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L552 - "KubeWebhookCertgenCreate": "ingress-nginx/kube-webhook-certgen:v1.5.3@sha256:2cf4ebfa82a37c357455458f6dfc334aea1392d508270b2517795a9933a02524", + "KubeWebhookCertgenCreate": "ingress-nginx/kube-webhook-certgen:v1.5.4@sha256:7a38cf0f8480775baaee71ab519c7465fd1dfeac66c421f28f087786e631456e", // https://github.com/kubernetes/ingress-nginx/blob/3476232f5c38383dd157ddaff3b4c7cebd57284e/deploy/static/provider/kind/deploy.yaml#L601 - "KubeWebhookCertgenPatch": "ingress-nginx/kube-webhook-certgen:v1.5.3@sha256:2cf4ebfa82a37c357455458f6dfc334aea1392d508270b2517795a9933a02524", + "KubeWebhookCertgenPatch": "ingress-nginx/kube-webhook-certgen:v1.5.4@sha256:7a38cf0f8480775baaee71ab519c7465fd1dfeac66c421f28f087786e631456e", }, map[string]string{ "IngressController": "registry.k8s.io", "KubeWebhookCertgenCreate": "registry.k8s.io", @@ -310,7 +310,7 @@ var Addons = map[string]*Addon{ MustBinAsset(addons.InspektorGadgetAssets, "inspektor-gadget/ig-deployment.yaml.tmpl", vmpath.GuestAddonsDir, "ig-deployment.yaml", "0640"), }, false, "inspektor-gadget", "3rd party (inspektor-gadget.io)", "https://github.com/orgs/inspektor-gadget/people", "https://minikube.sigs.k8s.io/docs/handbook/addons/inspektor-gadget/", map[string]string{ - "InspektorGadget": 
"inspektor-gadget/inspektor-gadget:v0.40.0@sha256:8675a014e349eb928dadd8109fd631595c645bb7efa226710cc5bbb85e0fcb6a", + "InspektorGadget": "inspektor-gadget/inspektor-gadget:v0.41.0@sha256:1ba1900f625d235ee85737a948b363f620b2494f0963eb06c39898f37e470469", }, map[string]string{ "InspektorGadget": "ghcr.io", }), @@ -322,7 +322,7 @@ var Addons = map[string]*Addon{ "0640"), }, false, "kong", "3rd party (Kong HQ)", "@gAmUssA", "https://minikube.sigs.k8s.io/docs/handbook/addons/kong-ingress/", map[string]string{ "Kong": "kong:3.9.0@sha256:0f5de480cfa95c612dcedf707272c13900e8d10e435b5e5bf57d950c87620268", - "KongIngress": "kong/kubernetes-ingress-controller:3.4.5@sha256:770676763f40edf8f9d0ccad74ddc114877dc4aaf018a964cdd164afa10effb3", + "KongIngress": "kong/kubernetes-ingress-controller:3.4.7@sha256:b38ef1b431f63261d6d5ddd4340661f9d41f135eb42d29349d695ad5817c9284", }, map[string]string{ "Kong": "docker.io", "KongIngress": "docker.io", @@ -334,7 +334,7 @@ var Addons = map[string]*Addon{ "pod.yaml", "0640"), }, false, "kubevirt", "3rd party (KubeVirt)", "", "https://minikube.sigs.k8s.io/docs/handbook/addons/kubevirt/", map[string]string{ - "Kubectl": "bitnami/kubectl:1.33.1@sha256:35f792b0f0b8b3072bb01cd50a23d2dc1ba2488eed70a1a951a1789a8e3bc994", + "Kubectl": "bitnami/kubectl:1.33.1@sha256:9081a6f83f4febf47369fc46b6f0f7683c7db243df5b43fc9defe51b0471a950", }, map[string]string{ "Kubectl": "docker.io", }), @@ -597,7 +597,7 @@ var Addons = map[string]*Addon{ "gcp-auth-webhook.yaml", "0640"), }, false, "gcp-auth", "Google", "", "https://minikube.sigs.k8s.io/docs/handbook/addons/gcp-auth/", map[string]string{ - "KubeWebhookCertgen": "ingress-nginx/kube-webhook-certgen:v1.5.3@sha256:2cf4ebfa82a37c357455458f6dfc334aea1392d508270b2517795a9933a02524", + "KubeWebhookCertgen": "ingress-nginx/kube-webhook-certgen:v1.5.4@sha256:7a38cf0f8480775baaee71ab519c7465fd1dfeac66c421f28f087786e631456e", "GCPAuthWebhook": 
"k8s-minikube/gcp-auth-webhook:v0.1.3@sha256:94f0c448171b974aab7b4a96d00feb5799b1d69827a738a4f8b4b30c17fb74e7", }, map[string]string{ "GCPAuthWebhook": "gcr.io", @@ -610,9 +610,9 @@ var Addons = map[string]*Addon{ "volcano-deployment.yaml", "0640"), }, false, "volcano", "third-party (volcano)", "hwdef", "", map[string]string{ - "vc_webhook_manager": "volcanosh/vc-webhook-manager:v1.11.2@sha256:e3dd5fc9c8af79bfa2182ccd4f48ba0a87c2047d3bdd59dd415288bf19c80ddc", - "vc_controller_manager": "volcanosh/vc-controller-manager:v1.11.2@sha256:ef164e8b3061838a315442ad9ffeb9699a636a73123c5269665fd7aeab06757c", - "vc_scheduler": "volcanosh/vc-scheduler:v1.11.2@sha256:5b77f6b38127db41afe4a38bbf585fa2ea5555998459d34f4f1691233f506121", + "vc_webhook_manager": "volcanosh/vc-webhook-manager:v1.12.1@sha256:f8b50088a7329220cbdcc624067943a76a005bb18bda77647e618aab26cf759d", + "vc_controller_manager": "volcanosh/vc-controller-manager:v1.12.1@sha256:3815883c32f62c3a60b8208ba834f304d91d8f05cddfabd440aa15f7f8bef296", + "vc_scheduler": "volcanosh/vc-scheduler:v1.12.1@sha256:b24ea8af2d167a3525e8fc603b32eca6c9b46ef509fa7e87f09e1fadb992faf2", }, map[string]string{ "vc_webhook_manager": "docker.io", "vc_controller_manager": "docker.io", @@ -769,7 +769,7 @@ var Addons = map[string]*Addon{ "cloud-spanner": NewAddon([]*BinAsset{ MustBinAsset(addons.CloudSpanner, "cloud-spanner/deployment.yaml.tmpl", vmpath.GuestAddonsDir, "deployment.yaml", "0640"), }, false, "cloud-spanner", "Google", "", "https://minikube.sigs.k8s.io/docs/handbook/addons/cloud-spanner/", map[string]string{ - "CloudSpanner": "cloud-spanner-emulator/emulator:1.5.34@sha256:f98725ceb484500d858d17916ea4a04e2a83184b5a080a87113770e82c177744", + "CloudSpanner": "cloud-spanner-emulator/emulator:1.5.35@sha256:aee284a39a132636143b2646c38dfd71d366b9d791d1ed1e83c9dd241687f08a", }, map[string]string{ "CloudSpanner": "gcr.io", }), @@ -798,6 +798,21 @@ var Addons = map[string]*Addon{ map[string]string{ "Yakd": "docker.io", }), + "kubetail": 
NewAddon([]*BinAsset{ + MustBinAsset(addons.KubetailAssets, "kubetail/kubetail-namespace.yaml", vmpath.GuestAddonsDir, "kubetail-namespace.yaml", "0640"), + MustBinAsset(addons.KubetailAssets, "kubetail/kubetail-dashboard.yaml.tmpl", vmpath.GuestAddonsDir, "kubetail-dashboard.yaml", "0640"), + MustBinAsset(addons.KubetailAssets, "kubetail/kubetail-cluster-api.yaml.tmpl", vmpath.GuestAddonsDir, "kubetail-cluster-api.yaml", "0640"), + MustBinAsset(addons.KubetailAssets, "kubetail/kubetail-cluster-agent.yaml.tmpl", vmpath.GuestAddonsDir, "kubetail-cluster-agent.yaml", "0640"), + MustBinAsset(addons.KubetailAssets, "kubetail/kubetail-cli.yaml", vmpath.GuestAddonsDir, "kubetail-cli.yaml", "0640"), + }, false, "kubetail", "3rd party (kubetail.com)", "amorey", "https://minikube.sigs.k8s.io/docs/handbook/addons/kubetail/", + map[string]string{ + "KubetailDashboard": "kubetail/kubetail-dashboard:0.6.0@sha256:fc8d01805c09f2ad3f5a2c94016e399ece4c03ff7275dc007a213281087490ac", + "KubetailClusterAPI": "kubetail/kubetail-cluster-api:0.4.0@sha256:fec3154c589a31493f14ca5ecbbc48d3ad7bab6b5e30dcddabc2457bd297dae7", + "KubetailClusterAgent": "kubetail/kubetail-cluster-agent:0.4.0@sha256:5363ca1a5394943aa6bf4c160860a8dc616c3e939d2006cfa902f8863e182bae", + }, + map[string]string{ + "Kubetail": "docker.io", + }), } // parseMapString creates a map based on `str` which is encoded as =,=,... 
diff --git a/pkg/minikube/assets/vm_assets.go b/pkg/minikube/assets/vm_assets.go index faa573aa4c..f539a94882 100644 --- a/pkg/minikube/assets/vm_assets.go +++ b/pkg/minikube/assets/vm_assets.go @@ -20,7 +20,7 @@ import ( "bytes" "embed" "fmt" - "html/template" + "text/template" "io" "os" "path" diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 23f2cb3788..3354f6bd82 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -579,8 +579,8 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error { // canRead returns true if the file represented // by path exists and is readable, otherwise false. -func canRead(path string) bool { - f, err := os.Open(path) +func canRead(filePath string) bool { + f, err := os.Open(filePath) if err != nil { return false } diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index f139e18a5b..40d612e2ce 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -164,9 +164,11 @@ func auxiliary(mirror string) []string { func storageProvisioner(mirror string) string { cv := version.GetStorageProvisionerVersion() in := "k8s-minikube/storage-provisioner:" + cv - if mirror == "" { + + switch mirror { + case "": mirror = "gcr.io" - } else if mirror == constants.AliyunMirror { + case constants.AliyunMirror: in = "storage-provisioner:" + cv } return path.Join(mirror, in) @@ -183,7 +185,7 @@ func KindNet(repo string) string { } // all calico images are from https://github.com/projectcalico/calico/blob/master/manifests/calico.yaml -const calicoVersion = "v3.30.0" +const calicoVersion = "v3.30.2" const calicoRepo = "docker.io/calico" // CalicoDaemonSet returns the image used for calicoDaemonSet diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index fc6c25de15..04c67b6362 100644 --- 
a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -172,7 +172,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) { // init initialises primary control-plane using kubeadm. func (k *Bootstrapper) init(cfg config.ClusterConfig) error { - version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) + ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing Kubernetes version") } @@ -195,7 +195,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { "Swap", // For "none" users who have swap configured "NumCPU", // For "none" users who have too few CPUs } - if version.GE(semver.MustParse("1.20.0")) { + if ver.GE(semver.MustParse("1.20.0")) { ignore = append(ignore, "Mem", // For "none" users who have too little memory ) @@ -719,7 +719,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro // and by that time we would exit completely, so we wait until kubelet begins restarting pods klog.Info("waiting for restarted kubelet to initialise ...") start := time.Now() - wait := func() error { + waitFunc := func() error { pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"}) if err != nil { return err @@ -731,7 +731,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro } return fmt.Errorf("kubelet not initialised") } - _ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute) + _ = retry.Expo(waitFunc, 250*time.Millisecond, 1*time.Minute) klog.Infof("kubelet initialised") klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start)) } @@ -784,11 +784,11 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", 
strings.TrimSpace(joinCmd)) // avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error - version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) + ver, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) if err != nil { return "", errors.Wrap(err, "parsing Kubernetes version") } - cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: version}) + cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: ver}) if err != nil { klog.Errorf("cruntime: %v", err) } @@ -840,11 +840,11 @@ func StopKubernetes(runner command.Runner, cr cruntime.Manager) { // DeleteCluster removes the components that were started earlier func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { - version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) + ver, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing Kubernetes version") } - cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: version}) + cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: ver}) if err != nil { return errors.Wrap(err, "runtime") } @@ -852,7 +852,7 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion) sp := cr.SocketPath() cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp) - if version.LT(semver.MustParse("1.11.0")) { + if ver.LT(semver.MustParse("1.11.0")) { cmd = fmt.Sprintf("%s reset 
--cri-socket %s", ka, sp) } @@ -874,12 +874,12 @@ func (k *Bootstrapper) SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCm func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { klog.Infof("updating cluster %+v ...", cfg) - images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) + imgs, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "kubeadm images") } - version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) + ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing Kubernetes version") } @@ -887,7 +887,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cfg.KubernetesConfig.CRISocket, - KubernetesVersion: version, + KubernetesVersion: ver, }) if err != nil { return errors.Wrap(err, "runtime") @@ -903,7 +903,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { } if cfg.KubernetesConfig.ShouldLoadCachedImages { - if err := machine.LoadCachedImages(&cfg, k.c, images, detect.ImageCacheDir(), false); err != nil { + if err := machine.LoadCachedImages(&cfg, k.c, imgs, detect.ImageCacheDir(), false); err != nil { out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err}) } } diff --git a/pkg/minikube/cluster/ip.go b/pkg/minikube/cluster/ip.go index 1f28d50c20..0e64bae991 100644 --- a/pkg/minikube/cluster/ip.go +++ b/pkg/minikube/cluster/ip.go @@ -34,21 +34,21 @@ import ( ) // HostIP gets the ip address to be used for mapping host -> VM and VM -> host -func HostIP(host *host.Host, clusterName string) (net.IP, error) { - switch host.DriverName { +func HostIP(hostInfo *host.Host, clusterName string) (net.IP, error) { + switch hostInfo.DriverName { case driver.Docker: - return 
oci.RoutableHostIPFromInside(oci.Docker, clusterName, host.Name) + return oci.RoutableHostIPFromInside(oci.Docker, clusterName, hostInfo.Name) case driver.Podman: - return oci.RoutableHostIPFromInside(oci.Podman, clusterName, host.Name) + return oci.RoutableHostIPFromInside(oci.Podman, clusterName, hostInfo.Name) case driver.SSH: - ip, err := host.Driver.GetIP() + ip, err := hostInfo.Driver.GetIP() if err != nil { return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address") } return net.ParseIP(ip), nil case driver.KVM2: // `host.Driver.GetIP` returns dhcp lease info for a given network(=`virsh net-dhcp-leases minikube-net`) - vmIPString, err := host.Driver.GetIP() + vmIPString, err := hostInfo.Driver.GetIP() if err != nil { return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address") } @@ -59,7 +59,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) { } return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil case driver.QEMU, driver.QEMU2: - ipString, err := host.Driver.GetIP() + ipString, err := hostInfo.Driver.GetIP() if err != nil { return []byte{}, errors.Wrap(err, "Error getting IP address") } @@ -70,7 +70,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) { // socket_vmnet network case return net.ParseIP("192.168.105.1"), nil case driver.HyperV: - v := reflect.ValueOf(host.Driver).Elem() + v := reflect.ValueOf(hostInfo.Driver).Elem() var hypervVirtualSwitch string // We don't have direct access to hyperv.Driver so use reflection to retrieve the virtual switch name for i := 0; i < v.NumField(); i++ { @@ -91,7 +91,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) { return ip, nil case driver.VirtualBox: vBoxManageCmd := driver.VBoxManagePath() - out, err := exec.Command(vBoxManageCmd, "showvminfo", host.Name, "--machinereadable").Output() + out, err := exec.Command(vBoxManageCmd, "showvminfo", hostInfo.Name, "--machinereadable").Output() if err != nil { return []byte{}, 
errors.Wrap(err, "vboxmanage") } @@ -126,11 +126,11 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) { return net.ParseIP(ip), nil case driver.HyperKit: - vmIPString, _ := host.Driver.GetIP() + vmIPString, _ := hostInfo.Driver.GetIP() gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1" return net.ParseIP(gatewayIPString), nil case driver.VMware: - vmIPString, err := host.Driver.GetIP() + vmIPString, err := hostInfo.Driver.GetIP() if err != nil { return []byte{}, errors.Wrap(err, "Error getting VM IP address") } @@ -140,28 +140,28 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) { } return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil case driver.VFKit: - vmIPString, _ := host.Driver.GetIP() + vmIPString, _ := hostInfo.Driver.GetIP() gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1" return net.ParseIP(gatewayIPString), nil case driver.None: return net.ParseIP("127.0.0.1"), nil default: - return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", host.DriverName) + return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", hostInfo.DriverName) } } // DriverIP gets the ip address of the current minikube cluster func DriverIP(api libmachine.API, machineName string) (net.IP, error) { - host, err := machine.LoadHost(api, machineName) + hostInfo, err := machine.LoadHost(api, machineName) if err != nil { return nil, err } - ipStr, err := host.Driver.GetIP() + ipStr, err := hostInfo.Driver.GetIP() if err != nil { return nil, errors.Wrap(err, "getting IP") } - if driver.IsKIC(host.DriverName) { + if driver.IsKIC(hostInfo.DriverName) { ipStr = oci.DefaultBindIPV4 } ip := net.ParseIP(ipStr) diff --git a/pkg/minikube/cni/calico.yaml b/pkg/minikube/cni/calico.yaml index 0b11978fc6..dcec8f6d22 100644 --- a/pkg/minikube/cni/calico.yaml +++ b/pkg/minikube/cni/calico.yaml @@ -525,6 +525,8 @@ spec: Option to keep the original nexthop field when routes are sent to a BGP 
Peer. Setting "true" configures the selected BGP Peers node to use the "next hop keep;" instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg". + Note: that this field is deprecated. Users should use the NextHopMode field to control + the next hop attribute for a BGP peer. type: boolean localWorkloadSelector: description: |- @@ -536,6 +538,26 @@ spec: Time to allow for software restart. When specified, this is configured as the graceful restart timeout. When not specified, the BIRD default of 120s is used. type: string + nextHopMode: + allOf: + - enum: + - Auto + - Self + - Keep + - enum: + - Auto + - Self + - Keep + description: |- + NextHopMode defines the method of calculating the next hop attribute for received routes. + This replaces and expands the deprecated KeepOriginalNextHop field. + Users should use this setting to control the next hop attribute for a BGP peer. + When this is set, the value of the KeepOriginalNextHop field is ignored. + if neither keepOriginalNextHop or nextHopMode is specified, BGP's default behaviour is used. + Set it to “Auto” to apply BGP’s default behaviour. + Set it to "Self" to configure "next hop self;" in "bird.cfg". + Set it to "Keep" to configure "next hop keep;" in "bird.cfg". + type: string node: description: |- The node name identifying the Calico node instance that is targeted by this peer. @@ -1628,6 +1650,13 @@ spec: description: FlowLogGoldmaneServer is the flow server endpoint to which flow data should be published. type: string + flowLogsLocalReporter: + description: 'FlowLogsLocalReporter configures local unix socket for + reporting flow data from each node. 
[Default: Disabled]' + enum: + - Disabled + - Enabled + type: string flowLogsPolicyEvaluationMode: description: |- Continuous - Felix evaluates active flows on a regular basis to determine the rule @@ -1871,18 +1900,18 @@ spec: logSeverityFile: description: 'LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info]' - pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string logSeverityScreen: description: 'LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info]' - pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string logSeveritySys: description: |- LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. [Default: Info] - pattern: ^(?i)(Debug|Info|Warning|Error|Fatal)?$ + pattern: ^(?i)(Trace|Debug|Info|Warning|Error|Fatal)?$ type: string maxIpsetSize: description: |- diff --git a/pkg/minikube/cni/cilium.yaml b/pkg/minikube/cni/cilium.yaml index 703b3614f8..88d2fada27 100644 --- a/pkg/minikube/cni/cilium.yaml +++ b/pkg/minikube/cni/cilium.yaml @@ -860,7 +860,7 @@ spec: type: Unconfined containers: - name: cilium-agent - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -1019,7 +1019,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent command: - cilium-dbg @@ -1042,7 +1042,7 @@ spec: # Required to mount cgroup2 filesystem on 
the underlying Kubernetes node. # We use nsenter command with host's cgroup and mount namespaces enabled. - name: mount-cgroup - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent env: - name: CGROUP_ROOT @@ -1079,7 +1079,7 @@ spec: drop: - ALL - name: apply-sysctl-overwrites - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent env: - name: BIN_PATH @@ -1117,7 +1117,7 @@ spec: # from a privileged container because the mount propagation bidirectional # only works from privileged containers. - name: mount-bpf-fs - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -1133,7 +1133,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: "quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -1180,7 +1180,7 @@ spec: mountPath: /var/run/cilium # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "quay.io/cilium/cilium:v1.17.4@sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a" + image: 
"quay.io/cilium/cilium:v1.17.5@sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1363,7 +1363,7 @@ spec: type: Unconfined containers: - name: cilium-envoy - image: "quay.io/cilium/cilium-envoy:v1.32.6-1746661844-0f602c28cb2aa57b29078195049fb257d5b5246c@sha256:a04218c6879007d60d96339a441c448565b6f86650358652da27582e0efbf182" + image: "quay.io/cilium/cilium-envoy:v1.32.6-1749271279-0864395884b263913eac200ee2048fd985f8e626@sha256:9f69e290a7ea3d4edf9192acd81694089af048ae0d8a67fb63bd62dc1d72203e" imagePullPolicy: IfNotPresent command: - /usr/bin/cilium-envoy-starter @@ -1538,7 +1538,7 @@ spec: spec: containers: - name: cilium-operator - image: "quay.io/cilium/operator-generic:v1.17.4@sha256:a3906412f477b09904f46aac1bed28eb522bef7899ed7dd81c15f78b7aa1b9b5" + image: "quay.io/cilium/operator-generic:v1.17.5@sha256:f954c97eeb1b47ed67d08cc8fb4108fb829f869373cbb3e698a7f8ef1085b09e" imagePullPolicy: IfNotPresent command: - cilium-operator-generic diff --git a/pkg/minikube/cni/cni.go b/pkg/minikube/cni/cni.go index bb8b0bb8fc..a8c3147915 100644 --- a/pkg/minikube/cni/cni.go +++ b/pkg/minikube/cni/cni.go @@ -245,18 +245,18 @@ func ConfigureDefaultBridgeCNIs(r Runner, networkPlugin string) error { // disableAllBridgeCNIs disables all bridge cnis by changing extension to "mk_disabled" of all *bridge* config file(s) found in default location (ie, /etc/cni/net.d). 
func disableAllBridgeCNIs(r Runner) error { - path := "/etc/cni/net.d" + cniPath := "/etc/cni/net.d" out, err := r.RunCmd(exec.Command( // for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name) - "sudo", "find", path, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c", + "sudo", "find", cniPath, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c", `sudo mv {} {}.mk_disabled`, ";")) if err != nil { - return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", path, err) + return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", cniPath, err) } configs := strings.Trim(out.Stdout.String(), ", ") if len(configs) == 0 { - klog.Infof("no active bridge cni configs found in %q - nothing to disable", path) + klog.Infof("no active bridge cni configs found in %q - nothing to disable", cniPath) return nil } klog.Infof("disabled [%s] bridge cni config(s)", configs) diff --git a/pkg/minikube/cni/flannel.yaml b/pkg/minikube/cni/flannel.yaml index 5b8bd52666..1fd7be0ed1 100644 --- a/pkg/minikube/cni/flannel.yaml +++ b/pkg/minikube/cni/flannel.yaml @@ -133,7 +133,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni-plugin - image: ghcr.io/flannel-io/flannel-cni-plugin:v1.6.2-flannel1 + image: ghcr.io/flannel-io/flannel-cni-plugin:v1.7.1-flannel1 command: - cp args: @@ -144,7 +144,7 @@ spec: - name: cni-plugin mountPath: /opt/cni/bin - name: install-cni - image: ghcr.io/flannel-io/flannel:v0.26.7 + image: ghcr.io/flannel-io/flannel:v0.27.0 command: - cp args: @@ -158,7 +158,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: ghcr.io/flannel-io/flannel:v0.26.7 + image: ghcr.io/flannel-io/flannel:v0.27.0 command: - 
/opt/bin/flanneld args: diff --git a/pkg/minikube/command/kic_runner.go b/pkg/minikube/command/kic_runner.go index 8c393b123f..8eacd7a743 100644 --- a/pkg/minikube/command/kic_runner.go +++ b/pkg/minikube/command/kic_runner.go @@ -44,10 +44,10 @@ type kicRunner struct { } // NewKICRunner returns a kicRunner implementor of runner which runs cmds inside a container -func NewKICRunner(containerNameOrID string, oci string) Runner { +func NewKICRunner(containerNameOrID string, ociName string) Runner { return &kicRunner{ nameOrID: containerNameOrID, - ociBin: oci, // docker or podman + ociBin: ociName, // docker or podman } } @@ -271,8 +271,8 @@ func copyToPodman(src string, dest string) error { defer file.Close() parts := strings.Split(dest, ":") container := parts[0] - path := parts[1] - cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", path) + containerPath := parts[1] + cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", containerPath) cmd.Stdin = file klog.Infof("Run: %v", cmd) if err := cmd.Run(); err != nil { diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 0f67fa0960..b9f7ea9da8 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -58,7 +58,7 @@ func ControlPlanes(cc ClusterConfig) []Node { func IsPrimaryControlPlane(cc ClusterConfig, node Node) bool { // TODO (prezha): find where, for "none" driver, we set first (ie, primary control-plane) node name to "m01" - that should not happen but it's happening before pr #17909 // return node.ControlPlane && node.Name == "" - return cc.Nodes != nil && cc.Nodes[0].Name == node.Name + return len(cc.Nodes) > 0 && cc.Nodes[0].Name == node.Name } // IsValid checks if the profile has the essential info needed for a profile diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index cf7df8444a..7a4fdce1b3 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -34,10 
+34,10 @@ var ( const ( // DefaultKubernetesVersion is the default Kubernetes version - DefaultKubernetesVersion = "v1.33.1" + DefaultKubernetesVersion = "v1.33.2" // NewestKubernetesVersion is the newest Kubernetes version to test against // NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go - NewestKubernetesVersion = "v1.33.1" + NewestKubernetesVersion = "v1.33.2" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.20.0" // NoKubernetesVersion is the version used when users does NOT want to install kubernetes diff --git a/pkg/minikube/constants/constants_kubeadm_images.go b/pkg/minikube/constants/constants_kubeadm_images.go index 1d7425e11c..c983686ced 100644 --- a/pkg/minikube/constants/constants_kubeadm_images.go +++ b/pkg/minikube/constants/constants_kubeadm_images.go @@ -18,6 +18,36 @@ package constants var ( KubeadmImages = map[string]map[string]string{ + "v1.34.0-alpha.2": { + "coredns/coredns": "v1.12.1", + "etcd": "3.5.21-0", + "pause": "3.10", + }, + "v1.33.2": { + "coredns/coredns": "v1.12.0", + "etcd": "3.5.21-0", + "pause": "3.10", + }, + "v1.32.6": { + "coredns/coredns": "v1.11.3", + "etcd": "3.5.16-0", + "pause": "3.10", + }, + "v1.31.10": { + "coredns/coredns": "v1.11.3", + "etcd": "3.5.15-0", + "pause": "3.10", + }, + "v1.30.14": { + "coredns/coredns": "v1.11.3", + "etcd": "3.5.15-0", + "pause": "3.9", + }, + "v1.34.0-alpha.1": { + "coredns/coredns": "v1.12.1", + "etcd": "3.5.21-0", + "pause": "3.10", + }, "v1.33.1": { "coredns/coredns": "v1.12.0", "etcd": "3.5.21-0", diff --git a/pkg/minikube/constants/constants_kubernetes_versions.go b/pkg/minikube/constants/constants_kubernetes_versions.go index cfeed84f9a..ba61f0b32d 100644 --- a/pkg/minikube/constants/constants_kubernetes_versions.go +++ b/pkg/minikube/constants/constants_kubernetes_versions.go @@ -21,6 +21,9 @@ package constants // ValidKubernetesVersions is a list of Kubernetes versions in 
order from newest to oldest // This is used when outputting Kubernetes versions and to select the latest patch version when unspecified var ValidKubernetesVersions = []string{ + "v1.34.0-alpha.2", + "v1.34.0-alpha.1", + "v1.33.2", "v1.33.1", "v1.33.0", "v1.33.0-rc.1", @@ -29,6 +32,7 @@ var ValidKubernetesVersions = []string{ "v1.33.0-alpha.3", "v1.33.0-alpha.2", "v1.33.0-alpha.1", + "v1.32.6", "v1.32.5", "v1.32.4", "v1.32.3", @@ -42,6 +46,7 @@ var ValidKubernetesVersions = []string{ "v1.32.0-alpha.3", "v1.32.0-alpha.2", "v1.32.0-alpha.1", + "v1.31.10", "v1.31.9", "v1.31.8", "v1.31.7", @@ -58,6 +63,7 @@ var ValidKubernetesVersions = []string{ "v1.31.0-alpha.3", "v1.31.0-alpha.2", "v1.31.0-alpha.1", + "v1.30.14", "v1.30.13", "v1.30.12", "v1.30.11", diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index 290007e1bd..931ee6bb1b 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -281,9 +281,9 @@ func (r *Containerd) ListImages(ListImagesOptions) ([]ListImage, error) { } // LoadImage loads an image into this runtime -func (r *Containerd) LoadImage(path string) error { - klog.Infof("Loading image: %s", path) - c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", path) +func (r *Containerd) LoadImage(imagePath string) error { + klog.Infof("Loading image: %s", imagePath) + c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", imagePath) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrapf(err, "ctr images import") } @@ -296,9 +296,9 @@ func (r *Containerd) PullImage(name string) error { } // SaveImage save an image from this runtime -func (r *Containerd) SaveImage(name string, path string) error { - klog.Infof("Saving image %s: %s", name, path) - c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", path, name) +func (r *Containerd) SaveImage(name string, destPath string) error { + klog.Infof("Saving image %s: %s", name, destPath) + c := 
exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", destPath, name) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrapf(err, "ctr images export") } @@ -526,11 +526,11 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error { cRuntime := cc.KubernetesConfig.ContainerRuntime // If images already exist, return - images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) + imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) if err != nil { return errors.Wrap(err, "getting images") } - if containerdImagesPreloaded(r.Runner, images) { + if containerdImagesPreloaded(r.Runner, imgs) { klog.Info("Images already preloaded, skipping extraction") return nil } @@ -583,7 +583,7 @@ func (r *Containerd) Restart() error { } // containerdImagesPreloaded returns true if all images have been preloaded -func containerdImagesPreloaded(runner command.Runner, images []string) bool { +func containerdImagesPreloaded(runner command.Runner, imgs []string) bool { var rr *command.RunResult imageList := func() (err error) { @@ -604,7 +604,7 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool { } // Make sure images == imgs - for _, i := range images { + for _, i := range imgs { found := false for _, ji := range jsonImages.Images { for _, rt := range ji.RepoTags { @@ -629,6 +629,6 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool { } // ImagesPreloaded returns true if all images have been preloaded -func (r *Containerd) ImagesPreloaded(images []string) bool { - return containerdImagesPreloaded(r.Runner, images) +func (r *Containerd) ImagesPreloaded(imgs []string) bool { + return containerdImagesPreloaded(r.Runner, imgs) } diff --git a/pkg/minikube/cruntime/cri.go b/pkg/minikube/cruntime/cri.go index 99e28632d7..110f92828b 100644 --- a/pkg/minikube/cruntime/cri.go +++ b/pkg/minikube/cruntime/cri.go @@ -184,7 +184,7 @@ func unpauseCRIContainers(cr CommandRunner, root 
string, ids []string) error { return nil } -// criCRIContainers kills a list of containers using crictl +// killCRIContainers kills a list of containers using crictl func killCRIContainers(cr CommandRunner, ids []string) error { if len(ids) == 0 { return nil diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index fec6339b6b..85699fa0d3 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -271,9 +271,9 @@ func (r *CRIO) ListImages(ListImagesOptions) ([]ListImage, error) { } // LoadImage loads an image into this runtime -func (r *CRIO) LoadImage(path string) error { - klog.Infof("Loading image: %s", path) - c := exec.Command("sudo", "podman", "load", "-i", path) +func (r *CRIO) LoadImage(imgPath string) error { + klog.Infof("Loading image: %s", imgPath) + c := exec.Command("sudo", "podman", "load", "-i", imgPath) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrap(err, "crio load image") } @@ -286,9 +286,9 @@ func (r *CRIO) PullImage(name string) error { } // SaveImage saves an image from this runtime -func (r *CRIO) SaveImage(name string, path string) error { - klog.Infof("Saving image %s: %s", name, path) - c := exec.Command("sudo", "podman", "save", name, "-o", path) +func (r *CRIO) SaveImage(name string, destPath string) error { + klog.Infof("Saving image %s: %s", name, destPath) + c := exec.Command("sudo", "podman", "save", name, "-o", destPath) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrap(err, "crio save image") } @@ -425,11 +425,11 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error { cRuntime := cc.KubernetesConfig.ContainerRuntime // If images already exist, return - images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) + imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) if err != nil { return errors.Wrap(err, "getting images") } - if crioImagesPreloaded(r.Runner, images) { + if crioImagesPreloaded(r.Runner, imgs) { 
klog.Info("Images already preloaded, skipping extraction") return nil } @@ -477,7 +477,7 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error { } // crioImagesPreloaded returns true if all images have been preloaded -func crioImagesPreloaded(runner command.Runner, images []string) bool { +func crioImagesPreloaded(runner command.Runner, imgs []string) bool { rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json")) if err != nil { return false @@ -491,7 +491,7 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool { } // Make sure images == imgs - for _, i := range images { + for _, i := range imgs { found := false for _, ji := range jsonImages.Images { for _, rt := range ji.RepoTags { @@ -516,6 +516,6 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool { } // ImagesPreloaded returns true if all images have been preloaded -func (r *CRIO) ImagesPreloaded(images []string) bool { - return crioImagesPreloaded(r.Runner, images) +func (r *CRIO) ImagesPreloaded(imgs []string) bool { + return crioImagesPreloaded(r.Runner, imgs) } diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index b54e8175df..db97878d0c 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -285,9 +285,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) { Tag string `json:"Tag"` Size string `json:"Size"` } - images := strings.Split(rr.Stdout.String(), "\n") + imgs := strings.Split(rr.Stdout.String(), "\n") result := []ListImage{} - for _, img := range images { + for _, img := range imgs { if img == "" { continue } @@ -313,9 +313,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) { } // LoadImage loads an image into this runtime -func (r *Docker) LoadImage(path string) error { - klog.Infof("Loading image: %s", path) - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", path)) +func (r *Docker) 
LoadImage(imgPath string) error { + klog.Infof("Loading image: %s", imgPath) + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", imgPath)) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrap(err, "loadimage docker") } @@ -336,9 +336,9 @@ func (r *Docker) PullImage(name string) error { } // SaveImage saves an image from this runtime -func (r *Docker) SaveImage(name string, path string) error { - klog.Infof("Saving image %s: %s", name, path) - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, path)) +func (r *Docker) SaveImage(name string, imagePath string) error { + klog.Infof("Saving image %s: %s", name, imagePath) + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, imagePath)) if _, err := r.Runner.RunCmd(c); err != nil { return errors.Wrap(err, "saveimage docker") } @@ -594,13 +594,14 @@ func (r *Docker) configureDocker(driver string) error { StorageDriver: "overlay2", } - if r.GPUs == "all" || r.GPUs == "nvidia" { + switch r.GPUs { + case "all", "nvidia": assets.Addons["nvidia-device-plugin"].EnableByDefault() daemonConfig.DefaultRuntime = "nvidia" runtimes := &dockerDaemonRuntimes{} runtimes.Nvidia.Path = "/usr/bin/nvidia-container-runtime" daemonConfig.Runtimes = runtimes - } else if r.GPUs == "amd" { + case "amd": assets.Addons["amd-gpu-device-plugin"].EnableByDefault() } @@ -624,11 +625,11 @@ func (r *Docker) Preload(cc config.ClusterConfig) error { cRuntime := cc.KubernetesConfig.ContainerRuntime // If images already exist, return - images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) + imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) if err != nil { return errors.Wrap(err, "getting images") } - if dockerImagesPreloaded(r.Runner, images) { + if dockerImagesPreloaded(r.Runner, imgs) { klog.Info("Images already preloaded, skipping extraction") return nil } @@ -687,7 
+688,7 @@ func (r *Docker) Preload(cc config.ClusterConfig) error { } // dockerImagesPreloaded returns true if all images have been preloaded -func dockerImagesPreloaded(runner command.Runner, images []string) bool { +func dockerImagesPreloaded(runner command.Runner, imgs []string) bool { rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}")) if err != nil { klog.Warning(err) @@ -702,7 +703,7 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool { klog.Infof("Got preloaded images: %s", rr.Output()) // Make sure images == imgs - for _, i := range images { + for _, i := range imgs { i = image.TrimDockerIO(i) if _, ok := preloadedImages[i]; !ok { klog.Infof("%s wasn't preloaded", i) @@ -759,8 +760,8 @@ func dockerBoundToContainerd(runner command.Runner) bool { } // ImagesPreloaded returns true if all images have been preloaded -func (r *Docker) ImagesPreloaded(images []string) bool { - return dockerImagesPreloaded(r.Runner, images) +func (r *Docker) ImagesPreloaded(imgs []string) bool { + return dockerImagesPreloaded(r.Runner, imgs) } const ( diff --git a/pkg/minikube/download/iso.go b/pkg/minikube/download/iso.go index 2e86d3865c..2a258bccab 100644 --- a/pkg/minikube/download/iso.go +++ b/pkg/minikube/download/iso.go @@ -41,7 +41,7 @@ const fileScheme = "file" // DefaultISOURLs returns a list of ISO URL's to consult by default, in priority order func DefaultISOURLs() []string { v := version.GetISOVersion() - isoBucket := "minikube/iso" + isoBucket := "minikube-builds/iso/20895" return []string{ fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s-%s.iso", isoBucket, v, runtime.GOARCH), @@ -67,8 +67,8 @@ func LocalISOResource(isoURL string) string { } // fileURI returns a file:// URI for a path -func fileURI(path string) string { - return "file://" + filepath.ToSlash(path) +func fileURI(filePath string) string { + return "file://" + filepath.ToSlash(filePath) } // localISOPath returns where an ISO 
should be stored locally diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 970ad656e5..6d6696b7a6 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -250,10 +250,10 @@ func saveChecksumFile(k8sVersion, containerRuntime string, checksum []byte) erro // verifyChecksum returns true if the checksum of the local binary matches // the checksum of the remote binary -func verifyChecksum(k8sVersion, containerRuntime, path string) error { - klog.Infof("verifying checksum of %s ...", path) +func verifyChecksum(k8sVersion, containerRuntime, binaryPath string) error { + klog.Infof("verifying checksum of %s ...", binaryPath) // get md5 checksum of tarball path - contents, err := os.ReadFile(path) + contents, err := os.ReadFile(binaryPath) if err != nil { return errors.Wrap(err, "reading tarball") } @@ -266,7 +266,7 @@ func verifyChecksum(k8sVersion, containerRuntime, path string) error { // create a slice of checksum, which is [16]byte if string(remoteChecksum) != string(checksum[:]) { - return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", path, string(remoteChecksum), string(checksum[:])) + return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", binaryPath, string(remoteChecksum), string(checksum[:])) } return nil } diff --git a/pkg/minikube/driver/auxdriver/install.go b/pkg/minikube/driver/auxdriver/install.go index 854e9a48b5..76e3b78333 100644 --- a/pkg/minikube/driver/auxdriver/install.go +++ b/pkg/minikube/driver/auxdriver/install.go @@ -159,7 +159,7 @@ func extractDriverVersion(s string) string { return strings.TrimPrefix(v, "v") } -func driverExists(driver string) bool { - _, err := exec.LookPath(driver) +func driverExists(driverName string) bool { + _, err := exec.LookPath(driverName) return err == nil } diff --git a/pkg/minikube/firewall/firewall.go b/pkg/minikube/firewall/firewall.go index 9d879433ef..63ccdfb414 100644 --- 
a/pkg/minikube/firewall/firewall.go +++ b/pkg/minikube/firewall/firewall.go @@ -38,28 +38,28 @@ func IsBootpdBlocked(cc config.ClusterConfig) bool { if cc.Driver != driver.QEMU2 || runtime.GOOS != "darwin" || cc.Network != "socket_vmnet" { return false } - out, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output() + rest, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output() if err != nil { klog.Warningf("failed to get firewall state: %v", err) return false } - if regexp.MustCompile(`Firewall is disabled`).Match(out) { + if regexp.MustCompile(`Firewall is disabled`).Match(rest) { return false } - out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output() + rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output() if err != nil { // macOS < 15 or other issue: need to use --list. klog.Warningf("failed to list firewall allowedsinged option: %v", err) // macOS >= 15: bootpd may be allowed as builtin software - } else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(out) { + } else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(rest) { return false } - out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output() + rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output() if err != nil { klog.Warningf("failed to list firewall apps: %v", err) return false } - return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(out) + return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(rest) } // UnblockBootpd adds bootpd to the built-in macOS firewall and then unblocks it diff --git a/pkg/minikube/image/image.go b/pkg/minikube/image/image.go index b567036c63..d2bfc9a93f 100644 --- a/pkg/minikube/image/image.go +++ 
b/pkg/minikube/image/image.go @@ -340,6 +340,6 @@ func normalizeTagName(image string) string { // Remove docker.io prefix since it won't be included in image names // when we call `docker images`. -func TrimDockerIO(name string) string { - return strings.TrimPrefix(name, "docker.io/") +func TrimDockerIO(imageName string) string { + return strings.TrimPrefix(imageName, "docker.io/") } diff --git a/pkg/minikube/localpath/localpath.go b/pkg/minikube/localpath/localpath.go index ef77e07dc7..f3ce7de894 100644 --- a/pkg/minikube/localpath/localpath.go +++ b/pkg/minikube/localpath/localpath.go @@ -184,9 +184,9 @@ func replaceWinDriveLetterToVolumeName(s string) (string, error) { if err != nil { return "", err } - path := vname + s[3:] + p := vname + s[3:] - return path, nil + return p, nil } func getWindowsVolumeNameCmd(d string) (string, error) { diff --git a/pkg/minikube/machine/build_images.go b/pkg/minikube/machine/build_images.go index 8b5f77c5af..5366c2e710 100644 --- a/pkg/minikube/machine/build_images.go +++ b/pkg/minikube/machine/build_images.go @@ -42,7 +42,7 @@ import ( var buildRoot = path.Join(vmpath.GuestPersistentDir, "build") // BuildImage builds image to all profiles -func BuildImage(path string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error { +func BuildImage(srcPath string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error { api, err := NewAPIClient() if err != nil { return errors.Wrap(err, "api") @@ -52,12 +52,12 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o succeeded := []string{} failed := []string{} - u, err := url.Parse(path) + u, err := url.Parse(srcPath) if err == nil && u.Scheme == "file" { - path = u.Path + srcPath = u.Path } remote := err == nil && u.Scheme != "" - if runtime.GOOS == "windows" && filepath.VolumeName(path) != "" { + if 
runtime.GOOS == "windows" && filepath.VolumeName(srcPath) != "" { remote = false } @@ -116,9 +116,9 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o return err } if remote { - err = buildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt) + err = buildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt) } else { - err = transferAndBuildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt) + err = transferAndBuildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt) } if err != nil { failed = append(failed, m) diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index df8d081103..73f29c427d 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -73,19 +73,19 @@ func CacheImagesForBootstrapper(imageRepository, version string) error { } // LoadCachedImages loads previously cached images into the container runtime -func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string, overwrite bool) error { +func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, imgs []string, cacheDir string, overwrite bool) error { cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner}) if err != nil { return errors.Wrap(err, "runtime") } // Skip loading images if images already exist - if !overwrite && cr.ImagesPreloaded(images) { + if !overwrite && cr.ImagesPreloaded(imgs) { klog.Infof("Images are preloaded, skipping loading") return nil } - klog.Infof("LoadCachedImages start: %s", images) + klog.Infof("LoadCachedImages start: %s", imgs) start := time.Now() defer func() { @@ -102,19 +102,19 @@ func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images [] } } - for _, image := range images { - image := image + for _, img := range imgs { + img := img g.Go(func() error { // Put a ten second limit on deciding if an 
image needs transfer // because it takes much less than that time to just transfer the image. // This is needed because if running in offline mode, we can spend minutes here // waiting for i/o timeout. - err := timedNeedsTransfer(imgClient, image, cr, 10*time.Second) + err := timedNeedsTransfer(imgClient, img, cr, 10*time.Second) if err == nil { return nil } - klog.Infof("%q needs transfer: %v", image, err) - return transferAndLoadCachedImage(runner, cc.KubernetesConfig, image, cacheDir) + klog.Infof("%q needs transfer: %v", img, err) + return transferAndLoadCachedImage(runner, cc.KubernetesConfig, img, cacheDir) }) } if err := g.Wait(); err != nil { @@ -172,10 +172,10 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager // LoadLocalImages loads images into the container runtime func LoadLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string) error { var g errgroup.Group - for _, image := range images { - image := image + for _, img := range images { + img := img g.Go(func() error { - return transferAndLoadImage(runner, cc.KubernetesConfig, image, image) + return transferAndLoadImage(runner, cc.KubernetesConfig, img, img) }) } if err := g.Wait(); err != nil { @@ -353,10 +353,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images [] var g errgroup.Group - for _, image := range images { - image := image + for _, img := range images { + img := img g.Go(func() error { - return transferAndSaveCachedImage(runner, cc.KubernetesConfig, image, cacheDir) + return transferAndSaveCachedImage(runner, cc.KubernetesConfig, img, cacheDir) }) } if err := g.Wait(); err != nil { @@ -369,10 +369,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images [] // SaveLocalImages saves images from the container runtime func SaveLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string, output string) error { var g errgroup.Group - for _, image := range images { - 
image := image + for _, img := range images { + img := img g.Go(func() error { - return transferAndSaveImage(runner, cc.KubernetesConfig, output, image) + return transferAndSaveImage(runner, cc.KubernetesConfig, output, img) }) } if err := g.Wait(); err != nil { @@ -527,8 +527,8 @@ func transferAndSaveImage(cr command.Runner, k8s config.KubernetesConfig, dst st } // pullImages pulls images to the container run time -func pullImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("pullImages start: %s", images) +func pullImages(crMgr cruntime.Manager, imgs []string) error { + klog.Infof("pullImages start: %s", imgs) start := time.Now() defer func() { @@ -537,10 +537,10 @@ func pullImages(cruntime cruntime.Manager, images []string) error { var g errgroup.Group - for _, image := range images { - image := image + for _, img := range imgs { + img := img g.Go(func() error { - return cruntime.PullImage(image) + return crMgr.PullImage(img) }) } if err := g.Wait(); err != nil { @@ -588,11 +588,11 @@ func PullImages(images []string, profile *config.Profile) error { if err != nil { return err } - cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) + crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) if err != nil { return errors.Wrap(err, "error creating container runtime") } - err = pullImages(cruntime, images) + err = pullImages(crMgr, images) if err != nil { failed = append(failed, m) klog.Warningf("Failed to pull images for profile %s %v", pName, err.Error()) @@ -608,8 +608,8 @@ func PullImages(images []string, profile *config.Profile) error { } // removeImages removes images from the container run time -func removeImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("removeImages start: %s", images) +func removeImages(crMgr cruntime.Manager, imgs []string) error { + klog.Infof("removeImages start: %s", imgs) start := time.Now() 
defer func() { @@ -618,10 +618,10 @@ func removeImages(cruntime cruntime.Manager, images []string) error { var g errgroup.Group - for _, image := range images { - image := image + for _, img := range imgs { + img := img g.Go(func() error { - return cruntime.RemoveImage(image) + return crMgr.RemoveImage(img) }) } if err := g.Wait(); err != nil { @@ -669,11 +669,11 @@ func RemoveImages(images []string, profile *config.Profile) error { if err != nil { return err } - cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) + crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) if err != nil { return errors.Wrap(err, "error creating container runtime") } - err = removeImages(cruntime, images) + err = removeImages(crMgr, images) if err != nil { failed = append(failed, m) klog.Warningf("Failed to remove images for profile %s %v", pName, err.Error()) @@ -757,19 +757,19 @@ func ListImages(profile *config.Profile, format string) error { } renderImagesTable(data) case "json": - json, err := json.Marshal(uniqueImages) + jsondata, err := json.Marshal(uniqueImages) if err != nil { klog.Warningf("Error marshalling images list: %v", err.Error()) return nil } - fmt.Printf("%s\n", json) + fmt.Printf("%s\n", jsondata) case "yaml": - yaml, err := yaml.Marshal(uniqueImages) + yamldata, err := yaml.Marshal(uniqueImages) if err != nil { klog.Warningf("Error marshalling images list: %v", err.Error()) return nil } - fmt.Printf("%s\n", yaml) + fmt.Printf("%s\n", yamldata) default: res := []string{} for _, item := range uniqueImages { @@ -892,11 +892,11 @@ func TagImage(profile *config.Profile, source string, target string) error { if err != nil { return err } - cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) + crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) if err != nil { return 
errors.Wrap(err, "error creating container runtime") } - err = cruntime.TagImage(source, target) + err = crMgr.TagImage(source, target) if err != nil { failed = append(failed, m) klog.Warningf("Failed to tag image for profile %s %v", pName, err.Error()) @@ -912,8 +912,8 @@ func TagImage(profile *config.Profile, source string, target string) error { } // pushImages pushes images from the container run time -func pushImages(cruntime cruntime.Manager, images []string) error { - klog.Infof("pushImages start: %s", images) +func pushImages(crMgr cruntime.Manager, imgs []string) error { + klog.Infof("pushImages start: %s", imgs) start := time.Now() defer func() { @@ -922,10 +922,10 @@ func pushImages(cruntime cruntime.Manager, images []string) error { var g errgroup.Group - for _, image := range images { - image := image + for _, img := range imgs { + img := img g.Go(func() error { - return cruntime.PushImage(image) + return crMgr.PushImage(img) }) } if err := g.Wait(); err != nil { @@ -973,11 +973,11 @@ func PushImages(images []string, profile *config.Profile) error { if err != nil { return err } - cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) + crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) if err != nil { return errors.Wrap(err, "error creating container runtime") } - err = pushImages(cruntime, images) + err = pushImages(crMgr, images) if err != nil { failed = append(failed, m) klog.Warningf("Failed to push image for profile %s %v", pName, err.Error()) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 056e042fbe..a883f1028a 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -37,7 +37,7 @@ import ( // deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file // used as last effort clean up not returning errors, won't warn user. 
func deleteOrphanedKIC(ociBin string, name string) { - if !(ociBin == oci.Podman || ociBin == oci.Docker) { + if ociBin != oci.Podman && ociBin != oci.Docker { return } @@ -68,8 +68,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool) delAbandoned = deleteAbandoned[0] } - host, err := api.Load(machineName) - if err != nil && host == nil && delAbandoned { + hostInfo, err := api.Load(machineName) + if err != nil && hostInfo == nil && delAbandoned { deleteOrphanedKIC(oci.Docker, machineName) deleteOrphanedKIC(oci.Podman, machineName) // Keep going even if minikube does not know about the host @@ -88,7 +88,7 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool) } // some drivers need manual shut down before delete to avoid getting stuck. - if driver.NeedsShutdown(host.Driver.DriverName()) { + if driver.NeedsShutdown(hostInfo.Driver.DriverName()) { if err := StopHost(api, machineName); err != nil { klog.Warningf("stop host: %v", err) } @@ -96,8 +96,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool) time.Sleep(1 * time.Second) } - out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - return deleteHost(api, host, machineName) + out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": hostInfo.DriverName}) + return deleteHost(api, hostInfo, machineName) } // delete removes a host and its local data files diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 63faa5acb1..082e624d42 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -209,12 +209,12 @@ func ensureSyncedGuestClock(h hostRunner, drv string) error { // guestClockDelta returns the approximate difference between the host and guest system clock // NOTE: This does not currently take into account 
ssh latency. func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) { - out, err := h.RunSSHCommand("date +%s.%N") + rest, err := h.RunSSHCommand("date +%s.%N") if err != nil { return 0, errors.Wrap(err, "get clock") } - klog.Infof("guest clock: %s", out) - ns := strings.Split(strings.TrimSpace(out), ".") + klog.Infof("guest clock: %s", rest) + ns := strings.Split(strings.TrimSpace(rest), ".") secs, err := strconv.ParseInt(strings.TrimSpace(ns[0]), 10, 64) if err != nil { return 0, errors.Wrap(err, "atoi") @@ -232,8 +232,8 @@ func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) { // adjustGuestClock adjusts the guest system clock to be nearer to the host system clock func adjustGuestClock(h hostRunner, t time.Time) error { - out, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix())) - klog.Infof("clock set: %s (err=%v)", out, err) + rest, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix())) + klog.Infof("clock set: %s (err=%v)", rest, err) return err } @@ -253,10 +253,12 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) { } func machineExistsDocker(s state.State, err error) (bool, error) { - if s == state.Error { + + switch s { + case state.Error: // if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return return false, constants.ErrMachineMissing - } else if s == state.None { + case state.None: // if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return return false, constants.ErrMachineMissing } diff --git a/pkg/minikube/machine/host.go b/pkg/minikube/machine/host.go index 02aff3bdbb..bf9c332510 100644 --- a/pkg/minikube/machine/host.go +++ b/pkg/minikube/machine/host.go @@ -35,12 +35,12 @@ func Status(api libmachine.API, machineName string) (string, error) { return state.None.String(), nil } - host, err := api.Load(machineName) + hostInfo, err := 
api.Load(machineName) if err != nil { return "", errors.Wrapf(err, "load") } - s, err := host.Driver.GetState() + s, err := hostInfo.Driver.GetState() if err != nil { return "", errors.Wrap(err, "state") } diff --git a/pkg/minikube/machine/info.go b/pkg/minikube/machine/info.go index 1e28b79984..968a1e1440 100644 --- a/pkg/minikube/machine/info.go +++ b/pkg/minikube/machine/info.go @@ -193,11 +193,11 @@ func cachedCPUInfo() ([]cpu.InfoStat, error) { } // ParseMemFree parses the output of the `free -m` command -func parseMemFree(out string) (int64, error) { +func parseMemFree(s string) (int64, error) { // total used free shared buff/cache available //Mem: 1987 706 194 1 1086 1173 //Swap: 0 0 0 - outlines := strings.Split(out, "\n") + outlines := strings.Split(s, "\n") l := len(outlines) for _, line := range outlines[1 : l-1] { parsedLine := strings.Fields(line) @@ -217,10 +217,10 @@ func parseMemFree(out string) (int64, error) { } // ParseDiskFree parses the output of the `df -m` command -func parseDiskFree(out string) (int64, error) { +func parseDiskFree(s string) (int64, error) { // Filesystem 1M-blocks Used Available Use% Mounted on // /dev/sda1 39643 3705 35922 10% / - outlines := strings.Split(out, "\n") + outlines := strings.Split(s, "\n") l := len(outlines) for _, line := range outlines[1 : l-1] { parsedLine := strings.Fields(line) diff --git a/pkg/minikube/machine/ssh.go b/pkg/minikube/machine/ssh.go index 4f37487bbe..a0206a5678 100644 --- a/pkg/minikube/machine/ssh.go +++ b/pkg/minikube/machine/ssh.go @@ -31,12 +31,12 @@ import ( // GetHost find node's host information by name in the given cluster. 
func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { machineName := config.MachineName(cc, n) - host, err := LoadHost(api, machineName) + hostInfo, err := LoadHost(api, machineName) if err != nil { return nil, errors.Wrap(err, "host exists and load") } - currentState, err := host.Driver.GetState() + currentState, err := hostInfo.Driver.GetState() if err != nil { return nil, errors.Wrap(err, "state") } @@ -45,12 +45,12 @@ func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return nil, errors.Errorf("%q is not running", machineName) } - return host, nil + return hostInfo, nil } // CreateSSHShell creates a new SSH shell / client func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string, native bool) error { - host, err := GetHost(api, cc, n) + hostInfo, err := GetHost(api, cc, n) if err != nil { return err } @@ -61,7 +61,7 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, ssh.SetDefaultClient(ssh.External) } - client, err := host.CreateSSHClient() + client, err := hostInfo.CreateSSHClient() if err != nil { return errors.Wrap(err, "Creating ssh client") @@ -71,16 +71,16 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, // GetSSHHostAddrPort returns the host address and port for ssh func GetSSHHostAddrPort(api libmachine.API, cc config.ClusterConfig, n config.Node) (string, int, error) { - host, err := GetHost(api, cc, n) + hostInfo, err := GetHost(api, cc, n) if err != nil { return "", 0, err } - addr, err := host.Driver.GetSSHHostname() + addr, err := hostInfo.Driver.GetSSHHostname() if err != nil { return "", 0, err } - port, err := host.Driver.GetSSHPort() + port, err := hostInfo.Driver.GetSSHPort() if err != nil { return "", 0, err } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index d71fa5e140..34a1f1b58c 100644 --- a/pkg/minikube/machine/start.go +++ 
b/pkg/minikube/machine/start.go @@ -412,7 +412,7 @@ func AddHostAlias(c command.Runner, name string, ip net.IP) error { return nil } -func addHostAliasCommand(name string, record string, sudo bool, path string) *exec.Cmd { +func addHostAliasCommand(name string, record string, sudo bool, destPath string) *exec.Cmd { sudoCmd := "sudo" if !sudo { // for testing sudoCmd = "" @@ -421,9 +421,9 @@ func addHostAliasCommand(name string, record string, sudo bool, path string) *ex script := fmt.Sprintf( `{ grep -v $'\t%s$' "%s"; echo "%s"; } > /tmp/h.$$; %s cp /tmp/h.$$ "%s"`, name, - path, + destPath, record, sudoCmd, - path) + destPath) return exec.Command("/bin/bash", "-c", script) } diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 4397052c74..e6a83029b2 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -95,9 +95,9 @@ func trySSHPowerOff(h *host.Host) error { err := oci.ShutDown(h.DriverName, h.Name) klog.Infof("shutdown container: err=%v", err) } else { - out, err := h.RunSSHCommand("sudo poweroff") + rest, err := h.RunSSHCommand("sudo poweroff") // poweroff always results in an error, since the host disconnects. 
- klog.Infof("poweroff result: out=%s, err=%v", out, err) + klog.Infof("poweroff result: out=%s, err=%v", rest, err) } return nil } diff --git a/pkg/minikube/mustload/mustload.go b/pkg/minikube/mustload/mustload.go index 5090698d87..7d61b81e6d 100644 --- a/pkg/minikube/mustload/mustload.go +++ b/pkg/minikube/mustload/mustload.go @@ -132,7 +132,7 @@ func running(name string, first bool) []ClusterController { continue } - host, err := machine.LoadHost(api, machineName) + hostInfo, err := machine.LoadHost(api, machineName) if err != nil { if last { exit.Message(reason.GuestLoadHost, `Unable to load control-plane node {{.name}} host: {{.err}}`, out.V{"name": machineName, "err": err}) @@ -141,7 +141,7 @@ func running(name string, first bool) []ClusterController { continue } - cr, err := machine.CommandRunner(host) + cr, err := machine.CommandRunner(hostInfo) if err != nil { if last { exit.Message(reason.InternalCommandRunner, `Unable to get control-plane node {{.name}} host command runner: {{.err}}`, out.V{"name": machineName, "err": err}) @@ -150,7 +150,7 @@ func running(name string, first bool) []ClusterController { continue } - hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) + hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, hostInfo.DriverName) if err != nil { if last { exit.Message(reason.DrvCPEndpoint, `Unable to get control-plane node {{.name}} endpoint: {{.err}}`, out.V{"name": machineName, "err": err}) @@ -164,7 +164,7 @@ func running(name string, first bool) []ClusterController { Config: cc, CP: ControlPlane{ Runner: cr, - Host: host, + Host: hostInfo, Node: &cp, Hostname: hostname, IP: ip, @@ -223,8 +223,8 @@ func Healthy(name string) ClusterController { // exitTip returns an action tip and exits func exitTip(action string, profile string, code int) { - command := ExampleCmd(profile, action) - out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command}) + cmd := 
ExampleCmd(profile, action) + out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": cmd}) exit.Code(code) } diff --git a/pkg/minikube/node/cache.go b/pkg/minikube/node/cache.go index 4efb50efef..34871d12b9 100644 --- a/pkg/minikube/node/cache.go +++ b/pkg/minikube/node/cache.go @@ -282,12 +282,12 @@ func imagesInConfigFile() ([]string, error) { } func updateKicImageRepo(imgName string, repo string) string { - image := strings.TrimPrefix(imgName, "gcr.io/") + imageName := strings.TrimPrefix(imgName, "gcr.io/") if repo == constants.AliyunMirror { // for aliyun registry must strip namespace from image name, e.g. // registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-minikube/kicbase:v0.0.25 will not work // registry.cn-hangzhou.aliyuncs.com/google_containers/kicbase:v0.0.25 does work - image = strings.TrimPrefix(image, "k8s-minikube/") + imageName = strings.TrimPrefix(imageName, "k8s-minikube/") } - return path.Join(repo, image) + return path.Join(repo, imageName) } diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 5f1d4ce127..9b6418c66e 100755 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -522,8 +522,8 @@ func cgroupDriver(cc config.ClusterConfig) string { return detect.CgroupDriver() } -func pathExists(runner cruntime.CommandRunner, path string) (bool, error) { - _, err := runner.RunCmd(exec.Command("stat", path)) +func pathExists(runner cruntime.CommandRunner, p string) (bool, error) { + _, err := runner.RunCmd(exec.Command("stat", p)) if err == nil { return true, nil } @@ -624,18 +624,18 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node, // setupKubeconfig generates kubeconfig. 
func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings { - host := cc.KubernetesConfig.APIServerHAVIP + hostIP := cc.KubernetesConfig.APIServerHAVIP port := cc.APIServerPort if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) { var err error - if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil { + if hostIP, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil { exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)}) } } - addr := fmt.Sprintf("https://%s", net.JoinHostPort(host, strconv.Itoa(port))) + addr := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, strconv.Itoa(port))) if cc.KubernetesConfig.APIServerName != constants.APIServerName { - addr = strings.ReplaceAll(addr, host, cc.KubernetesConfig.APIServerName) + addr = strings.ReplaceAll(addr, hostIP, cc.KubernetesConfig.APIServerName) } kcs := &kubeconfig.Settings{ @@ -654,29 +654,29 @@ func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, cluste } // StartMachine starts a VM -func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) { +func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, hostInfo *host.Host, err error) { m, err := machine.NewAPIClient() if err != nil { - return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client") + return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get machine client") } - host, preExists, err = startHostInternal(m, cfg, node, delOnFail) + hostInfo, preExists, err = startHostInternal(m, cfg, node, delOnFail) if err != nil { - return runner, preExists, m, host, 
errors.Wrap(err, "Failed to start host") + return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to start host") } - runner, err = machine.CommandRunner(host) + runner, err = machine.CommandRunner(hostInfo) if err != nil { - return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner") + return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get command runner") } - ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository) + ip, err := validateNetwork(hostInfo, runner, cfg.KubernetesConfig.ImageRepository) if err != nil { - return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network") + return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to validate network") } - if driver.IsQEMU(host.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) { + if driver.IsQEMU(hostInfo.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) { apiServerPort, err := getPort() if err != nil { - return runner, preExists, m, host, errors.Wrap(err, "Failed to find apiserver port") + return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to find apiserver port") } cfg.APIServerPort = apiServerPort } @@ -687,7 +687,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) out.FailureT("Failed to set NO_PROXY Env. 
Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) } - return runner, preExists, m, host, err + return runner, preExists, m, hostInfo, err } // getPort asks the kernel for a free open port that is ready to use @@ -707,9 +707,9 @@ func getPort() (int, error) { // startHostInternal starts a new minikube host using a VM or None func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.Node, delOnFail bool) (*host.Host, bool, error) { - host, exists, err := machine.StartHost(api, cc, n) + hostInfo, exists, err := machine.StartHost(api, cc, n) if err == nil { - return host, exists, nil + return hostInfo, exists, nil } klog.Warningf("error starting host: %v", err) // NOTE: People get very cranky if you delete their preexisting VM. Only delete new ones. @@ -722,7 +722,7 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N if err, ff := errors.Cause(err).(*oci.FailFastError); ff { klog.Infof("will skip retrying to create machine because error is not retriable: %v", err) - return host, exists, err + return hostInfo, exists, err } out.ErrT(style.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) @@ -739,15 +739,15 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N } } - host, exists, err = machine.StartHost(api, cc, n) + hostInfo, exists, err = machine.StartHost(api, cc, n) if err == nil { - return host, exists, nil + return hostInfo, exists, nil } // Don't use host.Driver to avoid nil pointer deref drv := cc.Driver out.ErrT(style.Sad, `Failed to start {{.driver}} {{.driver_type}}. 
Running "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "delete"), "error": err}) - return host, exists, err + return hostInfo, exists, err } // validateNetwork tries to catch network problems as soon as possible @@ -760,7 +760,8 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st optSeen := false warnedOnce := false for _, k := range proxy.EnvVars { - if v := os.Getenv(k); v != "" { + v := os.Getenv(k) + if v != "" { if !optSeen { out.Styled(style.Internet, "Found network options:") optSeen = true @@ -847,9 +848,9 @@ func tryRegistry(r command.Runner, driverName, imageRepository, ip string) { // 2 second timeout. For best results, call tryRegistry in a non-blocking manner. opts := []string{"-sS", "-m", "2"} - proxy := os.Getenv("HTTPS_PROXY") - if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { - opts = append([]string{"-x", proxy}, opts...) + httpsProxy := os.Getenv("HTTPS_PROXY") + if httpsProxy != "" && !strings.HasPrefix(httpsProxy, "localhost") && !strings.HasPrefix(httpsProxy, "127.0") { + opts = append([]string{"-x", httpsProxy}, opts...) 
} if imageRepository == "" { @@ -931,16 +932,16 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo // get current coredns configmap via kubectl get := fmt.Sprintf("sudo %s --kubeconfig=%s -n kube-system get configmap coredns -o yaml", kubectl, kubecfg) - out, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get)) + rest, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get)) if err != nil { klog.Errorf("failed to get current CoreDNS ConfigMap: %v", err) return err } - cm := strings.TrimSpace(out.Stdout.String()) + cm := strings.TrimSpace(rest.Stdout.String()) // check if this specific host entry already exists in coredns configmap, so not to duplicate/override it - host := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name)) - if host.MatchString(cm) { + hostInfo := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name)) + if hostInfo.MatchString(cm) { klog.Infof("CoreDNS already contains %q host record, skipping...", name) return nil } @@ -956,8 +957,8 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo } // check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it - logs := regexp.MustCompile(`(?smU)^ *log *$`) - if !logs.MatchString(cm) { + regex := regexp.MustCompile(`(?smU)^ *log *$`) + if !regex.MatchString(cm) { // inject log plugin into coredns configmap sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed) } diff --git a/pkg/minikube/notify/notify.go b/pkg/minikube/notify/notify.go index f398a645ed..489137bbe0 100644 --- a/pkg/minikube/notify/notify.go +++ b/pkg/minikube/notify/notify.go @@ -96,21 +96,21 @@ func maybePrintBetaUpdateText(betaReleasesURL string, localVersion semver.Versio return true } -func printUpdateTextCommon(version semver.Version) { +func printUpdateTextCommon(ver semver.Version) { if err := writeTimeToFile(lastUpdateCheckFilePath, time.Now().UTC()); err != nil { klog.Errorf("write time failed: %v", 
err) } - url := "https://github.com/kubernetes/minikube/releases/tag/v" + version.String() - out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": version, "url": url}) + url := "https://github.com/kubernetes/minikube/releases/tag/v" + ver.String() + out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": ver, "url": url}) } -func printUpdateText(version semver.Version) { - printUpdateTextCommon(version) +func printUpdateText(ver semver.Version) { + printUpdateTextCommon(ver) out.Styled(style.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n") } -func printBetaUpdateText(version semver.Version) { - printUpdateTextCommon(version) +func printBetaUpdateText(ver semver.Version) { + printUpdateTextCommon(ver) out.Styled(style.Tip, "To disable beta notices, run: 'minikube config set WantBetaUpdateNotification false'") out.Styled(style.Tip, "To disable update notices in general, run: 'minikube config set WantUpdateNotification false'\n") } @@ -248,14 +248,14 @@ func timeFromFileIfExists(path string) time.Time { } // DownloadURL returns a URL to get minikube binary version ver for platform os/arch -func DownloadURL(ver, os, arch string) string { - if ver == "" || strings.HasSuffix(ver, "-unset") || os == "" || arch == "" { +func DownloadURL(ver, osName, arch string) string { + if ver == "" || strings.HasSuffix(ver, "-unset") || osName == "" || arch == "" { return "https://github.com/kubernetes/minikube/releases" } sfx := "" - if os == "windows" { + if osName == "windows" { sfx = ".exe" } return fmt.Sprintf("https://github.com/kubernetes/minikube/releases/download/%s/minikube-%s-%s%s", - ver, os, arch, sfx) + ver, osName, arch, sfx) } diff --git a/pkg/minikube/out/out.go b/pkg/minikube/out/out.go index 9c3dcae952..065abc7dc3 100644 --- a/pkg/minikube/out/out.go +++ b/pkg/minikube/out/out.go @@ -107,8 +107,8 @@ func Styled(st style.Enum, 
format string, a ...V) { Infof(format, a...) return } - outStyled, spinner := stylized(st, useColor, format, a...) - if spinner { + outStyled, useSpinner := stylized(st, useColor, format, a...) + if useSpinner { spinnerString(outStyled) } else { String(outStyled) @@ -116,12 +116,12 @@ func Styled(st style.Enum, format string, a ...V) { } func boxedCommon(printFunc func(format string, a ...interface{}), cfg box.Config, title string, format string, a ...V) { - box := box.New(cfg) + b := box.New(cfg) if !useColor { - box.Config.Color = nil + b.Config.Color = nil } str := Sprintf(style.None, format, a...) - printFunc(box.String(title, strings.TrimSpace(str))) + printFunc(b.String(title, strings.TrimSpace(str))) } // Boxed writes a stylized and templated message in a box to stdout using the default style config diff --git a/pkg/minikube/perf/logs.go b/pkg/minikube/perf/logs.go index a76fc44656..1ac440abc2 100644 --- a/pkg/minikube/perf/logs.go +++ b/pkg/minikube/perf/logs.go @@ -51,13 +51,13 @@ func timeCommandLogs(cmd *exec.Cmd) (*result, error) { var timings []float64 for scanner.Scan() { - log := scanner.Text() + logData := scanner.Text() // this is the time it took to complete the previous log timeTaken := time.Since(timer).Seconds() - klog.Infof("%f: %s", timeTaken, log) + klog.Infof("%f: %s", timeTaken, logData) timer = time.Now() - logs = append(logs, log) + logs = append(logs, logData) timings = append(timings, timeTaken) } // add the time it took to get from the final log to finishing the command diff --git a/pkg/minikube/perf/start.go b/pkg/minikube/perf/start.go index 9a942da8cd..4ba3f9ba6c 100644 --- a/pkg/minikube/perf/start.go +++ b/pkg/minikube/perf/start.go @@ -63,12 +63,12 @@ func CompareMinikubeStart(ctx context.Context, binaries []*Binary) error { return nil } -func collectResults(ctx context.Context, binaries []*Binary, driver string, runtime string) (*resultManager, error) { +func collectResults(ctx context.Context, binaries []*Binary, driver 
string, runtimeName string) (*resultManager, error) { rm := newResultManager() for run := 0; run < runs; run++ { log.Printf("Executing run %d/%d...", run+1, runs) for _, binary := range binaries { - r, err := timeMinikubeStart(ctx, binary, driver, runtime) + r, err := timeMinikubeStart(ctx, binary, driver, runtimeName) if err != nil { return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name()) } @@ -97,9 +97,9 @@ func average(nums []float64) float64 { return total / float64(len(nums)) } -func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtime string) error { +func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtimeName string) error { for _, b := range binaries { - c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime)) + c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName)) c.Stderr = os.Stderr log.Printf("Running: %v...", c.Args) if err := c.Run(); err != nil { @@ -115,8 +115,8 @@ func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, r } // timeMinikubeStart returns the time it takes to execute `minikube start` -func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtime string) (*result, error) { - startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime)) +func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtimeName string) (*result, error) { + startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName)) startCmd.Stderr = os.Stderr r, err := timeCommandLogs(startCmd) @@ -147,6 +147,6 @@ func skipIngress(driver string) bool { // We only want to run the tests if: // 1. 
It's a VM driver and docker container runtime // 2. It's docker driver with any container runtime -func proceed(driver string, runtime string) bool { - return runtime == "docker" || driver == "docker" +func proceed(driver string, runtimeName string) bool { + return runtimeName == "docker" || driver == "docker" } diff --git a/pkg/minikube/process/process.go b/pkg/minikube/process/process.go index f461414fd1..0b93ef4317 100644 --- a/pkg/minikube/process/process.go +++ b/pkg/minikube/process/process.go @@ -33,7 +33,7 @@ func WritePidfile(path string, pid int) error { return os.WriteFile(path, []byte(data), pidfileMode) } -// ReadPid reads a pid from path. +// ReadPidfile reads a pid from path. func ReadPidfile(path string) (int, error) { data, err := os.ReadFile(path) if err != nil { diff --git a/pkg/minikube/registry/drvs/vfkit/vfkit.go b/pkg/minikube/registry/drvs/vfkit/vfkit.go index b6fe05fd70..06e7693547 100644 --- a/pkg/minikube/registry/drvs/vfkit/vfkit.go +++ b/pkg/minikube/registry/drvs/vfkit/vfkit.go @@ -98,7 +98,6 @@ func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) { DiskSize: cfg.DiskSize, Memory: cfg.Memory, CPU: cfg.CPUs, - Cmdline: "", ExtraDisks: cfg.ExtraDisks, Network: cfg.Network, MACAddress: mac, diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 87d89ad21e..188e8ef261 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -66,8 +66,8 @@ func init() { } // GetCoreClient returns a core client -func (k *K8sClientGetter) GetCoreClient(context string) (typed_core.CoreV1Interface, error) { - client, err := kapi.Client(context) +func (k *K8sClientGetter) GetCoreClient(ctx string) (typed_core.CoreV1Interface, error) { + client, err := kapi.Client(ctx) if err != nil { return nil, errors.Wrap(err, "client") } @@ -288,8 +288,8 @@ func WaitForService(api libmachine.API, cname string, namespace string, service } for _, bareURLString := range serviceURL.URLs { - url, _ 
:= OptionallyHTTPSFormattedURLString(bareURLString, https) - urlList = append(urlList, url) + urlString, _ := OptionallyHTTPSFormattedURLString(bareURLString, https) + urlList = append(urlList, urlString) } return urlList, nil } @@ -314,7 +314,7 @@ func getServiceListFromServicesByLabel(services typed_core.ServiceInterface, key } // CreateSecret creates or modifies secrets -func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labels map[string]string) error { +func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labelData map[string]string) error { client, err := K8s.GetCoreClient(cname) if err != nil { return &retry.RetriableError{Err: err} @@ -344,7 +344,7 @@ func CreateSecret(cname string, namespace, name string, dataValues map[string]st secretObj := &core.Secret{ ObjectMeta: meta.ObjectMeta{ Name: name, - Labels: labels, + Labels: labelData, }, Data: data, Type: core.SecretTypeOpaque, diff --git a/pkg/minikube/shell/shell.go b/pkg/minikube/shell/shell.go index 0c9958e423..d3ff9957d7 100644 --- a/pkg/minikube/shell/shell.go +++ b/pkg/minikube/shell/shell.go @@ -165,11 +165,11 @@ func Detect() (string, error) { } func (c EnvConfig) getShell() shellData { - shell, ok := shellConfigMap[c.Shell] + shellData, ok := shellConfigMap[c.Shell] if !ok { - shell = defaultShell + shellData = defaultShell } - return shell + return shellData } func generateUsageHint(ec EnvConfig, usgPlz, usgCmd string) string { diff --git a/pkg/minikube/storageclass/storageclass.go b/pkg/minikube/storageclass/storageclass.go index e64b4af1ab..055933d3ae 100644 --- a/pkg/minikube/storageclass/storageclass.go +++ b/pkg/minikube/storageclass/storageclass.go @@ -71,8 +71,8 @@ func SetDefaultStorageClass(storage storagev1.StorageV1Interface, name string) e } // GetStoragev1 return storage v1 interface for client -func GetStoragev1(context string) (storagev1.StorageV1Interface, error) { - client, err := kapi.Client(context) +func 
GetStoragev1(ctx string) (storagev1.StorageV1Interface, error) { + client, err := kapi.Client(ctx) if err != nil { return nil, err } diff --git a/pkg/minikube/tests/api_mock.go b/pkg/minikube/tests/api_mock.go index 022d1103c5..4f553b027f 100644 --- a/pkg/minikube/tests/api_mock.go +++ b/pkg/minikube/tests/api_mock.go @@ -137,10 +137,10 @@ func (api *MockAPI) Remove(name string) error { } // Save saves a host to disk. -func (api *MockAPI) Save(host *host.Host) error { +func (api *MockAPI) Save(hostInfo *host.Host) error { api.SaveCalled = true - api.Logf("MockAPI.Save: %+v", host) - return api.FakeStore.Save(host) + api.Logf("MockAPI.Save: %+v", hostInfo) + return api.FakeStore.Save(hostInfo) } // GetMachinesDir returns the directory to store machines in. diff --git a/pkg/minikube/tunnel/cluster_inspector.go b/pkg/minikube/tunnel/cluster_inspector.go index 95aae6c459..658c60a92a 100644 --- a/pkg/minikube/tunnel/cluster_inspector.go +++ b/pkg/minikube/tunnel/cluster_inspector.go @@ -80,10 +80,10 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) { return hostState, route, nil } -func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, error) { - hostDriverIP, err := host.Driver.GetIP() +func getRoute(hostInfo *host.Host, clusterConfig config.ClusterConfig) (*Route, error) { + hostDriverIP, err := hostInfo.Driver.GetIP() if err != nil { - return nil, errors.Wrapf(err, "error getting host IP for %s", host.Name) + return nil, errors.Wrapf(err, "error getting host IP for %s", hostInfo.Name) } _, ipNet, err := net.ParseCIDR(clusterConfig.KubernetesConfig.ServiceCIDR) diff --git a/pkg/minikube/tunnel/kic/service_tunnel.go b/pkg/minikube/tunnel/kic/service_tunnel.go index 264301f74c..aa8997d596 100644 --- a/pkg/minikube/tunnel/kic/service_tunnel.go +++ b/pkg/minikube/tunnel/kic/service_tunnel.go @@ -28,7 +28,8 @@ import ( "k8s.io/klog/v2" ) -// ServiceTunnel ... +// ServiceTunnel manages an SSH tunnel for a Kubernetes service. 
+// It holds configuration for the SSH connection and the tunnel's state. type ServiceTunnel struct { sshPort string sshKey string @@ -37,7 +38,11 @@ type ServiceTunnel struct { suppressStdOut bool } -// NewServiceTunnel ... +// NewServiceTunnel creates and returns a new ServiceTunnel instance. +// sshPort is the port number for the SSH connection. +// sshKey is the path to the SSH private key file. +// v1Core is the Kubernetes CoreV1 client interface for interacting with services. +// suppressStdOut controls whether standard output from the tunnel process should be suppressed. func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface, suppressStdOut bool) *ServiceTunnel { return &ServiceTunnel{ sshPort: sshPort, @@ -47,7 +52,12 @@ func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface, } } -// Start ... +// Start establishes an SSH tunnel for the specified Kubernetes service. +// It retrieves service details, creates an SSH connection with random local ports +// for each service port, and starts the tunnel in a new goroutine. +// It returns a slice of URLs (e.g., "http://127.0.0.1:local_port") corresponding +// to the tunnelled ports, or an error if the setup fails. +// Errors from the tunnel running in the background are logged via klog. func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) { svc, err := t.v1Core.Services(namespace).Get(context.Background(), svcName, metav1.GetOptions{}) if err != nil { @@ -75,7 +85,8 @@ func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) { return urls, nil } -// Stop ... +// Stop attempts to gracefully stop the active SSH tunnel. +// Any errors encountered during the stop process are logged as warnings. 
func (t *ServiceTunnel) Stop() { err := t.sshConn.stop() if err != nil { diff --git a/pkg/minikube/tunnel/kic/ssh_conn.go b/pkg/minikube/tunnel/kic/ssh_conn.go index ead2ec5768..7410019112 100644 --- a/pkg/minikube/tunnel/kic/ssh_conn.go +++ b/pkg/minikube/tunnel/kic/ssh_conn.go @@ -130,20 +130,20 @@ func createSSHConnWithRandomPorts(name, sshPort, sshKey string, svc *v1.Service) usedPorts := make([]int, 0, len(svc.Spec.Ports)) for _, port := range svc.Spec.Ports { - freeport, err := freeport.GetFreePort() + freePort, err := freeport.GetFreePort() if err != nil { return nil, err } arg := fmt.Sprintf( "-L %d:%s:%d", - freeport, + freePort, svc.Spec.ClusterIP, port.Port, ) sshArgs = append(sshArgs, arg) - usedPorts = append(usedPorts, freeport) + usedPorts = append(usedPorts, freePort) } cmd := exec.Command("ssh", sshArgs...) diff --git a/pkg/minikube/tunnel/kic/ssh_tunnel.go b/pkg/minikube/tunnel/kic/ssh_tunnel.go index 001639ef76..fcf795358d 100644 --- a/pkg/minikube/tunnel/kic/ssh_tunnel.go +++ b/pkg/minikube/tunnel/kic/ssh_tunnel.go @@ -32,7 +32,9 @@ import ( "k8s.io/minikube/pkg/minikube/tunnel" ) -// SSHTunnel ... +// SSHTunnel manages and reconciles SSH tunnels for Kubernetes Services +// (specifically type LoadBalancer) and Ingress resources. It periodically +// checks the cluster state and creates, maintains, or removes tunnels as needed. type SSHTunnel struct { ctx context.Context sshPort string @@ -45,7 +47,13 @@ type SSHTunnel struct { connsToStop map[string]*sshConn } -// NewSSHTunnel ... +// NewSSHTunnel creates and returns a new SSHTunnel instance. +// ctx is the context that controls the lifecycle of the tunnel manager. +// sshPort is the port number of the SSH server to connect to. +// sshKey is the path to the SSH private key file for authentication. +// bindAddress is the local address on which the tunnels will listen. +// v1Core is a Kubernetes CoreV1 client interface for interacting with Services. 
+// v1Networking is a Kubernetes NetworkingV1 client interface for interacting with Ingresses. func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Core typed_core.CoreV1Interface, v1Networking typed_networking.NetworkingV1Interface) *SSHTunnel { return &SSHTunnel{ ctx: ctx, @@ -60,7 +68,12 @@ func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Co } } -// Start ... +// Start begins the main reconciliation loop for the SSHTunnel. +// This loop periodically scans for Kubernetes Services (type LoadBalancer) +// and Ingresses, creating or tearing down SSH tunnels as necessary. +// This method blocks until the provided context (t.ctx) is canceled. +// It returns any error associated with context cancellation or initial setup. +// Runtime errors during the tunnel management loop are logged via klog. func (t *SSHTunnel) Start() error { for { select { diff --git a/pkg/minikube/tunnel/registry.go b/pkg/minikube/tunnel/registry.go index 374ab23124..b443df96a4 100644 --- a/pkg/minikube/tunnel/registry.go +++ b/pkg/minikube/tunnel/registry.go @@ -91,17 +91,18 @@ func (r *persistentRegistry) Register(tunnel *ID) (rerr error) { // tunnels simultaneously. It is possible that an old tunnel // from an old profile has duplicated route information so we // need to check both machine name and route information. 
- if tunnel.MachineName == t.MachineName && t.Route.Equal(tunnel.Route) { - isRunning, err := checkIfRunning(t.Pid) - if err != nil { - return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err) - } - if isRunning { - return errorTunnelAlreadyExists(t) - } - tunnels[i] = tunnel - alreadyExists = true + if tunnel.MachineName != t.MachineName || !tunnel.Route.Equal(t.Route) { + continue + } + isRunning, err := checkIfRunning(t.Pid) + if err != nil { + return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err) + } + if isRunning { + return errorTunnelAlreadyExists(t) } + tunnels[i] = tunnel + alreadyExists = true } if !alreadyExists { diff --git a/pkg/minikube/tunnel/reporter.go b/pkg/minikube/tunnel/reporter.go index 8535a7dcdf..e6502f929e 100644 --- a/pkg/minikube/tunnel/reporter.go +++ b/pkg/minikube/tunnel/reporter.go @@ -68,7 +68,7 @@ func (r *simpleReporter) Report(tunnelState *Status) { loadbalancer emulator: %s `, minikubeError, routerError, lbError) - _, err := r.out.Write([]byte(fmt.Sprintf( + _, err := fmt.Fprintf(r.out, `Status: machine: %s pid: %d @@ -80,7 +80,7 @@ func (r *simpleReporter) Report(tunnelState *Status) { tunnelState.TunnelID.Route, minikubeState, managedServices, - errors))) + errors) if err != nil { klog.Errorf("failed to report state %s", err) } diff --git a/pkg/minikube/tunnel/route.go b/pkg/minikube/tunnel/route.go index 68f4888770..dbd0bbaf66 100644 --- a/pkg/minikube/tunnel/route.go +++ b/pkg/minikube/tunnel/route.go @@ -105,7 +105,7 @@ func (t *routingTable) Equal(other *routingTable) bool { for i := range *t { routesEqual := (*t)[i].route.Equal((*other)[i].route) linesEqual := (*t)[i].line == ((*other)[i].line) - if !(routesEqual && linesEqual) { + if !routesEqual || !linesEqual { return false } } diff --git a/pkg/perf/monitor/github.go b/pkg/perf/monitor/github.go index ea9b2e2848..93ff15395e 100644 --- a/pkg/perf/monitor/github.go +++ b/pkg/perf/monitor/github.go @@ 
-22,7 +22,7 @@ import ( "os" "time" - "github.com/google/go-github/v72/github" + "github.com/google/go-github/v73/github" "github.com/pkg/errors" "golang.org/x/oauth2" ) diff --git a/pkg/storage/storage_provisioner.go b/pkg/storage/storage_provisioner.go index 1a3f753e31..569f7bca19 100644 --- a/pkg/storage/storage_provisioner.go +++ b/pkg/storage/storage_provisioner.go @@ -57,14 +57,14 @@ var _ controller.Provisioner = &hostPathProvisioner{} // Provision creates a storage asset and returns a PV object representing it. func (p *hostPathProvisioner) Provision(_ context.Context, options controller.ProvisionOptions) (*core.PersistentVolume, controller.ProvisioningState, error) { - path := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name) - klog.Infof("Provisioning volume %v to %s", options, path) - if err := os.MkdirAll(path, 0777); err != nil { + hostPath := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name) + klog.Infof("Provisioning volume %v to %s", options, hostPath) + if err := os.MkdirAll(hostPath, 0777); err != nil { return nil, controller.ProvisioningFinished, err } // Explicitly chmod created dir, so we know mode is set to 0777 regardless of umask - if err := os.Chmod(path, 0777); err != nil { + if err := os.Chmod(hostPath, 0777); err != nil { return nil, controller.ProvisioningFinished, err } @@ -83,7 +83,7 @@ func (p *hostPathProvisioner) Provision(_ context.Context, options controller.Pr }, PersistentVolumeSource: core.PersistentVolumeSource{ HostPath: &core.HostPathVolumeSource{ - Path: path, + Path: hostPath, }, }, }, diff --git a/site/content/en/docs/_index.md b/site/content/en/docs/_index.md index afd40960be..b32718c3ff 100644 --- a/site/content/en/docs/_index.md +++ b/site/content/en/docs/_index.md @@ -11,7 +11,7 @@ minikube quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows ![Screenshot](/images/screenshot.png) -🎉 Latest Release: v1.35.0 - Jan 15, 2025 
([changelog](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md)) +🎉 Latest Release: v1.36.0 - May 22, 2025 ([changelog](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md)) ## Highlights diff --git a/site/content/en/docs/benchmarks/timeToK8s/v1.36.0.md b/site/content/en/docs/benchmarks/timeToK8s/v1.36.0.md new file mode 100644 index 0000000000..1667055454 --- /dev/null +++ b/site/content/en/docs/benchmarks/timeToK8s/v1.36.0.md @@ -0,0 +1,27 @@ +--- +title: "v1.36.0 Benchmark" +linkTitle: "v1.36.0 Benchmark" +weight: -20250522 +--- + +![time-to-k8s](/images/benchmarks/timeToK8s/v1.36.0-time.png) + +| | minikube version: v1.36.0 | kind v0.29.0 go1.24.2 linux/amd64 | k3d version v5.8.3 | +|----------------------|---------------------------|-----------------------------------|--------------------| +| Command Exec | 21.056 | 14.240 | 12.593 | +| API Server Answering | 0.055 | 0.056 | 0.067 | +| Kubernetes SVC | 0.048 | 0.051 | 0.052 | +| DNS SVC | 0.048 | 0.049 | 0.049 | +| App Running | 6.393 | 17.979 | 3.039 | +| DNS Answering | 23.211 | 0.601 | 4.137 | +| Total | 50.812 | 32.977 | 19.937 | + + + +![cpu-to-k8s](/images/benchmarks/timeToK8s/v1.36.0-cpu.png) + +| | minikube version: v1.36.0 | kind v0.29.0 go1.24.2 linux/amd64 | k3d version v5.8.3 | +|--------------------|---------------------------|-----------------------------------|--------------------| +| CPU Utilization(%) | 18.742 | 33.290 | 34.301 | +| CPU Time(seconds) | 8.712 | 10.952 | 6.813 | + diff --git a/site/content/en/docs/commands/profile.md b/site/content/en/docs/commands/profile.md index f4bea12057..73fc091a8d 100644 --- a/site/content/en/docs/commands/profile.md +++ b/site/content/en/docs/commands/profile.md @@ -93,6 +93,7 @@ minikube profile list [flags] ### Options ``` + -d, --detailed If true, returns a detailed list of profiles. -l, --light If true, returns list of profiles faster by skipping validating the status of the cluster. -o, --output string The output format. 
One of 'json', 'table' (default "table") ``` diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 02a9215d42..df1cb28752 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -48,7 +48,6 @@ minikube start [flags] -d, --driver string Used to specify the driver to run Kubernetes in. The list of available drivers depends on operating system. --dry-run dry-run mode. Validates configuration, but does not mutate system state --embed-certs if true, will embed the certs in kubeconfig. - --enable-default-cni DEPRECATED: Replaced by --cni=bridge --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler @@ -74,14 +73,14 @@ minikube start [flags] --interactive Allow user prompts for more information (default true) --iso-url strings Locations to fetch the minikube ISO from. The list depends on the machine architecture. --keep-context This will keep the existing kubectl context and will create a minikube context. - --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.33.1, 'latest' for v1.33.1). Defaults to 'stable'. + --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.33.2, 'latest' for v1.33.2). Defaults to 'stable'. --kvm-gpu Enable experimental NVIDIA GPU support in minikube --kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only) --kvm-network string The KVM default network name. 
(kvm2 driver only) (default "default") --kvm-numa-count int Simulate numa node count in minikube, supported numa node count range is 1-8 (kvm2 driver only) (default 1) --kvm-qemu-uri string The KVM QEMU connection URI. (kvm2 driver only) (default "qemu:///system") --listen-address string IP Address to use to expose ports (docker and podman driver only) - --memory string Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). Use "max" to use the maximum amount of memory. Use "no-limit" to not specify a limit (Docker/Podman only) + -m, --memory string Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). Use "max" to use the maximum amount of memory. Use "no-limit" to not specify a limit (Docker/Podman only) --mount This will start the mount daemon and automatically mount files into minikube. --mount-9p-version string Specify the 9p version that the mount should use (default "9p2000.L") --mount-gid string Default group id used for the mount (default "docker") @@ -96,7 +95,6 @@ minikube start [flags] --nat-nic-type string NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true) --network string network to run minikube with. Used by docker/podman, qemu, kvm, and vfkit drivers. If left empty, minikube will create a new network. - --network-plugin string DEPRECATED: Replaced by --cni --nfs-share strings Local folders to share with Guest via NFS mounts (hyperkit driver only) --nfs-shares-root string Where to root the NFS Shares, defaults to /nfsshares (hyperkit driver only) (default "/nfsshares") --no-kubernetes If set, minikube VM/container will start without starting or configuring Kubernetes. 
(only works on new clusters) @@ -119,7 +117,6 @@ minikube start [flags] --trace string Send trace events. Options include: [gcp] --uuid string Provide VM UUID to restore MAC address (hyperkit driver only) --vm Filter to use only VM Drivers - --vm-driver driver DEPRECATED, use driver instead. --wait strings comma separated list of Kubernetes components to verify and wait for after starting a cluster. defaults to "apiserver,system_pods", available options: "apiserver,system_pods,default_sa,apps_running,node_ready,kubelet,extra" . other acceptable values are 'all' or 'none', 'true' and 'false' (default [apiserver,system_pods]) --wait-timeout duration max time to wait per Kubernetes or host to be healthy. (default 6m0s) ``` diff --git a/site/content/en/docs/contrib/building/iso.md b/site/content/en/docs/contrib/building/iso.md index 2af07c6eba..05d13d7d98 100644 --- a/site/content/en/docs/contrib/building/iso.md +++ b/site/content/en/docs/contrib/building/iso.md @@ -10,6 +10,9 @@ The minikube ISO is booted by each hypervisor to provide a stable minimal Linux ## Prerequisites +* Machine with x86\_64 CPU +* Ubuntu 22.04.5 LTS (Jammy Jellyfish) +* docker * A recent GNU Make distribution (>=4.0) * A recent Go distribution (>=1.22.0) * If you are on Windows or Mac, you'll need Docker to be installed. @@ -25,30 +28,59 @@ cd minikube ## Building ### Building in Docker -To build for x86 + +To build for x86: + ```shell $ make buildroot-image -$ make out/minikube-amd64.iso +$ make minikube-iso-x86_64 ``` -To build for ARM +To build for ARM: + ```shell $ make buildroot-image -$ make out/minikube-arm64.iso +$ make minikube-iso-aarch64 ``` -The build will occur inside a docker container. +The build will occur inside a docker container. The bootable ISO image will be available in `out/minikube-.iso`. -### Building on Baremetal -If you want to do this on baremetal, replace `make out/minikube-.iso` with `IN_DOCKER=1 make out/minikube-.iso`. 
+### Building without docker + +Install required tools: + +```shell +sudo apt-get install \ + automake \ + bc \ + build-essential \ + cpio \ + gcc-multilib \ + genisoimage \ + git \ + gnupg2 \ + libtool \ + locales \ + p7zip-full \ + python2 \ + unzip \ + wget \ +``` + +Install Go using these instructions: +https://go.dev/doc/install + +To build without docker run: -* Prerequisite build tools to install: ```shell -sudo apt-get install build-essential gnupg2 p7zip-full git wget cpio python \ - unzip bc gcc-multilib automake libtool locales +IN_DOCKER=1 make minikube-iso- ``` +> [!IMPORTANT] +> Some external projects will try to use docker even when building +> without docker. You must install docker on the build host. + ## Using a local ISO image ```shell diff --git a/site/content/en/docs/contrib/leaderboard/2025.html b/site/content/en/docs/contrib/leaderboard/2025.html index 73c283911f..3c3d2276f1 100644 --- a/site/content/en/docs/contrib/leaderboard/2025.html +++ b/site/content/en/docs/contrib/leaderboard/2025.html @@ -87,7 +87,7 @@

kubernetes/minikube

-
2025-01-01 — 2025-04-30
+
2025-01-01 — 2025-05-31

Reviewers

@@ -103,9 +103,11 @@

Most Influential

function drawreviewCounts() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Merged PRs reviewed', type: 'number'}, { role: 'annotation' }], - ["medyagh", 13, "13"], - ["cfergeau", 1, "1"], - ["nirs", 1, "1"], + ["medyagh", 24, "24"], + ["nirs", 4, "4"], + ["prezha", 3, "3"], + ["cfergeau", 2, "2"], + ["afbjorklund", 1, "1"], ]); @@ -138,9 +140,11 @@

Most Helpful

function drawreviewWords() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of words written in merged PRs', type: 'number'}, { role: 'annotation' }], - ["medyagh", 369, "369"], - ["nirs", 62, "62"], - ["cfergeau", 10, "10"], + ["medyagh", 857, "857"], + ["nirs", 615, "615"], + ["prezha", 523, "523"], + ["cfergeau", 228, "228"], + ["afbjorklund", 98, "98"], ]); @@ -173,9 +177,11 @@

Most Demanding

function drawreviewComments() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Review Comments in merged PRs', type: 'number'}, { role: 'annotation' }], - ["medyagh", 9, "9"], - ["nirs", 2, "2"], - ["cfergeau", 1, "1"], + ["nirs", 26, "26"], + ["medyagh", 19, "19"], + ["prezha", 9, "9"], + ["cfergeau", 2, "2"], + ["afbjorklund", 1, "1"], ]); @@ -212,21 +218,21 @@

Most Active

function drawprCounts() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of Pull Requests Merged', type: 'number'}, { role: 'annotation' }], - ["medyagh", 9, "9"], - ["ComradeProgrammer", 7, "7"], + ["medyagh", 12, "12"], + ["nirs", 10, "10"], + ["ComradeProgrammer", 9, "9"], ["prezha", 6, "6"], - ["nirs", 3, "3"], + ["LJTian", 3, "3"], + ["Victorthedev", 2, "2"], + ["zvdy", 2, "2"], ["Aaina26", 1, "1"], + ["danielcristho", 1, "1"], + ["cdw8431", 1, "1"], + ["SzymonNadbrzezny", 1, "1"], + ["liangyuanpeng", 1, "1"], ["xcarolan", 1, "1"], - ["luchenhan", 1, "1"], ["jeffmaury", 1, "1"], ["wuwentao", 1, "1"], - ["cdw8431", 1, "1"], - ["liangyuanpeng", 1, "1"], - ["LJTian", 1, "1"], - ["danielcristho", 1, "1"], - ["joaquimrocha", 1, "1"], - ["SzymonNadbrzezny", 1, "1"], ]); @@ -259,21 +265,21 @@

Big Movers

function drawprDeltas() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: 'Lines of code (delta)', type: 'number'}, { role: 'annotation' }], - ["nirs", 598, "598"], + ["nirs", 1448, "1448"], + ["LJTian", 1264, "1264"], + ["ComradeProgrammer", 742, "742"], + ["panyam", 738, "738"], + ["Victorthedev", 507, "507"], ["prezha", 405, "405"], - ["medyagh", 299, "299"], - ["ComradeProgrammer", 262, "262"], + ["medyagh", 360, "360"], ["xcarolan", 27, "27"], + ["zvdy", 21, "21"], ["liangyuanpeng", 10, "10"], ["luchenhan", 4, "4"], ["wuwentao", 2, "2"], - ["SzymonNadbrzezny", 2, "2"], + ["Cosmicoppai", 2, "2"], ["joaquimrocha", 2, "2"], - ["Aaina26", 0, "0"], - ["LJTian", 0, "0"], - ["jeffmaury", 0, "0"], - ["cdw8431", 0, "0"], - ["danielcristho", 0, "0"], + ["SzymonNadbrzezny", 2, "2"], ]); @@ -306,21 +312,21 @@

Most difficult to review

function drawprSize() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: 'Average PR size (added+changed)', type: 'number'}, { role: 'annotation' }], - ["nirs", 186, "186"], + ["panyam", 504, "504"], + ["LJTian", 247, "247"], + ["Victorthedev", 155, "155"], + ["nirs", 131, "131"], + ["ComradeProgrammer", 47, "47"], ["prezha", 35, "35"], - ["ComradeProgrammer", 30, "30"], ["xcarolan", 25, "25"], - ["medyagh", 22, "22"], + ["medyagh", 20, "20"], + ["zvdy", 9, "9"], ["liangyuanpeng", 5, "5"], ["luchenhan", 2, "2"], ["wuwentao", 1, "1"], ["joaquimrocha", 1, "1"], ["SzymonNadbrzezny", 1, "1"], - ["danielcristho", 0, "0"], - ["Aaina26", 0, "0"], - ["jeffmaury", 0, "0"], - ["cdw8431", 0, "0"], - ["LJTian", 0, "0"], + ["Cosmicoppai", 1, "1"], ]); @@ -360,17 +366,17 @@

Most Active

["Ritikaa96", 60, "60"], ["Ruchi1499", 42, "42"], ["dhairya-seth", 31, "31"], - ["afbjorklund", 17, "17"], - ["medyagh", 16, "16"], + ["afbjorklund", 24, "24"], + ["medyagh", 19, "19"], + ["nirs", 11, "11"], ["AmarNathChary", 10, "10"], + ["LJTian", 8, "8"], ["kundan2707", 7, "7"], - ["nirs", 6, "6"], - ["LJTian", 6, "6"], - ["ljluestc", 4, "4"], ["xcarolan", 4, "4"], - ["Angelin01", 3, "3"], + ["ljluestc", 4, "4"], + ["rvs497", 3, "3"], ["panyam", 3, "3"], - ["azrulafiq", 3, "3"], + ["iankingori", 3, "3"], ["arlandi1974", 3, "3"], ]); @@ -405,20 +411,20 @@

Most Helpful

var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of words (excludes authored)', type: 'number'}, { role: 'annotation' }], ["huttsMichael", 2234, "2234"], + ["nirs", 1205, "1205"], ["Ruchi1499", 1084, "1084"], ["Ritikaa96", 988, "988"], ["mardonner", 790, "790"], ["ljluestc", 715, "715"], ["dhairya-seth", 615, "615"], - ["medyagh", 520, "520"], - ["afbjorklund", 440, "440"], + ["medyagh", 603, "603"], + ["afbjorklund", 571, "571"], + ["polarathene", 556, "556"], ["Zenner2", 423, "423"], - ["nirs", 361, "361"], ["danmaninc", 336, "336"], ["ptiseo-mcs", 326, "326"], ["haoyun", 314, "314"], ["ComradeProgrammer", 221, "221"], - ["LJTian", 139, "139"], ]); @@ -451,7 +457,7 @@

Top Closers

function drawissueCloser() { var data = new google.visualization.arrayToDataTable([ [{label:'',type:'string'},{label: '# of issues closed (excludes authored)', type: 'number'}, { role: 'annotation' }], - ["medyagh", 6, "6"], + ["medyagh", 10, "10"], ["prezha", 2, "2"], ["afbjorklund", 1, "1"], diff --git a/site/content/en/docs/contrib/leaderboard/v1.36.0.html b/site/content/en/docs/contrib/leaderboard/v1.36.0.html new file mode 100644 index 0000000000..ed6ec87768 --- /dev/null +++ b/site/content/en/docs/contrib/leaderboard/v1.36.0.html @@ -0,0 +1,488 @@ +--- +title: "v1.36.0 - 2025-05-22" +linkTitle: "v1.36.0 - 2025-05-22" +weight: -122 +--- + + + kubernetes/minikube - Leaderboard + + + + + + + + +

kubernetes/minikube

+
2025-01-16 — 2025-05-22
+ + +

Reviewers

+ + +
+

Most Influential

+

# of Merged PRs reviewed

+
+ +
+ +
+

Most Helpful

+

# of words written in merged PRs

+
+ +
+ +
+

Most Demanding

+

# of Review Comments in merged PRs

+
+ +
+ + +

Pull Requests

+ + +
+

Most Active

+

# of Pull Requests Merged

+
+ +
+ +
+

Big Movers

+

Lines of code (delta)

+
+ +
+ +
+

Most difficult to review

+

Average PR size (added+changed)

+
+ +
+ + +

Issues

+ + +
+

Most Active

+

# of comments

+
+ +
+ +
+

Most Helpful

+

# of words (excludes authored)

+
+ +
+ +
+

Top Closers

+

# of issues closed (excludes authored)

+
+ +
+ + + + diff --git a/site/content/en/docs/drivers/vfkit.md b/site/content/en/docs/drivers/vfkit.md index 14a6b00114..a99aee8246 100644 --- a/site/content/en/docs/drivers/vfkit.md +++ b/site/content/en/docs/drivers/vfkit.md @@ -11,6 +11,11 @@ aliases: macOS virtualization, optimized for lightweight virtual machines and container deployment. +## Requirements + +- Requires macOS 13 or later. +- Requires minikube version 1.36.0 or later. + ## Networking The vfkit driver has two networking options: `nat` and `vmnet-shared`. @@ -25,8 +30,6 @@ installation instructions bellow. ### Requirements -- Requires macOS 10.15 or later -- Requires minikube version 1.36.0 or later. - Requires [vmnet-helper](https://github.com/nirs/vmnet-helper). ### Install vment-helper @@ -94,13 +97,10 @@ Run `minikube start --driver vfkit --alsologtostderr -v=7` to debug crashes ### Upgrade VFKit -New updates to macOS often require an updated vfkit driver. To upgrade: - -* If Podman Desktop is installed, it also bundles `vfkit` -* If you have Brew Package Manager, run: `brew upgrade vfkit` -* As a final alternative, you install the latest VFKit from [GitHub](https://github.com/crc-org/vfkit/releases) -* To check your current version, run: `vfkit -v` -* If the version didn't change after upgrading verify the correct VFKit is in the path. run: `which vfkit` +```shell +brew update +brew upgrade vfkit +``` ### Troubleshooting the vmnet-shared network diff --git a/site/content/en/docs/handbook/addons/ingress-dns.md b/site/content/en/docs/handbook/addons/ingress-dns.md index cfe05c9ce7..4f49f78061 100644 --- a/site/content/en/docs/handbook/addons/ingress-dns.md +++ b/site/content/en/docs/handbook/addons/ingress-dns.md @@ -291,10 +291,24 @@ Do not use .local as this is a reserved TLD for mDNS and bind9 DNS servers #### mDNS reloading Each time a file is created or a change is made to a file in `/etc/resolver` you may need to run the following to reload Mac OS mDNS resolver. 
+For macOS versions prior to Big Sur, you can reload the mDNS resolver using the following legacy commands: + ```bash sudo launchctl unload -w /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist sudo launchctl load -w /System/Library/LaunchDaemons/com.apple.mDNSResponder.plist ``` +However, if you're using a newer macOS version (Big Sur and beyond), running the legacy commands may result in the following error: + +```bash +Load failed: 5: Input/output error +Try running `launchctl bootstrap` as root for richer errors. +``` +In this case, the recommended approach is to use the following commands instead: + +```bash +sudo launchctl enable system/com.apple.mDNSResponder.reloaded +sudo launchctl disable system/com.apple.mDNSResponder.reloaded +``` ## TODO - Add a service that runs on the host OS which will update the files in `/etc/resolver` automatically diff --git a/site/package-lock.json b/site/package-lock.json index 87b9594e44..564d18a9be 100644 --- a/site/package-lock.json +++ b/site/package-lock.json @@ -146,12 +146,12 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -446,9 +446,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "dependencies": { "to-regex-range": "^5.0.1" diff --git a/site/static/images/benchmarks/timeToK8s/v1.36.0-cpu.png b/site/static/images/benchmarks/timeToK8s/v1.36.0-cpu.png new file mode 100644 index 0000000000..7d2f3e9f8e Binary files /dev/null and b/site/static/images/benchmarks/timeToK8s/v1.36.0-cpu.png differ diff --git a/site/static/images/benchmarks/timeToK8s/v1.36.0-time.png b/site/static/images/benchmarks/timeToK8s/v1.36.0-time.png new file mode 100644 index 0000000000..c03b66d339 Binary files /dev/null and b/site/static/images/benchmarks/timeToK8s/v1.36.0-time.png differ diff --git a/site/themes/docsy b/site/themes/docsy index cf0c68f041..ace4e37cee 160000 --- a/site/themes/docsy +++ b/site/themes/docsy @@ -1 +1 @@ -Subproject commit cf0c68f041daac066a0292d521461dbd092d7c31 +Subproject commit ace4e37ceedcec9c48d329adb1128201061ef23d diff --git a/test/integration/aab_offline_test.go b/test/integration/aab_offline_test.go index 25ea2716c9..19f6824a70 100644 --- a/test/integration/aab_offline_test.go +++ b/test/integration/aab_offline_test.go @@ -43,7 +43,7 @@ func TestOffline(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2048", "--wait=true"} + startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=3072", "--wait=true"} startArgs = append(startArgs, StartArgs()...) c := exec.CommandContext(ctx, Target(), startArgs...) 
env := os.Environ() diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 7e719a16a7..837a19ecbb 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -101,7 +101,7 @@ func TestAddons(t *testing.T) { // so we override that here to let minikube auto-detect appropriate cgroup driver os.Setenv(constants.MinikubeForceSystemdEnv, "") - args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=registry-creds", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=nvidia-device-plugin", "--addons=yakd", "--addons=volcano", "--addons=amd-gpu-device-plugin"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4096", "--alsologtostderr", "--addons=registry", "--addons=registry-creds", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=nvidia-device-plugin", "--addons=yakd", "--addons=volcano", "--addons=amd-gpu-device-plugin"}, StartArgs()...) 
if !NoneDriver() { args = append(args, "--addons=ingress", "--addons=ingress-dns", "--addons=storage-provisioner-rancher") } diff --git a/test/integration/cert_options_test.go b/test/integration/cert_options_test.go index 101b5c01d3..caedfb13c0 100644 --- a/test/integration/cert_options_test.go +++ b/test/integration/cert_options_test.go @@ -39,7 +39,7 @@ func TestCertOptions(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--memory=2048", "--apiserver-ips=127.0.0.1", "--apiserver-ips=192.168.15.15", "--apiserver-names=localhost", "--apiserver-names=www.google.com", "--apiserver-port=8555"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", "--apiserver-ips=127.0.0.1", "--apiserver-ips=192.168.15.15", "--apiserver-names=localhost", "--apiserver-names=www.google.com", "--apiserver-port=8555"}, StartArgs()...) // We can safely override --apiserver-name with if NeedsPortForward() { @@ -118,7 +118,7 @@ func TestCertExpiration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--memory=2048", "--cert-expiration=3m"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", "--cert-expiration=3m"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { @@ -127,7 +127,7 @@ func TestCertExpiration(t *testing.T) { // Now wait 3 minutes for the certs to expire and make sure minikube starts properly time.Sleep(time.Minute * 3) - args = append([]string{"start", "-p", profile, "--memory=2048", "--cert-expiration=8760h"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", "--cert-expiration=8760h"}, StartArgs()...) 
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start minikube after cert expiration: %q : %v", rr.Command(), err) diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index 71a44c587d..e7afa7c4e8 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -47,7 +47,7 @@ func TestDockerFlags(t *testing.T) { defer CleanupWithLogs(t, profile, cancel) // Use the most verbose logging for the simplest test. If it fails, something is very wrong. - args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=2048", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=3072", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) @@ -87,7 +87,7 @@ func TestForceSystemdFlag(t *testing.T) { defer CleanupWithLogs(t, profile, cancel) // Use the most verbose logging for the simplest test. If it fails, something is very wrong. - args := append([]string{"start", "-p", profile, "--memory=2048", "--force-systemd", "--alsologtostderr", "-v=5"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", "--force-systemd", "--alsologtostderr", "-v=5"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) @@ -149,7 +149,7 @@ func TestForceSystemdEnv(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "-v=5"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...) cmd := exec.CommandContext(ctx, Target(), args...) cmd.Env = append(os.Environ(), "MINIKUBE_FORCE_SYSTEMD=true") rr, err := Run(t, cmd) diff --git a/test/integration/error_spam_test.go b/test/integration/error_spam_test.go index 8934e7dbd2..bbd7df4d78 100644 --- a/test/integration/error_spam_test.go +++ b/test/integration/error_spam_test.go @@ -76,7 +76,7 @@ func TestErrorSpam(t *testing.T) { t.Run("setup", func(t *testing.T) { // This should likely use multi-node once it's ready // use `--log_dir` flag to run isolated and avoid race condition - ie, failing to clean up (locked) log files created by other concurently-run tests, or counting them in results - args := append([]string{"start", "-p", profile, "-n=1", "--memory=2250", "--wait=false", fmt.Sprintf("--log_dir=%s", logDir)}, StartArgs()...) + args := append([]string{"start", "-p", profile, "-n=1", "--memory=3072", "--wait=false", fmt.Sprintf("--log_dir=%s", logDir)}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 9a36e35c6c..212e1a1026 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -2236,10 +2236,10 @@ func startHTTPProxy(t *testing.T) (*http.Server, error) { func startMinikubeWithProxy(ctx context.Context, t *testing.T, profile string, proxyEnv string, addr string) { // Use more memory so that we may reliably fit MySQL and nginx - memoryFlag := "--memory=4000" + memoryFlag := "--memory=4096" // to avoid failure for mysq/pv on virtualbox on darwin on free github actions, if detect.GithubActionRunner() && VirtualboxDriver() { - memoryFlag = "--memory=6000" + memoryFlag = "--memory=6144" } // passing --api-server-port so later verify it didn't change in soft start. startArgs := append([]string{"start", "-p", profile, memoryFlag, fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=all"}, StartArgsWithContext(ctx)...) diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index 34f4c13d6b..90e7dbba3d 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "os/exec" + "strings" "testing" "k8s.io/minikube/pkg/minikube/vmpath" @@ -36,7 +37,7 @@ func TestGuestEnvironment(t *testing.T) { defer CleanupWithLogs(t, profile, cancel) t.Run("Setup", func(t *testing.T) { - args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=2048", "--wait=false", "--disable-metrics=true"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=3072", "--wait=false", "--disable-metrics=true"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err) diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index 7b9a06b2a6..e594a6cc5c 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -48,7 +48,7 @@ func TestGvisorAddon(t *testing.T) { CleanupWithLogs(t, profile, cancel) }() - startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) + startArgs := append([]string{"start", "-p", profile, "--memory=3072", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err) diff --git a/test/integration/ha_test.go b/test/integration/ha_test.go index 02c5a55889..1d0a9ca2be 100644 --- a/test/integration/ha_test.go +++ b/test/integration/ha_test.go @@ -97,7 +97,7 @@ func TestMultiControlPlane(t *testing.T) { // validateHAStartCluster ensures ha (multi-control plane) cluster can start. func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) { // start ha (multi-control plane) cluster - startArgs := append([]string{"-p", profile, "start", "--ha", "--memory", "2200", "--wait", "true", "--alsologtostderr", "-v", "5"}, StartArgs()...) + startArgs := append([]string{"-p", profile, "start", "--ha", "--memory", "3072", "--wait", "true", "--alsologtostderr", "-v", "5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { t.Fatalf("failed to fresh-start ha (multi-control plane) cluster. 
args %q : %v", rr.Command(), err) diff --git a/test/integration/json_output_test.go b/test/integration/json_output_test.go index 12e2a18dff..5e0924eb46 100644 --- a/test/integration/json_output_test.go +++ b/test/integration/json_output_test.go @@ -43,7 +43,7 @@ func TestJSONOutput(t *testing.T) { }{ { command: "start", - args: append([]string{"--memory=2200", "--wait=true"}, StartArgs()...), + args: append([]string{"--memory=3072", "--wait=true"}, StartArgs()...), }, { command: "pause", }, { @@ -155,7 +155,7 @@ func TestErrorJSONOutput(t *testing.T) { // force a failure via --driver=fail so that we can make sure errors // are printed as expected - startArgs := []string{"start", "-p", profile, "--memory=2200", "--output=json", "--wait=true", "--driver=fail"} + startArgs := []string{"start", "-p", profile, "--memory=3072", "--output=json", "--wait=true", "--driver=fail"} rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err == nil { diff --git a/test/integration/mount_start_test.go b/test/integration/mount_start_test.go index 4425d0519d..1d6e56910d 100644 --- a/test/integration/mount_start_test.go +++ b/test/integration/mount_start_test.go @@ -93,7 +93,7 @@ func validateStartWithMount(ctx context.Context, t *testing.T, profile string) { // We have to increment this because if you have two mounts with the same port, when you kill one cluster the mount will break for the other mountStartPort++ - args := []string{"start", "-p", profile, "--memory=2048", "--mount", "--mount-gid", mountGID, "--mount-msize", mountMSize, "--mount-port", mountPort(), "--mount-uid", mountUID, "--no-kubernetes"} + args := []string{"start", "-p", profile, "--memory=3072", "--mount", "--mount-gid", mountGID, "--mount-msize", mountMSize, "--mount-port", mountPort(), "--mount-uid", mountUID, "--no-kubernetes"} args = append(args, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 89696e7995..a3f190ca03 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -92,7 +92,7 @@ func TestMultiNode(t *testing.T) { // validateMultiNodeStart makes sure a 2 node cluster can start func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { // Start a 2 node cluster with the --nodes param - startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=5", "--alsologtostderr"}, StartArgs()...) + startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=3072", "--nodes=2", "-v=5", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) diff --git a/test/integration/net_test.go b/test/integration/net_test.go index 41b0284423..0084404321 100644 --- a/test/integration/net_test.go +++ b/test/integration/net_test.go @@ -239,7 +239,7 @@ func validateFalseCNI(ctx context.Context, t *testing.T, profile string) { cr = "crio" } - startArgs := []string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "--cni=false"} + startArgs := []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "--cni=false"} startArgs = append(startArgs, StartArgs()...) mkCmd := exec.CommandContext(ctx, Target(), startArgs...) diff --git a/test/integration/no_kubernetes_test.go b/test/integration/no_kubernetes_test.go index e4458b1feb..7b5b86c32a 100644 --- a/test/integration/no_kubernetes_test.go +++ b/test/integration/no_kubernetes_test.go @@ -91,7 +91,7 @@ func validateStartWithK8S(ctx context.Context, t *testing.T, profile string) { defer PostMortemLogs(t, profile) // docs: start minikube with Kubernetes. 
- args := append([]string{"start", "-p", profile}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) @@ -108,7 +108,7 @@ func validateStartWithStopK8s(ctx context.Context, t *testing.T, profile string) defer PostMortemLogs(t, profile) // docs: start minikube with no Kubernetes. - args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--no-kubernetes", "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) @@ -132,7 +132,7 @@ func validateStartNoK8S(ctx context.Context, t *testing.T, profile string) { defer PostMortemLogs(t, profile) // docs: start minikube with no Kubernetes. - args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--no-kubernetes", "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) diff --git a/test/integration/pause_test.go b/test/integration/pause_test.go index f6626ce6cf..2d3d54e85f 100644 --- a/test/integration/pause_test.go +++ b/test/integration/pause_test.go @@ -76,7 +76,7 @@ func TestPause(t *testing.T) { func validateFreshStart(ctx context.Context, t *testing.T, profile string) { defer PostMortemLogs(t, profile) - args := append([]string{"start", "-p", profile, "--memory=2048", "--install-addons=false", "--wait=all"}, StartArgs()...) 
+ args := append([]string{"start", "-p", profile, "--memory=3072", "--install-addons=false", "--wait=all"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err) diff --git a/test/integration/preload_test.go b/test/integration/preload_test.go index 298c76cfed..59c9b73b2d 100644 --- a/test/integration/preload_test.go +++ b/test/integration/preload_test.go @@ -36,7 +36,7 @@ func TestPreload(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer CleanupWithLogs(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "--wait=true", "--preload=false"} + startArgs := []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "--wait=true", "--preload=false"} startArgs = append(startArgs, StartArgs()...) k8sVersion := "v1.24.4" startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) @@ -61,7 +61,7 @@ func TestPreload(t *testing.T) { } // re-start the cluster and check if image is preserved - startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1", "--wait=true"} + startArgs = []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1", "--wait=true"} startArgs = append(startArgs, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { diff --git a/test/integration/scheduled_stop_test.go b/test/integration/scheduled_stop_test.go index 49067c2e99..2b0181be2e 100644 --- a/test/integration/scheduled_stop_test.go +++ b/test/integration/scheduled_stop_test.go @@ -124,7 +124,7 @@ func TestScheduledStopUnix(t *testing.T) { } func startMinikube(ctx context.Context, t *testing.T, profile string) { - args := append([]string{"start", "-p", profile, "--memory=2048"}, StartArgs()...) 
+ args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("starting minikube: %v\n%s", err, rr.Output()) diff --git a/test/integration/skaffold_test.go b/test/integration/skaffold_test.go index a407426d5e..159a412999 100644 --- a/test/integration/skaffold_test.go +++ b/test/integration/skaffold_test.go @@ -62,7 +62,7 @@ func TestSkaffold(t *testing.T) { } t.Logf("skaffold version: %s", rr.Stdout.Bytes()) - args := append([]string{"start", "-p", profile, "--memory=2600"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("starting minikube: %v\n%s", err, rr.Output()) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 84e20cce3b..716bb1e42b 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -106,7 +106,7 @@ func TestStartStop(t *testing.T) { waitFlag = "--wait=apiserver,system_pods,default_sa" } - startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", waitFlag}, tc.args...) + startArgs := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", waitFlag}, tc.args...) startArgs = append(startArgs, StartArgs()...) 
startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version)) diff --git a/test/integration/status_test.go b/test/integration/status_test.go index 5dd168b3a2..95108edb3a 100644 --- a/test/integration/status_test.go +++ b/test/integration/status_test.go @@ -41,7 +41,7 @@ func TestInsufficientStorage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(5)) defer Cleanup(t, profile, cancel) - startArgs := []string{"start", "-p", profile, "--memory=2048", "--output=json", "--wait=true"} + startArgs := []string{"start", "-p", profile, "--memory=3072", "--output=json", "--wait=true"} startArgs = append(startArgs, StartArgs()...) c := exec.CommandContext(ctx, Target(), startArgs...) // artificially set /var to 100% capacity diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 3b9bc3633d..6c91bc6040 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -95,7 +95,7 @@ func TestRunningBinaryUpgrade(t *testing.T) { } defer os.Remove(tf.Name()) - args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072"}, legacyStartArgs()...) rr := &RunResult{} r := func() error { c := exec.CommandContext(ctx, tf.Name(), args...) @@ -126,7 +126,7 @@ func TestRunningBinaryUpgrade(t *testing.T) { t.Fatalf("legacy %s start failed: %v", desiredLegacyVersion, err) } - args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...) 
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err) @@ -158,7 +158,7 @@ func TestStoppedBinaryUpgrade(t *testing.T) { defer os.Remove(tf.Name()) t.Run("Upgrade", func(t *testing.T) { - args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072"}, legacyStartArgs()...) rr := &RunResult{} r := func() error { c := exec.CommandContext(ctx, tf.Name(), args...) @@ -194,7 +194,7 @@ func TestStoppedBinaryUpgrade(t *testing.T) { t.Errorf("failed to stop cluster: %s: %v", rr.Command(), err) } - args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err) @@ -218,7 +218,7 @@ func TestKubernetesUpgrade(t *testing.T) { defer CleanupWithLogs(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to start minikube HEAD with oldest k8s version: %s: %v", rr.Command(), err) @@ -239,7 +239,7 @@ func TestKubernetesUpgrade(t *testing.T) { t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String()) } - args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed to upgrade with newest k8s version. args: %s : %v", rr.Command(), err) @@ -265,13 +265,13 @@ func TestKubernetesUpgrade(t *testing.T) { } t.Logf("Attempting to downgrade Kubernetes (should fail)") - args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err == nil { t.Fatalf("downgrading Kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) } t.Logf("Attempting restart after unsuccessful downgrade") - args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("start after failed upgrade: %s: %v", rr.Command(), err) @@ -303,7 +303,7 @@ func TestMissingContainerUpgrade(t *testing.T) { } defer os.Remove(tf.Name()) - args := append([]string{"start", "-p", profile, "--memory=2200"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...) rr := &RunResult{} r := func() error { rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...)) @@ -325,7 +325,7 @@ func TestMissingContainerUpgrade(t *testing.T) { t.Fatalf("%s failed: %v", rr.Command(), err) } - args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...) + args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { t.Errorf("failed missing container upgrade from %s. args: %s : %v", legacyVersion, rr.Command(), err) diff --git a/translations/de.json b/translations/de.json index 54fae209a7..ca86701fb8 100644 --- a/translations/de.json +++ b/translations/de.json @@ -392,6 +392,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "Wenn true, laden Sie nur Dateien für die spätere Verwendung herunter und speichern Sie sie – installieren oder starten Sie nichts.", "If true, pods might get deleted and restarted on addon enable": "Falls gesetzt, könnten Pods gelöscht und neugestartet werden, wenn ein Addon aktiviert wird", "If true, print web links to addons' documentation if using --output=list (default).": "Falls gesetzt, gibt Links zu den Dokumentationen der Addons aus. 
Funktioniert nur, wenn --output=list (default).", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "Falls gesetzt, gibt die Liste der Profile schneller aus, indem das Validieren des Status des Clusters ausgelassen wird.", "If true, the added node will be marked for work. Defaults to true.": "Falls gesetzt, wird der hinzugefügte Node als Arbeitsnode markiert. Default: true", "If true, the node added will also be a control plane in addition to a worker.": "Falls gesetzt, wird der Knoten auch als Control Plane hinzugefügt, zusätzlich zu als Worker.", diff --git a/translations/es.json b/translations/es.json index 42f6e26347..86b1554295 100644 --- a/translations/es.json +++ b/translations/es.json @@ -388,6 +388,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "Si el valor es \"true\", los archivos solo se descargan y almacenan en caché (no se instala ni inicia nada).", "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", "If true, will perform potentially dangerous operations. 
Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", diff --git a/translations/fr.json b/translations/fr.json index 760e0cec55..055fb3fc95 100644 --- a/translations/fr.json +++ b/translations/fr.json @@ -25,15 +25,15 @@ "--kvm-numa-count range is 1-8": "la tranche de --kvm-numa-count est 1 à 8", "--network flag is only valid with the docker/podman and KVM drivers, it will be ignored": "l'indicateur --network est valide uniquement avec les pilotes docker/podman et KVM, il va être ignoré", "--network flag is only valid with the docker/podman, KVM and Qemu drivers, it will be ignored": "L'indicateur --network n'est valide qu'avec les pilotes docker/podman, KVM et Qemu, il sera ignoré", - "--network flag is only valid with the docker/podman, qemu, kvm, and vfkit drivers, it will be ignored": "", + "--network flag is only valid with the docker/podman, qemu, kvm, and vfkit drivers, it will be ignored": "L'indicateur --network n'est valide qu'avec les pilotes docker/podman, qemu, kvm et vfkit, il sera ignoré", "--network with QEMU must be 'builtin' or 'socket_vmnet'": "--network avec QEMU doit être 'builtin' ou 'socket_vmnet'", "--network with QEMU must be 'user' or 'socket_vmnet'": "--network avec QEMU doit être 'user' ou 'socket_vmnet'", - "--network with vfkit must be 'nat' or 'vmnet-shared'": "", + "--network with vfkit must be 'nat' or 'vmnet-shared'": "--network avec vfkit doit être 'nat' ou 'vmnet-shared'", "--static-ip is only implemented on Docker and Podman drivers, flag will be ignored": "--static-ip n'est implémenté que sur les pilotes Docker et Podman, l'indicateur sera ignoré", "--static-ip overrides --subnet, --subnet will be ignored": "--static-ip remplace --subnet, --subnet sera ignoré", - "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube delete {{.profile}}\n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Create a second cluster 
with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\t \n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "1) Recréez le cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n\t\t minikube delete {{.profile}}\n\t\t minikube start {{.profile}} - -kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Créez un deuxième cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n \t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Utiliser le cluster existant à la version Kubernetes {{.old}}, en exécutant :\n\t \n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t \t", - "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube delete{{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Create a second cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\t \n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "1) Recréez le cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n\t\t minikube delete {{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Créez un deuxième cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n \t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Utiliser le cluster existant à la version Kubernetes {{.old}}, en exécutant :\n\t \n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t \t", - "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\n\t\t minikube delete{{.profile}}\n\t\t minikube start{{.profile}} 
--kubernetes-version={{.prefix}}{{.new}}\n\n\t\t2) Create a second cluster with Kubernetes {{.new}}, by running:\n\n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "", + "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube delete {{.profile}}\n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Create a second cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\t \n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "1) Recréez le cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n\t\t minikube delete {{.profile}}\n\t\t minikube start {{.profile}} - -kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Créez un deuxième cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n \t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Utilisez le cluster existant à la version Kubernetes {{.old}}, en exécutant :\n\t \n\t\t minikube start {{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t \t", + "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube delete{{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Create a second cluster with Kubernetes {{.new}}, by running:\n\t \n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\t \n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "1) Recréez le cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n\t\t 
minikube delete {{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t2) Créez un deuxième cluster avec Kubernetes {{.new}}, en exécutant :\n\t \n \t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\t \n\t\t3) Utilisez le cluster existant à la version Kubernetes {{.old}}, en exécutant :\n\t \n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t \t", + "1) Recreate the cluster with Kubernetes {{.new}}, by running:\n\n\t\t minikube delete{{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\n\t\t2) Create a second cluster with Kubernetes {{.new}}, by running:\n\n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\n\t\t3) Use the existing cluster at version Kubernetes {{.old}}, by running:\n\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t": "1) Recréez le cluster avec Kubernetes {{.new}}, en exécutant :\n\n\t\t minikube delete{{.profile}}\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.new}}\n\n\t\t2) Créez un deuxième cluster avec Kubernetes {{.new}}, en exécutant :\n\n\t\t minikube start -p {{.suggestedName}} --kubernetes-version={{.prefix}}{{.new}}\n\n\t\t3) Utilisez le cluster existant à la version Kubernetes {{.old}}, en exécutant :\n\n\t\t minikube start{{.profile}} --kubernetes-version={{.prefix}}{{.old}}\n\t\t", "1. Click on \"Docker for Desktop\" menu icon\n\t\t\t2. Click \"Preferences\"\n\t\t\t3. Click \"Resources\"\n\t\t\t4. Increase \"CPUs\" slider bar to 2 or higher\n\t\t\t5. Click \"Apply \u0026 Restart\"": "1. Cliquez sur l'icône de menu \"Docker for Desktop\"\n\t\t\t2. Cliquez sur \"Preferences\"\n\t\t\t3. Cliquez sur \"Ressources\"\n\t\t\t4. Augmentez la barre de défilement \"CPU\" à 2 ou plus\n\t\t\t5. Cliquez sur \"Apply \u0026 Restart\"", "1. Click on \"Docker for Desktop\" menu icon\n\t\t\t2. Click \"Preferences\"\n\t\t\t3. 
Click \"Resources\"\n\t\t\t4. Increase \"Memory\" slider bar to {{.recommend}} or higher\n\t\t\t5. Click \"Apply \u0026 Restart\"": "1. Cliquez sur l'icône de menu \"Docker for Desktop\"\n\t\t\t2. Cliquez sur \"Preferences\"\n\t\t\t3. Cliquez sur \"Ressources\"\n\t\t\t4. Augmentez la barre de défilement \"Memory\" à {{.recommend}} ou plus\n\t\t\t5. Cliquez sur \"Apply \u0026 Restart\"", "1. Open the \"Docker Desktop\" menu by clicking the Docker icon in the system tray\n\t\t2. Click \"Settings\"\n\t\t3. Click \"Resources\"\n\t\t4. Increase \"CPUs\" slider bar to 2 or higher\n\t\t5. Click \"Apply \u0026 Restart\"": "1. Ouvrez le menu \"Docker Desktop\" en cliquant sur l'icône Docker dans la barre d'état système\n\t\t2. Cliquez sur \"Settings\"\n\t\t3. Cliquez sur \"Ressources\"\n\t\t4. Augmentez la barre de défilement \"CPU\" à 2 ou plus\n\t\t5. Cliquez sur \"Apply \u0026 Restart\"", @@ -77,7 +77,7 @@ "Alternatively you could install one of these drivers:": "Vous pouvez également installer l'un de ces pilotes :", "Amount of time to wait for a service in seconds": "Temps d'attente pour un service en secondes", "Amount of time to wait for service in seconds": "Temps d'attente pour un service en secondes", - "An optional configuration file to read addon specific configs from instead of being prompted each time.": "", + "An optional configuration file to read addon specific configs from instead of being prompted each time.": "Un fichier de configuration facultatif pour lire les configurations spécifiques aux modules complémentaires au lieu d'être invité à chaque fois.", "Another hypervisor, such as VirtualBox, is conflicting with KVM. Please stop the other hypervisor, or use --driver to switch to it.": "Un autre hyperviseur, tel que VirtualBox, est en conflit avec KVM. Veuillez arrêter l'autre hyperviseur ou utiliser --driver pour y basculer.", "Another minikube instance is downloading dependencies... 
": "Une autre instance minikube télécharge des dépendances", "Another program is using a file required by minikube. If you are using Hyper-V, try stopping the minikube VM from within the Hyper-V manager": "Un autre programme utilise un fichier requis par minikube. Si vous utilisez Hyper-V, essayez d'arrêter la machine virtuelle minikube à partir du gestionnaire Hyper-V", @@ -125,7 +125,7 @@ "Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "Configurez un commutateur réseau externe en suivant la documentation officielle, puis ajoutez `--hyperv-virtual-switch=\u003cswitch-name\u003e` à `minikube start`", "Configure environment to use minikube's Docker daemon": "Configurer l'environnement pour utiliser le démon Docker de minikube", "Configure environment to use minikube's Podman service": "Configurer l'environnement pour utiliser le service Podman de minikube", - "Configure vmnet-helper to run without a password.\n\n\t\tPlease install a vmnet-helper sudoers rule using these instructions:\n\n\t\thttps://github.com/nirs/vmnet-helper#granting-permission-to-run-vmnet-helper": "", + "Configure vmnet-helper to run without a password.\n\n\t\tPlease install a vmnet-helper sudoers rule using these instructions:\n\n\t\thttps://github.com/nirs/vmnet-helper#granting-permission-to-run-vmnet-helper": "Configurez vmnet-helper pour qu'il s'exécute sans mot de passe.\n\n\t\tVeuillez installer une règle sudoers vmnet-helper en suivant ces instructions :\n\n\t\thttps://github.com/nirs/vmnet-helper#granting-permission-to-run-vmnet-helper", "Configures the addon w/ADDON_NAME within minikube (example: minikube addons configure registry-creds). For a list of available addons use: minikube addons list": "Configure le module w/ADDON_NAME dans minikube (exemple : minikube addons configure registry-creds). 
Pour une liste des modules disponibles, utilisez : minikube addons list", "Configuring RBAC rules ...": "Configuration des règles RBAC ...", "Configuring local host environment ...": "Configuration de l'environnement de l'hôte local...", @@ -380,12 +380,13 @@ "If set, pause all namespaces": "Si défini, suspend tous les espaces de noms", "If set, unpause all namespaces": "Si défini, annule la pause de tous les espaces de noms", "If the above advice does not help, please let us know:": "Si les conseils ci-dessus ne vous aident pas, veuillez nous en informer :", - "If the host has a firewall:\n\t\t\n\t\t1. Allow a port through the firewall\n\t\t2. Specify \"--port=\u003cport_number\u003e\" for \"minikube mount\"": "Si l'hôte dispose d'un pare-feu :\n\t\t\n\t\t1. Autoriser un port à travers le pare-feu\n\t\t2. Spécifiez \"--port=\u003cport_number\u003e\" pour \"minikube mount\"", - "If the host has a firewall:\n\n\t\t1. Allow a port through the firewall\n\t\t2. Specify \"--port=\u003cport_number\u003e\" for \"minikube mount\"": "", + "If the host has a firewall:\n\t\t\n\t\t1. Allow a port through the firewall\n\t\t2. Specify \"--port=\u003cport_number\u003e\" for \"minikube mount\"": "Si l'hôte dispose d'un pare-feu :\n\t\t\n\t\t1. Autoriser un port à traverser le pare-feu\n\t\t2. Spécifiez \"--port=\u003cport_number\u003e\" pour \"minikube mount\"", + "If the host has a firewall:\n\n\t\t1. Allow a port through the firewall\n\t\t2. Specify \"--port=\u003cport_number\u003e\" for \"minikube mount\"": "Si l'hôte dispose d'un pare-feu :\n\n\t\t1. Autoriser un port à traverser le pare-feu\n\t\t2. Spécifiez \"--port=\u003cport_number\u003e\" pour \"minikube mount\"", "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none.": "Si vrai, met en cache les images Docker pour le programme d'amorçage actuel et les charge dans la machine. 
Toujours faux avec --driver=none.", "If true, only download and cache files for later use - don't install or start anything.": "Si la valeur est \"true\", téléchargez les fichiers et mettez-les en cache uniquement pour une utilisation future. Ne lancez pas d'installation et ne commencez aucun processus.", "If true, pods might get deleted and restarted on addon enable": "Si vrai, les pods peuvent être supprimés et redémarrés lors addon enable", "If true, print web links to addons' documentation if using --output=list (default).": "Si vrai, affiche les liens Web vers la documentation des addons si vous utilisez --output=list (défaut).", + "If true, returns a detailed list of profiles.": "Si vrai, renvoie une liste détaillée des profils.", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "Si vrai, renvoie la liste des profils plus rapidement en ignorant la validation de l'état du cluster.", "If true, the added node will be marked for work. Defaults to true.": "Si vrai, le nœud ajouté sera marqué pour le travail. 
La valeur par défaut est true.", "If true, the node added will also be a control plane in addition to a worker.": "Si vrai, le nœud ajouté sera également un plan de contrôle en plus d'un travailleur.", @@ -486,7 +487,7 @@ "Noticed you have an activated podman-env on {{.driver_name}} driver in this terminal:": "Vous avez remarqué que vous avez un pilote podman-env activé sur {{.driver_name}} dans ce terminal :", "Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit and kvm2 drivers)": "Nombre de disques supplémentaires créés et attachés à la machine virtuelle minikube (actuellement implémenté uniquement pour les pilotes hyperkit et kvm2)", "Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit, kvm2, and qemu2 drivers)": "Nombre de disques supplémentaires créés et attachés à la machine virtuelle minikube (actuellement uniquement implémenté pour les pilotes hyperkit, kvm2 et qemu2)", - "Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit, kvm2, qemu2, and vfkit drivers)": "", + "Number of extra disks created and attached to the minikube VM (currently only implemented for hyperkit, kvm2, qemu2, and vfkit drivers)": "Nombre de disques supplémentaires créés et attachés à la VM minikube (actuellement implémenté uniquement pour les pilotes hyperkit, kvm2, qemu2 et vfkit)", "Number of lines back to go within the log": "Nombre de lignes à remonter dans le journal", "OS release is {{.pretty_name}}": "La version du système d'exploitation est {{.pretty_name}}", "One of 'text', 'yaml' or 'json'.": "Un parmi 'text', 'yaml' ou 'json'.", @@ -783,9 +784,9 @@ "The node {{.name}} network is not available. Please verify network settings.": "Le réseau du nœud {{.name}} n'est pas disponible. 
Veuillez vérifier les paramètres réseau.", "The none driver is not compatible with multi-node clusters.": "Le pilote none n'est pas compatible avec les clusters multi-nœuds.", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\t\t\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "Le pilote none avec Kubernetes v1.24+ et l'environnement d'exécution du conteneur docker nécessitent cri-dockerd.\n\t\t\n\t\tVeuillez installer cri-dockerd en suivant ces instructions :\n\n\t\thttps://github.com/Mirantis/cri-dockerd", - "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "", + "The none driver with Kubernetes v1.24+ and the docker container-runtime requires cri-dockerd.\n\n\t\tPlease install cri-dockerd using these instructions:\n\n\t\thttps://github.com/Mirantis/cri-dockerd": "Le pilote none avec Kubernetes v1.24+ et l'environnement d'exécution du conteneur docker nécessitent cri-dockerd.\n\n\t\tVeuillez installer cri-dockerd en suivant ces instructions :\n\n\t\thttps://github.com/Mirantis/cri-dockerd", "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\t\t\n\t\tPlease install dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "Le pilote none avec Kubernetes v1.24+ et l'environnement d'exécution du conteneur docker nécessitent dockerd.\n\t\t\n\t\tVeuillez installer dockerd en suivant ces instructions :\n\n\t\thttps://docs.docker.com/engine/install/", - "The none driver with Kubernetes v1.24+ and the docker container-runtime requires dockerd.\n\n\t\tPlease install 
dockerd using these instructions:\n\n\t\thttps://docs.docker.com/engine/install/": "Le pilote none avec Kubernetes v1.24+ et l'environnement d'exécution du conteneur docker nécessitent dockerd.\n\n\t\tVeuillez installer dockerd en suivant ces instructions :\n\n\t\thttps://docs.docker.com/engine/install/", "The none driver with Kubernetes v1.24+ requires containernetworking-plugins.\n\n\t\tPlease install containernetworking-plugins using these instructions:\n\n\t\thttps://minikube.sigs.k8s.io/docs/faq/#how-do-i-install-containernetworking-plugins-for-none-driver": "Le pilote none avec Kubernetes v1.24+ nécessite containernetworking-plugins.\n\n\t\tVeuillez installer containernetworking-plugins en suivant ces instructions :\n\n\t\thttps://minikube.sigs.k8s.io/docs /faq/#how-do-i-install-containernetworking-plugins-for-none-driver", "The number of bytes to use for 9p packet payload": "Le nombre d'octets à utiliser pour la charge utile du paquet 9p", "The number of nodes to spin up. Defaults to 1.": "Le nombre de nœuds à faire tourner. La valeur par défaut est 1.", @@ -806,7 +807,7 @@ "The total number of nodes to spin up. Defaults to 1.": "Le nombre total de nœuds à faire tourner. La valeur par défaut est 1.", "The value passed to --format is invalid": "La valeur passée à --format n'est pas valide", "The value passed to --format is invalid: {{.error}}": "La valeur passée à --format n'est pas valide : {{.error}}", - "The vfkit driver is only supported on macOS": "", + "The vfkit driver is only supported on macOS": "Le pilote vfkit n'est pris en charge que sur macOS", "The {{.addon}} addon is only supported with the KVM driver.\n\nFor GPU setup instructions see: https://minikube.sigs.k8s.io/docs/tutorials/nvidia/": "Le module complémentaire {{.addon}} n'est pris en charge qu'avec le pilote KVM.\n\nPour les instructions de configuration du GPU, consultez : https://minikube.sigs.k8s.io/docs/tutorials/nvidia/", "There are a couple ways to enable the required file sharing:\n1. 
Enable \"Use the WSL 2 based engine\" in Docker Desktop\nor\n2. Enable file sharing in Docker Desktop for the %s%s directory": "Il existe plusieurs manières d'activer le partage de fichiers requis :\n1. Activez \"Utiliser le moteur basé sur WSL 2\" dans Docker Desktop\nou\n2. Activer le partage de fichiers dans Docker Desktop pour le répertoire %s%s", "These --extra-config parameters are invalid: {{.invalid_extra_opts}}": "Ces paramètres --extra-config ne sont pas valides : {{.invalid_extra_opts}}", @@ -829,7 +830,7 @@ "To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:\n\n\tminikube{{.profileArg}} service yakd-dashboard -n yakd-dashboard\n": "Pour accéder à YAKD - Kubernetes Dashboard, attendez que le Pod soit prêt et exécutez la commande suivante :\n\n\tminikube{{.profileArg}} service yakd-dashboard -n yakd-dashboard\n", "To access YAKD - Kubernetes Dashboard, wait for Pod to be ready and run the following command:\n\n\tminikube{{.profileArg}} service yakd-dashboard -n yakd-dashboard\n\n": "Pour accéder à YAKD - Kubernetes Dashboard, attendez que le Pod soit prêt et exécutez la commande suivante :\n\n\tminikube{{.profileArg}} service yakd-dashboard -n yakd-dashboard\n\n", "To authenticate in Headlamp, fetch the Authentication Token using the following command:\n\nexport SECRET=$(kubectl get secrets --namespace headlamp -o custom-columns=\":metadata.name\" | grep \"headlamp-token\")\nkubectl get secret $SECRET --namespace headlamp --template=\\{\\{.data.token\\}\\} | base64 --decode\n\t\t\t\n": "Pour vous authentifier dans Headlamp, récupérez le jeton d'authentification à l'aide de la commande suivante :\n\nexport SECRET=$(kubectl get secrets --namespace headlamp -o custom-columns=\":metadata.name\" | grep \"headlamp-token \")\nkubectl get secret $SECRET --namespace headlamp --template=\\{\\{.data.token\\}\\} | base64 --decode\n\t\t\t\n", - "To configure vment-helper to run without a password, please check the 
documentation:": "", + "To configure vment-helper to run without a password, please check the documentation:": "Pour configurer vmnet-helper pour qu'il s'exécute sans mot de passe, veuillez consulter la documentation :", "To connect to this cluster, use: --context={{.name}}": "Pour vous connecter à ce cluster, utilisez : --context={{.name}}", "To connect to this cluster, use: kubectl --context={{.profile_name}}": "Pour vous connecter à ce cluster, utilisez : kubectl --context={{.profile_name}}", "To disable beta notices, run: 'minikube config set WantBetaUpdateNotification false'": "Pour désactiver les notifications bêta, exécutez : 'minikube config set WantBetaUpdateNotification false'", @@ -847,8 +848,8 @@ "Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "Essayez une ou plusieurs des solutions suivantes pour libérer de l'espace sur l'appareil :\n\t\n\t\t\t1. Exécutez \"docker system prune\" pour supprimer les données Docker inutilisées (éventuellement avec \"-a\")\n\t\t\t2. Augmentez le stockage alloué à Docker for Desktop en cliquant sur :\n\t\t\t\tIcône Docker \u003e Settings \u003e Ressources \u003e Disk Image Size\n\t\t\t3. Exécutez \"minikube ssh -- docker system prune\" si vous utilisez l'environnement d'exécution du conteneur Docker", "Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Settings \u003e Resources \u003e Disk Image Size\n\t\t\t3. 
Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "Essayez une ou plusieurs des solutions suivantes pour libérer de l'espace sur l'appareil :\n\t\n\t\t\t1. Exécutez \"docker system prune\" pour supprimer les données Docker inutilisées (éventuellement avec \"-a\")\n\t\t\t2. Augmentez le stockage alloué à Docker for Desktop en cliquant sur :\n\t\t\t\tIcône Docker \u003e Préférences \u003e Ressources \u003e Taille de l'image disque\n\t\t\t3. Exécutez \"minikube ssh -- docker system prune\" si vous utilisez l'environnement d'exécution du conteneur Docker", "Try one or more of the following to free up space on the device:\n\t\n\t\t\t1. Run \"sudo podman system prune\" to remove unused podman data\n\t\t\t2. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "Essayez une ou plusieurs des solutions suivantes pour libérer de l'espace sur l'appareil :\n\t\n\t\t\t1. Exécutez \"sudo podman system prune\" pour supprimer les données podman inutilisées\n\t\t\t2. Exécutez \"minikube ssh -- docker system prune\" si vous utilisez l'environnement d'exécution du conteneur Docker", - "Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "", - "Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"sudo podman system prune\" to remove unused podman data\n\t\t\t2. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "", + "Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. 
Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "Essayez une ou plusieurs des solutions suivantes pour libérer de l'espace sur l'appareil :\n\n\t\t\t1. Exécutez « docker system prune » pour supprimer les données Docker inutilisées (éventuellement avec « -a »).\n\t\t\t2. Augmentez l'espace de stockage alloué à Docker for Desktop en cliquant sur :\n\t\t\t\tIcône Docker \u003e Préférences \u003e Ressources \u003e Taille de l'image disque\n\t\t\t3. Exécutez « minikube ssh -- docker system prune » si vous utilisez l'environnement d'exécution de conteneur Docker.", + "Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"sudo podman system prune\" to remove unused podman data\n\t\t\t2. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime": "Essayez une ou plusieurs des solutions suivantes pour libérer de l'espace sur l'appareil :\n\n\t\t\t1. Exécutez « sudo podman system prune » pour supprimer les données podman inutilisées.\n\t\t\t2. 
Exécutez « minikube ssh -- docker system prune » si vous utilisez l'environnement d'exécution de conteneur Docker.", "Trying to delete invalid profile {{.profile}}": "Tentative de suppression du profil non valide {{.profile}}", "Tunnel successfully started": "Tunnel démarré avec succès", "Unable to bind flags": "Impossible de lier les indicateurs", @@ -894,7 +895,7 @@ "Unable to remove machine directory": "Impossible de supprimer le répertoire de la machine", "Unable to restart cluster, will reset it: {{.error}}": "Impossible de redémarrer le cluster, va être réinitialisé : {{.error}}", "Unable to restart control-plane node(s), will reset cluster: {{.error}}": "Impossible de redémarrer le(s) nœud(s) du plan de contrôle, le cluster sera réinitialisé : {{.error}}", - "Unable to run vmnet-helper without a password": "", + "Unable to run vmnet-helper without a password": "Impossible d'exécuter vmnet-helper sans mot de passe", "Unable to safely downgrade existing Kubernetes v{{.old}} cluster to v{{.new}}": "Impossible de rétrograder en toute sécurité le cluster Kubernetes v{{.old}} existant vers v{{.new}}", "Unable to stop VM": "Impossible d'arrêter la VM", "Unable to update {{.driver}} driver: {{.error}}": "Impossible de mettre à jour le pilote {{.driver}} : {{.error}}", @@ -1018,7 +1019,7 @@ "call with cleanup=true to remove old tunnels": "appelez avec cleanup=true pour supprimer les anciens tunnels", "cancel any existing scheduled stop requests": "annuler toutes les demandes d'arrêt programmées existantes", "cannot specify --kubernetes-version with --no-kubernetes,\nto unset a global config run:\n\n$ minikube config unset kubernetes-version": "impossible de spécifier --kubernetes-version avec --no-kubernetes,\npour désactiver une configuration globale, exécutez :\n\n$ minikube config unset kubernetes-version", - "config file does not exist": "", + "config file does not exist": "le fichier de configuration n'existe pas", "config modifies minikube config files using 
subcommands like \"minikube config set driver kvm2\"\nConfigurable fields: \n\n": "config modifie les fichiers de configuration de minikube à l'aide de sous-commandes telles que \"minikube config set driver kvm2\"\nChamps configurables : \n\n", "config view failed": "échec de la vue de configuration", "containers paused status: {{.paused}}": "état des conteneurs en pause : {{.paused}}", @@ -1046,7 +1047,7 @@ "error: --output must be 'text', 'yaml' or 'json'": "erreur : --output doit être 'text', 'yaml' ou 'json'", "error: --output must be 'yaml' or 'json'": "erreur : --output doit être 'yaml' ou 'json'", "experimental": "expérimental", - "extra waiting: {{.error}}": "", + "extra waiting: {{.error}}": "attente supplémentaire : {{.error}}", "failed to acquire lock due to unexpected error": "échec de l'acquisition du verrou en raison d'une erreur inattendue", "failed to add node": "échec de l'ajout du nœud", "failed to load profile: {{.error}}": "échec du chargement du profil : {{.error}}", @@ -1056,7 +1057,7 @@ "failed to set cloud shell kubelet config options": "échec de la définition des options de configuration cloud shell kubelet", "failed to set extra option": "impossible de définir une option supplémentaire", "failed to start node": "échec du démarrage du nœud", - "failed to validate {{.network}} network: {{.reason}}": "", + "failed to validate {{.network}} network: {{.reason}}": "échec de validation du réseau {{.network}} : {{.reason}}", "false": "faux", "fish completion failed": "la complétion fish a échoué", "fish completion.": "complétion fish.", @@ -1106,7 +1107,7 @@ "namespaces to pause": "espaces de noms à mettre en pause", "namespaces to unpause": "espaces de noms à réactiver", "network to run minikube with. Now it is used by docker/podman and KVM drivers. If left empty, minikube will create a new network.": "réseau avec lequel exécuter minikube. Maintenant, il est utilisé par les pilotes docker/podman et KVM. 
Si laissé vide, minikube créera un nouveau réseau.", - "network to run minikube with. Used by docker/podman, qemu, kvm, and vfkit drivers. If left empty, minikube will create a new network.": "", + "network to run minikube with. Used by docker/podman, qemu, kvm, and vfkit drivers. If left empty, minikube will create a new network.": "réseau avec lequel exécuter minikube. Utilisé par les pilotes docker/podman, qemu, kvm et vfkit. Si laissé vide, minikube créera un nouveau réseau.", "none driver does not support multi-node clusters": "aucun pilote ne prend pas en charge les clusters multi-nœuds", "not enough arguments ({{.ArgCount}}).\nusage: minikube config set PROPERTY_NAME PROPERTY_VALUE": "pas assez d'arguments ({{.ArgCount}}).\nusage : minikube config set PROPERTY_NAME PROPERTY_VALUE", "numa node is only supported on k8s v1.18 and later": "le nœud numa n'est pris en charge que sur k8s v1.18 et versions ultérieures", @@ -1154,7 +1155,7 @@ "using metrics-server addon, heapster is deprecated": "utilisation du module metrics-server, heapster est obsolète", "version json failure": "échec de la version du JSON", "version yaml failure": "échec de la version du YAML", - "vmnet-helper was not found on the system, resolve by:\n\n\t\tOption 1) Installing vmnet-helper:\n\n\t\t https://github.com/nirs/vmnet-helper#installation\n\n\t\tOption 2) Using the nat network:\n\n\t\t minikube start{{.profile}} --driver vfkit --network nat": "", + "vmnet-helper was not found on the system, resolve by:\n\n\t\tOption 1) Installing vmnet-helper:\n\n\t\t https://github.com/nirs/vmnet-helper#installation\n\n\t\tOption 2) Using the nat network:\n\n\t\t minikube start{{.profile}} --driver vfkit --network 
nat", "yaml encoding failure": "échec de l'encodage yaml", "zsh completion failed": "complétion de zsh en échec", "zsh completion.": "complétion zsh.", diff --git a/translations/id.json b/translations/id.json index aba1a7975e..e28f8a6e3f 100644 --- a/translations/id.json +++ b/translations/id.json @@ -353,6 +353,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "Jika true, hanya mengunduh dan menyimpan file untuk digunakan nanti - tidak menginstal atau memulai apa pun.", "If true, pods might get deleted and restarted on addon enable": "Jika true, pod mungkin akan dihapus dan dimulai ulang saat addon diaktifkan.", "If true, print web links to addons' documentation if using --output=list (default).": "Jika true, tampilkan tautan dokumentasi addons jika menggunakan --output=list (default).", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "Jika true, mengembalikan daftar profil lebih cepat dengan melewati validasi status klaster.", "If true, will perform potentially dangerous operations. Use with discretion.": "Jika true, akan melakukan operasi yang berpotensi berbahaya. 
Gunakan dengan bijak.", "If you are running minikube within a VM, consider using --driver=none:": "Jika anda menjalankan minikube di dalam VM, pertimbangkan untuk menggunakan --driver=none:", diff --git a/translations/ja.json b/translations/ja.json index bd90b1fd56..6f0e36cd28 100644 --- a/translations/ja.json +++ b/translations/ja.json @@ -370,6 +370,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "true の場合、後の使用のためのファイルのダウンロードとキャッシュ保存のみ行われます。インストールも起動も行いません", "If true, pods might get deleted and restarted on addon enable": "true の場合、有効なアドオンの Pod は削除され、再起動されます", "If true, print web links to addons' documentation if using --output=list (default).": "true の場合、--output=list (default) を利用することでアドオンのドキュメントへの web リンクを表示します", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "true の場合、クラスター状態の検証を省略することにより高速にプロファイル一覧を返します。", "If true, the added node will be marked for work. Defaults to true.": "true の場合、追加されたノードはワーカー用としてマークされます。デフォルトは true です。", "If true, will perform potentially dangerous operations. Use with discretion.": "true の場合、潜在的に危険な操作を行うことになります。慎重に使用してください。", diff --git a/translations/ko.json b/translations/ko.json index 5c74709813..16c4acba9c 100644 --- a/translations/ko.json +++ b/translations/ko.json @@ -406,6 +406,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "", "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", "If true, will perform potentially dangerous operations. 
Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", diff --git a/translations/pl.json b/translations/pl.json index 7aff3dc8f4..ebbc365cf1 100644 --- a/translations/pl.json +++ b/translations/pl.json @@ -386,6 +386,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "", "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If using the none driver, ensure that systemctl is installed": "Jeśli użyto sterownika 'none', upewnij się że systemctl jest zainstalowany", diff --git a/translations/ru.json b/translations/ru.json index 4b73492ce6..60bf1e7ca5 100644 --- a/translations/ru.json +++ b/translations/ru.json @@ -352,6 +352,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "", "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", "If true, will perform potentially dangerous operations. 
Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", diff --git a/translations/strings.txt b/translations/strings.txt index eb9bc9aecc..225d43fd69 100644 --- a/translations/strings.txt +++ b/translations/strings.txt @@ -351,6 +351,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "", "If true, pods might get deleted and restarted on addon enable": "", "If true, print web links to addons' documentation if using --output=list (default).": "", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "", "If true, will perform potentially dangerous operations. Use with discretion.": "", "If you are running minikube within a VM, consider using --driver=none:": "", diff --git a/translations/zh-CN.json b/translations/zh-CN.json index f760be4a40..c24a19d367 100644 --- a/translations/zh-CN.json +++ b/translations/zh-CN.json @@ -475,6 +475,7 @@ "If true, only download and cache files for later use - don't install or start anything.": "如果为 true,仅会下载和缓存文件以备后用 - 不会安装或启动任何项。", "If true, pods might get deleted and restarted on addon enable": "如果为 true,pods可能会被删除并在启用插件时重新启动", "If true, print web links to addons' documentation if using --output=list (default).": "如果为 true,则使用 --output=list(默认值)输出 web 链接到插件文档。", + "If true, returns a detailed list of profiles.": "", "If true, returns list of profiles faster by skipping validating the status of the cluster.": "如果为 true,则通过跳过验证群集的状态从而更快地返回配置文件列表。", "If true, the added node will be marked for work. Defaults to true.": "如果为true,则添加的节点将标记为 work,默认为 true。", "If true, will perform potentially dangerous operations. Use with discretion.": "如果为 true,将执行潜在的危险操作。谨慎使用。",