diff --git a/agent/agent.go b/agent/agent.go index c770544d..b5e21542 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -304,8 +304,7 @@ func NewAgent(ctx context.Context, client *kube.KubernetesClient, namespace stri connMap: map[string]connectionEntry{}, } - clusterCache, err := cluster.NewClusterCacheInstance(ctx, client.Clientset, - a.namespace, a.redisProxyMsgHandler.redisAddress, cacheutil.RedisCompressionGZip) + clusterCache, err := cluster.NewClusterCacheInstance(a.redisProxyMsgHandler.redisAddress, a.redisProxyMsgHandler.redisPassword, cacheutil.RedisCompressionGZip) if err != nil { return nil, fmt.Errorf("failed to create cluster cache instance: %v", err) } diff --git a/agent/outbound_test.go b/agent/outbound_test.go index a16fa911..ddd16e56 100644 --- a/agent/outbound_test.go +++ b/agent/outbound_test.go @@ -396,7 +396,7 @@ func Test_addClusterCacheInfoUpdateToQueue(t *testing.T) { a.emitter = event.NewEventSource("principal") // First populate the cache with dummy data - clusterMgr, err := cluster.NewManager(a.context, a.namespace, miniRedis.Addr(), cacheutil.RedisCompressionGZip, a.kubeClient.Clientset) + clusterMgr, err := cluster.NewManager(a.context, a.namespace, miniRedis.Addr(), "", cacheutil.RedisCompressionGZip, a.kubeClient.Clientset) require.NoError(t, err) err = clusterMgr.MapCluster("test-agent", &v1alpha1.Cluster{ Name: "test-cluster", diff --git a/cmd/argocd-agent/principal.go b/cmd/argocd-agent/principal.go index f2e75074..90652c73 100644 --- a/cmd/argocd-agent/principal.go +++ b/cmd/argocd-agent/principal.go @@ -83,6 +83,7 @@ func NewPrincipalRunCommand() *cobra.Command { keepAliveMinimumInterval time.Duration redisAddress string + redisPassword string redisCompressionType string healthzPort int ) @@ -242,7 +243,7 @@ func NewPrincipalRunCommand() *cobra.Command { opts = append(opts, principal.WithWebSocket(enableWebSocket)) opts = append(opts, principal.WithKeepAliveMinimumInterval(keepAliveMinimumInterval)) - opts = append(opts, 
principal.WithRedis(redisAddress, redisCompressionType)) + opts = append(opts, principal.WithRedis(redisAddress, redisPassword, redisCompressionType)) opts = append(opts, principal.WithHealthzPort(healthzPort)) s, err := principal.NewServer(ctx, kubeConfig, namespace, opts...) @@ -360,6 +361,9 @@ func NewPrincipalRunCommand() *cobra.Command { command.Flags().StringVar(&redisAddress, "redis-server-address", env.StringWithDefault("ARGOCD_PRINCIPAL_REDIS_SERVER_ADDRESS", nil, "argocd-redis:6379"), "Redis server hostname and port (e.g. argocd-redis:6379).") command.Flags().StringVar(&redisPassword, "redis-password", + env.StringWithDefault("REDIS_PASSWORD", nil, ""), + "The password to connect to Redis with.") command.Flags().StringVar(&redisCompressionType, "redis-compression-type", env.StringWithDefault("ARGOCD_PRINCIPAL_REDIS_COMPRESSION_TYPE", nil, string(cacheutil.RedisCompressionGZip)), diff --git a/hack/dev-env/start-agent-autonomous.sh b/hack/dev-env/start-agent-autonomous.sh index ec653ca4..830f29fd 100755 --- a/hack/dev-env/start-agent-autonomous.sh +++ b/hack/dev-env/start-agent-autonomous.sh @@ -24,6 +24,10 @@ SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" echo $ARGOCD_AGENT_REMOTE_PORT export ARGOCD_AGENT_REMOTE_PORT=${ARGOCD_AGENT_REMOTE_PORT:-8443} +if test "${REDIS_PASSWORD}" = ""; then + export REDIS_PASSWORD=$(kubectl get secret argocd-redis --context=vcluster-agent-autonomous -n argocd -o jsonpath='{.data.auth}' | base64 --decode) +fi + # Point the agent to the toxiproxy server if it is configured from the e2e tests E2E_ENV_FILE="/tmp/argocd-agent-e2e" if [ -f "$E2E_ENV_FILE" ]; then diff --git a/hack/dev-env/start-agent-managed.sh b/hack/dev-env/start-agent-managed.sh index 48a543b4..53cd4145 100755 --- a/hack/dev-env/start-agent-managed.sh +++ b/hack/dev-env/start-agent-managed.sh @@ -23,6 +23,10 @@ SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" export
ARGOCD_AGENT_REMOTE_PORT=${ARGOCD_AGENT_REMOTE_PORT:-8443} +if test "${REDIS_PASSWORD}" = ""; then + export REDIS_PASSWORD=$(kubectl get secret argocd-redis --context=vcluster-agent-managed -n argocd -o jsonpath='{.data.auth}' | base64 --decode) +fi + # Point the agent to the toxiproxy server if it is configured from the e2e tests E2E_ENV_FILE="/tmp/argocd-agent-e2e" if [ -f "$E2E_ENV_FILE" ]; then diff --git a/hack/dev-env/start-principal.sh b/hack/dev-env/start-principal.sh index 34137d0e..a1d65479 100755 --- a/hack/dev-env/start-principal.sh +++ b/hack/dev-env/start-principal.sh @@ -35,6 +35,10 @@ if test "${ARGOCD_PRINCIPAL_REDIS_SERVER_ADDRESS}" = ""; then export ARGOCD_PRINCIPAL_REDIS_SERVER_ADDRESS fi +if test "${REDIS_PASSWORD}" = ""; then + export REDIS_PASSWORD=$(kubectl get secret argocd-redis --context=vcluster-control-plane -n argocd -o jsonpath='{.data.auth}' | base64 --decode) +fi + # Point the principal to the e2e test configuration if it exists E2E_ENV_FILE="/tmp/argocd-agent-e2e" if [ -f "$E2E_ENV_FILE" ]; then diff --git a/internal/argocd/cluster/cluster.go b/internal/argocd/cluster/cluster.go index 04a9713d..72a9c473 100644 --- a/internal/argocd/cluster/cluster.go +++ b/internal/argocd/cluster/cluster.go @@ -15,7 +15,6 @@ package cluster import ( - "context" "errors" "fmt" "time" @@ -24,12 +23,10 @@ import ( "github.com/redis/go-redis/v9" "github.com/sirupsen/logrus" - "github.com/argoproj/argo-cd/v3/common" appv1 "github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1" cacheutil "github.com/argoproj/argo-cd/v3/util/cache" appstatecache "github.com/argoproj/argo-cd/v3/util/cache/appstate" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" ) // SetAgentConnectionStatus updates cluster info with connection state and time in mapped cluster at principal. 
@@ -167,14 +164,9 @@ func (m *Manager) setClusterInfo(clusterServer, agentName, clusterName string, c } // NewClusterCacheInstance creates a new cache instance with Redis connection -func NewClusterCacheInstance(ctx context.Context, kubeclient kubernetes.Interface, - namespace, redisAddress string, redisCompressionType cacheutil.RedisCompressionType) (*appstatecache.Cache, error) { - redisOptions := &redis.Options{Addr: redisAddress} - - if err := common.SetOptionalRedisPasswordFromKubeConfig(ctx, kubeclient, namespace, redisOptions); err != nil { - return nil, fmt.Errorf("failed to set redis password for namespace %s: %v", namespace, err) - } +func NewClusterCacheInstance(redisAddress, redisPassword string, redisCompressionType cacheutil.RedisCompressionType) (*appstatecache.Cache, error) { + redisOptions := &redis.Options{Addr: redisAddress, Password: redisPassword} redisClient := redis.NewClient(redisOptions) clusterCache := appstatecache.NewCache(cacheutil.NewCache( diff --git a/internal/argocd/cluster/cluster_test.go b/internal/argocd/cluster/cluster_test.go index c729129d..38e3195a 100644 --- a/internal/argocd/cluster/cluster_test.go +++ b/internal/argocd/cluster/cluster_test.go @@ -32,7 +32,7 @@ func setup(t *testing.T, redisAddress string) (string, *Manager) { t.Helper() agentName, clusterName := "agent-test", "cluster" - m, err := NewManager(context.Background(), "default", redisAddress, cacheutil.RedisCompressionNone, + m, err := NewManager(context.Background(), "default", redisAddress, "", cacheutil.RedisCompressionNone, kube.NewFakeKubeClient("default")) require.NoError(t, err) @@ -221,7 +221,7 @@ func Test_SetAgentConnectionStatus(t *testing.T) { t.Run("SetAgentConnectionStatus with invalid redis address", func(t *testing.T) { // Create a manager with invalid redis address - invalidM, err := NewManager(context.Background(), "default", "invalid:redis:address", + invalidM, err := NewManager(context.Background(), "default", "invalid:redis:address", "", 
cacheutil.RedisCompressionNone, kube.NewFakeKubeClient("default")) require.NoError(t, err) @@ -300,7 +300,7 @@ func Test_RefreshClusterInfo(t *testing.T) { t.Run("RefreshClusterInfo with invalid redis", func(t *testing.T) { // Create manager with invalid redis - invalidM, err := NewManager(context.Background(), "default", "invalid:redis", + invalidM, err := NewManager(context.Background(), "default", "invalid:redis", "", cacheutil.RedisCompressionNone, kube.NewFakeKubeClient("default")) require.NoError(t, err) diff --git a/internal/argocd/cluster/informer_test.go b/internal/argocd/cluster/informer_test.go index b62042b7..99796c1f 100644 --- a/internal/argocd/cluster/informer_test.go +++ b/internal/argocd/cluster/informer_test.go @@ -15,7 +15,7 @@ import ( func Test_onClusterAdded(t *testing.T) { t.Run("Successfully add a cluster", func(t *testing.T) { - m, err := NewManager(context.TODO(), "argocd", "", "", kube.NewFakeKubeClient("argocd")) + m, err := NewManager(context.TODO(), "argocd", "", "", "", kube.NewFakeKubeClient("argocd")) require.NoError(t, err) s := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -29,7 +29,7 @@ func Test_onClusterAdded(t *testing.T) { assert.Len(t, m.clusters, 1) }) t.Run("Secret is missing one or more labels", func(t *testing.T) { - m, err := NewManager(context.TODO(), "argocd", "", "", kube.NewFakeKubeClient("argocd")) + m, err := NewManager(context.TODO(), "argocd", "", "", "", kube.NewFakeKubeClient("argocd")) require.NoError(t, err) s := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -46,7 +46,7 @@ func Test_onClusterAdded(t *testing.T) { assert.Len(t, m.clusters, 0) }) t.Run("Target agent already has a mapping", func(t *testing.T) { - m, err := NewManager(context.TODO(), "argocd", "", "", kube.NewFakeKubeClient("argocd")) + m, err := NewManager(context.TODO(), "argocd", "", "", "", kube.NewFakeKubeClient("argocd")) require.NoError(t, err) s := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -83,7 +83,7 @@ func Test_onClusterUpdated(t 
*testing.T) { Name: "cluster", }, } - m, err := NewManager(context.TODO(), "argocd", "", "", kube.NewFakeKubeClient("argocd")) + m, err := NewManager(context.TODO(), "argocd", "", "", "", kube.NewFakeKubeClient("argocd")) require.NoError(t, err) m.mapCluster("agent1", &v1alpha1.Cluster{}) assert.NotNil(t, m.mapping("agent1")) @@ -111,7 +111,7 @@ func Test_onClusterUpdated(t *testing.T) { Name: "cluster2", }, } - m, err := NewManager(context.TODO(), "argocd", "", "", kube.NewFakeKubeClient("argocd")) + m, err := NewManager(context.TODO(), "argocd", "", "", "", kube.NewFakeKubeClient("argocd")) require.NoError(t, err) m.mapCluster("agent1", &v1alpha1.Cluster{Name: "cluster1"}) assert.NotNil(t, m.mapping("agent1")) diff --git a/internal/argocd/cluster/manager.go b/internal/argocd/cluster/manager.go index 24c89b77..07976d82 100644 --- a/internal/argocd/cluster/manager.go +++ b/internal/argocd/cluster/manager.go @@ -63,25 +63,21 @@ type Manager struct { // manager filters *filter.Chain[*v1.Secret] - redisAddress string - redisCompressionType cacheutil.RedisCompressionType - clusterCache *appstatecache.Cache + clusterCache *appstatecache.Cache } // NewManager instantiates and initializes a new Manager. 
-func NewManager(ctx context.Context, namespace, redisAddress string, redisCompressionType cacheutil.RedisCompressionType, kubeclient kubernetes.Interface) (*Manager, error) { +func NewManager(ctx context.Context, namespace, redisAddress, redisPassword string, redisCompressionType cacheutil.RedisCompressionType, kubeclient kubernetes.Interface) (*Manager, error) { var err error m := &Manager{ - clusters: make(map[string]*v1alpha1.Cluster), - namespace: namespace, - kubeclient: kubeclient, - ctx: ctx, - filters: filter.NewFilterChain[*v1.Secret](), - redisAddress: redisAddress, - redisCompressionType: redisCompressionType, + clusters: make(map[string]*v1alpha1.Cluster), + namespace: namespace, + kubeclient: kubeclient, + ctx: ctx, + filters: filter.NewFilterChain[*v1.Secret](), } - m.clusterCache, err = NewClusterCacheInstance(ctx, kubeclient, namespace, redisAddress, redisCompressionType) + m.clusterCache, err = NewClusterCacheInstance(redisAddress, redisPassword, redisCompressionType) if err != nil { return nil, fmt.Errorf("failed to create cluster cache instance: %v", err) } diff --git a/internal/argocd/cluster/manager_test.go b/internal/argocd/cluster/manager_test.go index dab05e33..53eb7ce2 100644 --- a/internal/argocd/cluster/manager_test.go +++ b/internal/argocd/cluster/manager_test.go @@ -53,7 +53,7 @@ func Test_StartStop(t *testing.T) { }, } clt := kube.NewFakeClientsetWithResources(redisSecret) - m, err := NewManager(context.TODO(), "argocd", "", "", clt) + m, err := NewManager(context.TODO(), "argocd", "", "", "", clt) require.NoError(t, err) require.NotNil(t, m) err = m.Start() @@ -74,7 +74,7 @@ func Test_onClusterAdd(t *testing.T) { }, } clt := kube.NewFakeClientsetWithResources(redisSecret) - m, err := NewManager(context.TODO(), "argocd", "", "", clt) + m, err := NewManager(context.TODO(), "argocd", "", "", "", clt) require.NoError(t, err) require.NotNil(t, m) err = m.Start() diff --git a/principal/options.go b/principal/options.go index 
dbf50a67..1b108d42 100644 --- a/principal/options.go +++ b/principal/options.go @@ -72,6 +72,7 @@ type ServerOptions struct { rootCa *x509.CertPool clientCertSubjectMatch bool redisAddress string + redisPassword string redisCompressionType cacheutil.RedisCompressionType healthzPort int } @@ -435,7 +436,7 @@ func WithKeepAliveMinimumInterval(interval time.Duration) ServerOption { } } -func WithRedis(redisAddress, redisCompressionTypeStr string) ServerOption { +func WithRedis(redisAddress, redisPassword, redisCompressionTypeStr string) ServerOption { return func(o *Server) error { redisCompressionType, err := cacheutil.CompressionTypeFromString(redisCompressionTypeStr) if err != nil { @@ -443,6 +444,7 @@ func WithRedis(redisAddress, redisCompressionTypeStr string) ServerOption { } o.options.redisCompressionType = redisCompressionType o.options.redisAddress = redisAddress + o.options.redisPassword = redisPassword return nil } diff --git a/principal/server.go b/principal/server.go index 5cb533e6..626a63ea 100644 --- a/principal/server.go +++ b/principal/server.go @@ -365,7 +365,7 @@ func NewServer(ctx context.Context, kubeClient *kube.KubernetesClient, namespace // Instantiate the cluster manager to handle Argo CD cluster secrets for // agents. - s.clusterMgr, err = cluster.NewManager(s.ctx, s.namespace, s.options.redisAddress, s.options.redisCompressionType, s.kubeClient.Clientset) + s.clusterMgr, err = cluster.NewManager(s.ctx, s.namespace, s.options.redisAddress, s.options.redisPassword, s.options.redisCompressionType, s.kubeClient.Clientset) if err != nil { return nil, err }