diff --git a/cmd/pitr/pxc/pxc.go b/cmd/pitr/pxc/pxc.go
index f2553ce433..451188104a 100644
--- a/cmd/pitr/pxc/pxc.go
+++ b/cmd/pitr/pxc/pxc.go
@@ -219,6 +219,7 @@ func GetPXCFirstHost(ctx context.Context, pxcServiceName string) (string, error)
 	sort.Strings(nodes)
 	lastHost := ""
 	for _, node := range nodes {
+		log.Printf("PXC Node: %s", node)
 		if strings.Contains(node, "wsrep_ready:ON:wsrep_connected:ON:wsrep_local_state_comment:Synced:wsrep_cluster_status:Primary") {
 			nodeArr := strings.Split(node, ":")
 			lastHost = nodeArr[0]
@@ -229,6 +230,8 @@ func GetPXCFirstHost(ctx context.Context, pxcServiceName string) (string, error)
 		return "", errors.New("can't find host")
 	}
 
+	log.Printf("connecting to %s", lastHost)
+
 	return lastHost, nil
 }
diff --git a/e2e-tests/functions b/e2e-tests/functions
index 6c29fb7ea7..6251d69c81 100755
--- a/e2e-tests/functions
+++ b/e2e-tests/functions
@@ -67,6 +67,13 @@ log() {
 	echo "[$(date +%Y-%m-%dT%H:%M:%S%z)]" $*
 }
 
+sleep_with_log() {
+	local d=$1
+
+	log "sleeping for ${d} seconds"
+	sleep ${d}
+}
+
 HELM_VERSION=$(helm version -c | $sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/')
 if [ "${HELM_VERSION:0:2}" == "v2" ]; then
diff --git a/e2e-tests/pitr/conf/restore-on-pitr-minio-time.yaml b/e2e-tests/pitr/conf/restore-on-pitr-minio-time.yaml
index 6177a2f53b..b95499c1b4 100755
--- a/e2e-tests/pitr/conf/restore-on-pitr-minio-time.yaml
+++ b/e2e-tests/pitr/conf/restore-on-pitr-minio-time.yaml
@@ -4,7 +4,7 @@ metadata:
   name: restore-on-pitr-minio-time
 spec:
   pxcCluster: pitr
-  backupName: on-pitr-minio
+  backupName: on-pitr-minio-2
   pitr:
     type: date
     date: ""
Exiting" ${gtid} + exit 1 + fi + + echo ${gtid} +} + main() { create_infra $namespace deploy_cert_manager kubectl_bin apply -f "$test_dir/conf/issuer.yml" kubectl_bin apply -f "$test_dir/conf/cert.yml" - sleep 25 + sleep_with_log 25 # We are using minio with tls enabled to check if `verifyTLS: false` works fine start_minio "tls-minio" @@ -87,86 +163,58 @@ main() { write_test_data "$cluster" # test changing xtrabackup password - desc "changing xtrabackup password multiple times" + desc "patching xtrabackup password" patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass1" | base64)" wait_cluster_consistency ${cluster} 3 2 + + desc "patching xtrabackup password" patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass2" | base64)" wait_cluster_consistency ${cluster} 3 2 + + desc "patching xtrabackup password" patch_secret "my-cluster-secrets" "xtrabackup" "$(echo -n "pass3" | base64)" wait_cluster_consistency ${cluster} 3 2 compare_kubectl secret/${cluster}-mysql-init - desc 'show binlog events' proxy=$(get_proxy "$cluster") - run_mysql "SHOW BINLOG EVENTS IN 'binlog.000005';" "-h ${proxy} -uroot -proot_password" - run_mysql "SHOW BINLOG EVENTS IN 'binlog.000006';" "-h ${proxy} -uroot -proot_password" - - time_now=$(run_mysql "SELECT now();" "-h ${proxy} -uroot -proot_password") - gtid=$(run_mysql "SELECT @@gtid_executed;" "-h ${proxy} -uroot -proot_password" | $sed 's/\([a-f0-9-]\{36\}\):[0-9]*-\([0-9]*\).*/\1:\2/') + gtid=$(get_gtid_executed ${proxy}) - if [[ ! ${gtid} =~ ${GTID_PATTERN} ]]; then - printf "Some garbage --> %s <-- instead of legit GTID. Exiting" ${gtid} - exit 1 - fi - - write_data_for_pitr "$cluster" - sleep 120 # need to wait while collector catch new data + write_data_for_pitr "$cluster" 100503 3 + sleep_with_log 130 # wait for two binlog collection cycles check_latest_restorable_time "on-pitr-minio" - timeout=60 - binlogs_exist=0 - for i in $(seq 1 5); do - echo "Checking if binlogs exist in bucket (attempt $i)..." - binlogs_exist=$( - kubectl_bin run -n "${NAMESPACE}" -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url https://minio-service:9000 --no-verify-ssl s3 ls operator-testing/binlogs/ | grep -c "binlog" | cat - exit "${PIPESTATUS[0]}" - ) - if [ "$binlogs_exist" -gt 0 ]; then - echo "${binlogs_exist} binlogs found in bucket" - break - else - d=$((timeout * i)) - echo "No binlogs found in bucket. Sleeping for ${d} seconds..." 
-			sleep ${d}
-		fi
-	done
+	check_binlog_collection
 
-	if [ "$binlogs_exist" -eq 0 ]; then
-		echo "Binlogs are not found in S3"
-		exit 1
-	fi
+	check_binlog_gap
 
+	desc "[CASE 1] PiTR with GTID: ${gtid}"
 	run_recovery_check_pitr "$cluster" "restore-on-pitr-minio-gtid" "on-pitr-minio" "select-2" "" "" "$gtid"
-	desc "done gtid type"
+	desc "[CASE 1] PiTR with GTID: OK"
 
-	desc 'check for passwords leak'
-	check_passwords_leak
+	run_backup "$cluster" "on-pitr-minio" "on-pitr-minio-2"
+	sleep_with_log 130 # wait for two binlog collection cycles
+	check_binlog_gap "on-pitr-minio-2"
 
-	sleep 60
-	if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then
-		echo "Binlog gap detected"
-		exit 1
-	fi
+	time_now=$(run_mysql "SELECT now();" "-h ${proxy} -uroot -proot_password")
+	write_data_for_pitr "$cluster" 100503 3
+
+	desc "[CASE 2] PiTR with datetime: ${time_now}"
 	run_recovery_check_pitr "$cluster" "restore-on-pitr-minio-time" "on-pitr-minio" "select-3" "$time_now" "" ""
-	desc "done date type"
-	sleep 60
-	if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then
-		echo "Binlog gap detected"
-		exit 1
-	fi
+	desc "[CASE 2] PiTR with datetime: OK"
+
+	run_backup "$cluster" "on-pitr-minio" "on-pitr-minio-3"
+	sleep_with_log 130 # wait for two binlog collection cycles
+	check_binlog_gap "on-pitr-minio-3"
+
+	write_data_for_pitr "$cluster" 100503 3
+	sleep_with_log 130 # wait for two binlog collection cycles
 
-	dest=$(sed 's,/,\\/,g' <<<$(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.destination}'))
+	dest=$(sed 's,/,\\/,g' <<<$(kubectl get pxc-backup on-pitr-minio-3 -o jsonpath='{.status.destination}'))
+	desc "[CASE 3] PiTR with latest using backupSource: ${dest}"
 	run_recovery_check_pitr "$cluster" "restore-on-pitr-minio" "on-pitr-minio" "select-4" "" "$dest" ""
-	desc "done latest type"
-	sleep 60
-	if [[ $(kubectl get pxc-backup on-pitr-minio -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then
-		echo "Binlog gap detected"
-		exit 1
-	fi
+	desc "[CASE 3] PiTR with latest using backupSource: OK"
 
 	destroy $namespace
 	desc "test passed"
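
Note on write_data_for_pitr: its new body is cut off above. Based on the new call sites (write_data_for_pitr "$cluster" 100503 3) and the three hard-coded INSERTs this patch removes, the added loop presumably builds one INSERT per row starting from the given id. A sketch along these lines, with variable names that are illustrative rather than taken from the patch:

	write_data_for_pitr() {
		local cluster=$1
		local start=$2
		local rows=$3

		local proxy=$(get_proxy "$cluster")

		# build one INSERT per row, starting from the given id (e.g. 100503, 100504, 100505)
		local sql=""
		for ((i = 0; i < rows; i++)); do
			sql="${sql}INSERT test.test (id) VALUES ($((start + i))); "
		done

		desc "write data for pitr"
		run_mysql "${sql}" "-h $proxy -uroot -proot_password"
	}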
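
get_gtid_executed is called from main() but only its tail is visible above. A sketch of what it likely contains, reusing the inline code this patch removes from main(); the parameter name is an assumption. The sed collapses the executed GTID set to a single <source-uuid>:<last-transaction-id> pair (e.g. "…:1-30" becomes "…:30"), and GTID_PATTERN, defined elsewhere in the test suite, validates the result:

	get_gtid_executed() {
		local host=$1

		# collapse the executed set to "<source-uuid>:<last-transaction-id>"
		local gtid=$(run_mysql "SELECT @@gtid_executed;" "-h ${host} -uroot -proot_password" \
			| $sed 's/\([a-f0-9-]\{36\}\):[0-9]*-\([0-9]*\).*/\1:\2/')

		if [[ ! ${gtid} =~ ${GTID_PATTERN} ]]; then
			printf "Some garbage --> %s <-- instead of legit GTID. Exiting" ${gtid}
			exit 1
		fi

		echo ${gtid}
	}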
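
check_binlog_collection is referenced in main() but not defined in the visible hunks; presumably it wraps the inline bucket-polling loop this patch deletes. A sketch under that assumption (the retry policy and aws-cli invocation come from the deleted code, not from the patch's actual definition):

	check_binlog_collection() {
		local timeout=60
		local binlogs_exist=0

		for i in $(seq 1 5); do
			echo "Checking if binlogs exist in bucket (attempt $i)..."
			binlogs_exist=$(
				kubectl_bin run -n "${NAMESPACE}" -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
					/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
					/usr/bin/aws --endpoint-url https://minio-service:9000 --no-verify-ssl s3 ls operator-testing/binlogs/ | grep -c "binlog" | cat
				exit "${PIPESTATUS[0]}"
			)
			if [ "$binlogs_exist" -gt 0 ]; then
				echo "${binlogs_exist} binlogs found in bucket"
				return 0
			fi
			sleep_with_log $((timeout * i))
		done

		echo "Binlogs are not found in S3"
		exit 1
	}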
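
check_binlog_gap is likewise not defined in this excerpt. main() calls it both with and without a backup name, so it presumably wraps the deleted "Binlog with GTID set" condition check, parameterized by backup; a sketch in which the default value and argument handling are assumptions:

	check_binlog_gap() {
		local backup=${1:-"on-pitr-minio"}

		# fail the test if the backup reports a binlog gap in its status conditions
		if [[ $(kubectl get pxc-backup ${backup} -o jsonpath='{.status.conditions}' | grep -c 'Binlog with GTID set') -eq 1 ]]; then
			echo "Binlog gap detected"
			exit 1
		fi
	}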