diff --git a/README.md b/README.md index 63f01cf..5e56f0e 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,52 @@ # ZKP Trusted Setup Ceremony Coordinator -**Warning** -Please note that this tool is under development. Please consider it unusable before the first release. + +* [ZKP Trusted Setup Ceremony Coordinator](#zkp-trusted-setup-ceremony-coordinator) + * [Overview](#overview) + * [Online mode](#online-mode) + * [Offline mode](#offline-mode) + * [Snarkjs powers of tau (ptau) -> Phase 1 conversion](#snarkjs-powers-of-tau-ptau---phase-1-conversion) + * [Prerequisites](#prerequisites) + * [Build](#build) + * [Usage](#usage) + * [Commands](#commands) + * [General purpose commands](#general-purpose-commands) + * [`help`](#help) + * [`ptau`](#ptau) + * [Online mode commands](#online-mode-commands) + * [`server`](#server) + * [Using AWS S3 for artifacts storage](#using-aws-s3-for-artifacts-storage) + * [`client`](#client) + * [Offline mode commands](#offline-mode-commands) + * [`init`](#init) + * [`contribute`](#contribute) + * [`verify`](#verify) + * [`extract-keys`](#extract-keys) + ## Overview This utility program allows for performing a Trusted Setup Ceremony in a Multi-Party Computation fashion. It is meant to be used by the Coordinator of the ceremony, as well as by the Contributors. In the end, the Coordinator will obtain Proving and Verifying Keys, which can be used to generate proofs for the circuit the ceremony was conducted for. +**This project is designed to work with ZK circuits implemented in [Gnark](https://github.com/consensys/gnark).** + +Gnark version used for implementing the circuit the ceremony will be conducted for must match the Gnark version used +in this project. Please consult [`go.mod`](./go.mod) to learn which version of Gnark is used. Make sure your circuit +uses the same Gnark version. 
+ +Your Gnark project must also satisfy the following constraints: +- Supported curve: BN254 +- Supported backend: Groth16 + + ### Online mode The primary mode of the program. In this mode, the Coordinator runs the ceremony server, which is responsible for accepting contributions from the Contributors. The Contributors connect to the Coordinator and contribute to the ceremony. -See help for `server` and `client` commands for details. +See help for [`server`](#server) and [`client`](#client) commands for details. ### Offline mode @@ -28,7 +60,8 @@ In this mode, sending Phase 2 files must be performed manually by the Coordinato At the end of the ceremony, the Coordinator will have a list of accepted contributions. The Coordinator can then perform the final verification and extract the Proving and the Verifying Keys. -See help for `init`, `contrib`, `verify` and `extract` commands for details. +See help for [`init`](#init), [`contribute`](#contribute), [`verify`](#verify) and [`extract-keys`](#extract-keys) +commands for details. ### Snarkjs powers of tau (ptau) -> Phase 1 conversion @@ -37,20 +70,11 @@ the initialization of the offline mode ceremony, if the Coordinator has a ptau f This step is not necessary if the Coordinator already has a Phase 1 file. -## Constraints - -Gnark version used for implementing the circuit the ceremony will be conducted for must match the Gnark version used -in this project. Please consult `[go.mod](./go.mod)` to learn which version of Gnark is used. - -Your Gnark project must satisfy the following constraints: -- Supported curve: BN254 -- Supported backend: Groth16 - ## Prerequisites These are one-time steps that must be done in order to build the program. -Install [Go](https://go.dev/dl/). Any recent version will do. Look into `go.mod` to see the minimum required version. +Install [Go](https://go.dev/dl/). Any recent version will do. Look into [`go.mod`](./go.mod) to see the minimum required version. 
Install [Protocol Buffer Compiler](https://protobuf.dev/installation/). @@ -82,7 +106,7 @@ Run the program with: $ go run . # or, after the program was built -./trusted-setup +$ ./trusted-setup ``` Running the program with no arguments lists the available commands. Running the program with the command but without @@ -107,14 +131,28 @@ tau file to a Phase 1 file, which can be used to initialize the Phase 2 of the c - `--ptau` - A Snarkjs powers of tau file, - `--phase1` - The output Phase 1 file. +Example usage: + +```shell +$ ./trusted-setup ptau --ptau test.ptau --phase1 test.ph1 +2025/09/02 00:31:32 Convert Starkjs powers of tau to Phase 1: + Load ptau from: offline/test/resources/test.ptau + Store Phase 1 to: test.ph1 +2025/09/02 00:31:32 Loading Starkjs powers of tau from offline/test/resources/test.ptau +// ... some details of the ptau file, skipped for brevity ... +2025/09/02 00:31:32 Converting Starkjs powers of tau to Phase 1 +2025/09/02 00:31:32 Storing Phase 1 to test.ph1 +2025/09/02 00:31:32 Operation successful +``` + ### Online mode commands #### `server` Start a Ceremony server. This step is performed by the Coordinator. -The server is responsible for orchestrating the ceremony, receiving contributions from the participants and, in the end, -generating Proving and Verifying Keys. +The server is responsible for orchestrating the ceremony, receiving contributions from the [participants](#client) and, +in the end, generating Proving and Verifying Keys. The server is configured with a JSON file. An example configuration is shown below: ```json5 @@ -130,6 +168,21 @@ The server is configured with a JSON file. An example configuration is shown bel "r1cs": "resources/server.r1cs", // The path to the Phase 1 file (possibly generated from a ptau file - see the `ptau` command for details). "phase1": "resources/server.ph1", + // (optional) If true, AWS S3 is chosen as the storage backend. 
If false or not present, + // the server will fall back to storing artifacts in tmpfs. + "useS3": true, + // (optional) Name of the AWS S3 bucket to store ceremony artifacts. The bucket must exist. + // If not provided, this information is taken from a default source (env or AWS CLI config file). + "s3Bucket": "my-ceremony-bucket", + // (optional) Region of the AWS S3 bucket to store ceremony artifacts. + // If not provided, this information is taken from a default source (env or AWS CLI config file). + "s3Region": "us-east-1", + // (optional) Profile of the AWS S3 bucket to store ceremony artifacts. + // If not provided, this information is taken from a default source (env or AWS CLI config file). + "s3Profile": "test", + // (optional) Credentials file for AWS S3. + // If not provided, this information is taken from a default source (env or AWS CLI config file). + "s3CredentialsFile": "~/.aws/credentials", } ``` @@ -137,11 +190,113 @@ Coordination of the ceremony is automatic. No action from the Coordinator is req and stopping it with CTRL+C at any arbitrary moment. At CTRL+C, the server stops accepting new contributions and starts key extraction from the existing contributions. +At the end of the contribution, the artifacts are either saved in the temporary filesystem or in the AWS S3, depending +on the provided configuration. The stored artifacts are: +- SRS commons of the circuit, +- all intermediate Phase 2 files, +- Proving Key, +- Verifying Key, +- JSON structured log documenting the ceremony. + +These artifacts are enough to re-generate the keys again using the offline mode [`extract-keys`](#extract-keys) command. + - `--config` - Path to a JSON file containing the server configuration. 
+Example usage: + +```shell +$ ./trusted-setup server --config small.json +2025/09/02 00:33:00 Loading config file: small.json +2025/09/02 00:33:00 Loading R1CS from online/test/resources/server.r1cs +2025/09/02 00:33:00 Loading Phase 1 from online/test/resources/server.ph1 +2025/09/02 00:33:01 INF beacon=a40d1701974c7e804732d3bf21131137f6b05dc054b073d0e48110ed2099f11c +2025/09/02 00:33:01 Ceremony artifacts will be stored in tmpfs +2025/09/02 00:33:01 Initializing Phase 2 +2025/09/02 00:33:02 INF new ceremony started name=testCeremony +2025/09/02 00:33:02 Server started, waiting for Contributors on 127.0.0.1:7312... +2025/09/02 00:33:02 Press Ctrl+C to end Ceremony and generate Keys +2025/09/02 00:33:09 INF new contributor connected ip=127.0.0.1:64624 +2025/09/02 00:33:09 INF contributor position update ip=127.0.0.1:64624 newQueuePosition=0 +2025/09/02 00:33:09 INF sending last accepted contribution ip=127.0.0.1:64624 +2025/09/02 00:33:09 INF sent last accepted contribution ip=127.0.0.1:64624 size=27147 +2025/09/02 00:33:09 INF receiving new contribution candidate ip=127.0.0.1:64624 +2025/09/02 00:33:09 INF new contribution candidate accepted ip=127.0.0.1:64624 size=27179 +^C2025/09/02 00:33:15 Generating keys out of 1 contributions... 
+2025/09/02 00:33:15 Artifacts generated in the ceremony: +2025/09/02 00:33:15 /var/folders/jh/bj77hlmj3k50qyxt3tzlbrwc0000gn/T/testCeremony-srs-commons-1639893806 +2025/09/02 00:33:15 /var/folders/jh/bj77hlmj3k50qyxt3tzlbrwc0000gn/T/testCeremony-phase2-1-3080991318 +2025/09/02 00:33:15 /var/folders/jh/bj77hlmj3k50qyxt3tzlbrwc0000gn/T/testCeremony-pk-2115635955 +2025/09/02 00:33:15 /var/folders/jh/bj77hlmj3k50qyxt3tzlbrwc0000gn/T/testCeremony-vk-2427354540 +2025/09/02 00:33:15 /var/folders/jh/bj77hlmj3k50qyxt3tzlbrwc0000gn/T/testCeremony-log-454330672 +2025/09/02 00:33:15 Operation successful +``` + +Example structured JSON log from the above ceremony: + +```json +{"level":"info","beacon":"a40d1701974c7e804732d3bf21131137f6b05dc054b073d0e48110ed2099f11c","time":"2025-09-02T00:33:01+02:00"} +{"level":"info","name":"testCeremony","time":"2025-09-02T00:33:02+02:00","message":"new ceremony started"} +{"level":"info","ip":"127.0.0.1:64624","time":"2025-09-02T00:33:09+02:00","message":"new contributor connected"} +{"level":"info","newQueuePosition":0,"ip":"127.0.0.1:64624","time":"2025-09-02T00:33:09+02:00","message":"contributor position update"} +{"level":"info","ip":"127.0.0.1:64624","time":"2025-09-02T00:33:09+02:00","message":"sending last accepted contribution"} +{"level":"info","ip":"127.0.0.1:64624","size":27147,"time":"2025-09-02T00:33:09+02:00","message":"sent last accepted contribution"} +{"level":"info","ip":"127.0.0.1:64624","time":"2025-09-02T00:33:09+02:00","message":"receiving new contribution candidate"} +{"level":"info","ip":"127.0.0.1:64624","size":27179,"time":"2025-09-02T00:33:09+02:00","message":"new contribution candidate accepted"} +``` + +##### Using AWS S3 for artifacts storage + +The server can store the ceremony artifacts in AWS S3. + +AWS S3 is chosen as the storage backend if `useS3` configuration field is set to `true`. If the field is `false` or not +present, the server will use tmpfs for storage. Some S3 settings can be overridden. 
See [`server`](#server) +for the details on the configuration file. + +AWS S3 credentials are loaded from the credentials file. This file is automatically generated by the +[AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). The file can be +generated as follows: + +```shell +$ aws configure --profile <profile-name> +AWS Access Key ID [None]: +AWS Secret Access Key [None]: +Default region name [None]: +Default output format [None]: +``` + +After the ceremony, the bucket can be queried as follows: + +```shell +$ AWS_PROFILE=test aws s3 ls s3://ceremony-bucket +2025-09-02 01:58:10 998 log +2025-09-02 01:58:05 27179 phase2-1 +2025-09-02 01:58:09 59955 pk +2025-09-02 01:58:04 98248 srs-commons +2025-09-02 01:58:10 364 vk +``` + +Artifacts can be downloaded as follows: + +```shell +$ AWS_PROFILE=test aws s3 cp s3://ceremony-bucket/ ceremony_artifacts --recursive +download: s3://ceremony-bucket/log to ceremony_artifacts/log +download: s3://ceremony-bucket/vk to ceremony_artifacts/vk +download: s3://ceremony-bucket/phase2-1 to ceremony_artifacts/phase2-1 +download: s3://ceremony-bucket/pk to ceremony_artifacts/pk +download: s3://ceremony-bucket/srs-commons to ceremony_artifacts/srs-commons + +$ ls -l ceremony_artifacts +total 384 +-rw-r--r--@ 1 user group 998 Sep 2 01:58 log +-rw-r--r--@ 1 user group 27179 Sep 2 01:58 phase2-1 +-rw-r--r--@ 1 user group 59955 Sep 2 01:58 pk +-rw-r--r--@ 1 user group 98248 Sep 2 01:58 srs-commons +-rw-r--r--@ 1 user group 364 Sep 2 01:58 vk +``` + #### `client` -Connect to a Ceremony server and provide contributions. +Connect to a Ceremony [server](#server) and provide contributions. This step is performed by the Contributors. The client is responsible for connecting to the server and providing contributions. The client is configured with a host and port of the server. Participation in the ceremony is automatic. 
No action from the Contributor is required @@ -150,6 +305,22 @@ besides starting the client. - `--host` - The IP address of the server, - `--port` - The port of the server. +Example usage: + +```shell +$ ./trusted-setup client --host 127.0.0.1 --port 7312 +2025/09/02 00:33:09 Connecting to 127.0.0.1:7312... +2025/09/02 00:33:09 Joined ceremony: testCeremony +2025/09/02 00:33:09 Contribution slot assigned, position in queue: 0 +2025/09/02 00:33:09 Our turn, downloading last contribution +2025/09/02 00:33:09 Received 27147 bytes +2025/09/02 00:33:09 Generating contribution +2025/09/02 00:33:09 Uploading our contribution +2025/09/02 00:33:09 Sent 27179 bytes +2025/09/02 00:33:09 Contribution accepted +2025/09/02 00:33:09 Operation successful +``` + ### Offline mode commands #### `init` @@ -172,6 +343,25 @@ The command outputs a beacon value, which must then be passed as an argument to - `--phase2` - The output path for the Phase 2 file, - `--srscommons` - The output path for circuit-independent components of the Groth16 SRS. +Example usage: + +```shell +$ ./trusted-setup init --phase1 online/test/resources/server.ph1 --r1cs online/test/resources/server.r1cs --phase2 tiny.ph2 --srscommons tiny.srs +2025/09/02 00:39:21 Initializing Phase 2: + Load Phase 1 from: online/test/resources/server.ph1 + Load R1CS from: online/test/resources/server.r1cs + Store Phase 2 to: tiny.ph2 + Store SRS commons to: tiny.srs + Beacon (pass it to extract-keys): b570d6c3cb53603fc4d297ae83e587fe5a34a8c6b51056d51a42dbdf40df2458 +2025/09/02 00:39:21 Loading R1CS from online/test/resources/server.r1cs +2025/09/02 00:39:21 Loading Phase 1 from online/test/resources/server.ph1 +2025/09/02 00:39:21 Generating SRS commons form Phase 1 +2025/09/02 00:39:21 Storing SRS commons to tiny.srs +2025/09/02 00:39:21 Initializing Phase 2 +2025/09/02 00:39:22 Storing Phase 2 to tiny.ph2 +2025/09/02 00:39:22 Operation successful +``` + #### `contribute` Contribute randomness to Phase 2. 
This step is performed by all the participants of the ceremony. @@ -184,6 +374,28 @@ appended to the name. - `--phase2` - The existing Phase 2 file created in the `init` step or in the previous run of the `contribute` step. +Example usage: + +```shell +$ ./trusted-setup contribute --phase2 tiny.ph2 +2025/09/02 00:40:17 Contribution to Phase 2: + Load Phase 2 from: tiny.ph2 +2025/09/02 00:40:17 Loading Phase 2 from tiny.ph2 +2025/09/02 00:40:17 Contributing randomness to Phase 2 +2025/09/02 00:40:17 Storing Phase 2 to tiny.ph2.20250902004017.451331 +2025/09/02 00:40:17 Phase2 file with contributions: tiny.ph2.20250902004017.451331 +2025/09/02 00:40:17 Operation successful + +$ ./trusted-setup contribute --phase2 tiny.ph2.20250902004017.451331 +2025/09/02 00:40:24 Contribution to Phase 2: + Load Phase 2 from: tiny.ph2.20250902004017.451331 +2025/09/02 00:40:24 Loading Phase 2 from tiny.ph2.20250902004017.451331 +2025/09/02 00:40:24 Contributing randomness to Phase 2 +2025/09/02 00:40:24 Storing Phase 2 to tiny.ph2.20250902004024.355772 +2025/09/02 00:40:24 Phase2 file with contributions: tiny.ph2.20250902004024.355772 +2025/09/02 00:40:24 Operation successful +``` + #### `verify` Verify the last randomness contributed to Phase 2. This step is performed by the Coordinator. @@ -194,11 +406,33 @@ is the output of that contribution process, that was sent back by the Contributo If the verification is successful, the Coordinator can either: - send the next contribution file to the next Contributor for further contributions, or -- export the Proving and Verifying Keys (see [`keys`](#keys).) +- export the Proving and Verifying Keys (see [`extract-keys`](#extract-keys).) - `--phase2prev` - A Phase 2 file being an input to the contribution - `--phase2next` - A Phase 2 file that was contributed to. 
+Example usage: + +```shell +$ ./trusted-setup verify --phase2prev tiny.ph2 --phase2next tiny.ph2.20250902004017.451331 +2025/09/02 00:41:17 Verify single Phase 2 contribution: + Load previous Phase 2 from: tiny.ph2 + Load next Phase 2 from: tiny.ph2.20250902004017.451331 +2025/09/02 00:41:17 Loading Phase 2 from tiny.ph2 +2025/09/02 00:41:17 Loading Phase 2 from tiny.ph2.20250902004017.451331 +2025/09/02 00:41:17 Verifying the most recent Phase 2 against the previous step +2025/09/02 00:41:17 Operation successful + +$ ./trusted-setup verify --phase2prev tiny.ph2.20250902004017.451331 --phase2next tiny.ph2.20250902004024.355772 +2025/09/02 00:44:49 Verify single Phase 2 contribution: + Load previous Phase 2 from: tiny.ph2.20250902004017.451331 + Load next Phase 2 from: tiny.ph2.20250902004024.355772 +2025/09/02 00:44:49 Loading Phase 2 from tiny.ph2.20250902004017.451331 +2025/09/02 00:44:49 Loading Phase 2 from tiny.ph2.20250902004024.355772 +2025/09/02 00:44:49 Verifying the most recent Phase 2 against the previous step +2025/09/02 00:44:49 Operation successful +``` + #### `extract-keys` Extract the Proving and Verifying Keys. This step is performed by the Coordinator. @@ -214,3 +448,23 @@ The output are binary files containing the keys. file generated on initialization. - `--pk` - The output path for the Proving Key file, - `--vk` - The output path for the Verifying Key file. 
+ +Example usage: + +```shell +$ ./trusted-setup extract-keys --r1cs online/test/resources/server.r1cs --srscommons tiny.srs --beacon b570d6c3cb53603fc4d297ae83e587fe5a34a8c6b51056d51a42dbdf40df2458 --phase2 tiny.ph2.20250902004017.451331,tiny.ph2.20250902004024.355772 --pk tiny.pk --vk tiny.vk +2025/09/02 00:44:20 Verify multiple Phase 2 contributions: + Load R1CS from: online/test/resources/server.r1cs + Load SRS commons from: tiny.srs (beacon: b570d6c3cb53603fc4d297ae83e587fe5a34a8c6b51056d51a42dbdf40df2458) + Load Phase 2 from: [tiny.ph2.20250902004017.451331 tiny.ph2.20250902004024.355772] + Store Proving Key to: tiny.pk + Store Verifying Key to: tiny.vk +2025/09/02 00:44:20 Loading R1CS from online/test/resources/server.r1cs +2025/09/02 00:44:20 Loading SRS commons from tiny.srs +2025/09/02 00:44:20 Loading Phase 2 from tiny.ph2.20250902004017.451331 +2025/09/02 00:44:20 Loading Phase 2 from tiny.ph2.20250902004024.355772 +2025/09/02 00:44:20 Verifying all Phase 2 contributions and generating Keys +2025/09/02 00:44:20 Storing Proving Key to tiny.pk +2025/09/02 00:44:20 Storing Verifying Key to tiny.vk +2025/09/02 00:44:20 Operation successful +``` diff --git a/go.mod b/go.mod index 34773e4..88d4783 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,14 @@ module github.com/reilabs/trusted-setup go 1.24 require ( + github.com/aws/aws-sdk-go-v2 v1.38.2 + github.com/aws/aws-sdk-go-v2/config v1.31.4 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.87.2 github.com/consensys/gnark v0.13.0 github.com/consensys/gnark-crypto v0.18.0 github.com/drand/go-clients v0.2.3 - github.com/golang/protobuf v1.5.4 + github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.10.0 github.com/urfave/cli/v3 v3.3.8 github.com/worldcoin/ptau-deserializer v0.2.0 @@ -18,6 +22,21 @@ replace github.com/worldcoin/ptau-deserializer => github.com/reilabs/ptau-deseri require ( github.com/BurntSushi/toml v1.5.0 // indirect + 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.8 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.1 // indirect + github.com/aws/smithy-go v1.23.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -32,6 +51,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 // indirect + github.com/johannesboyne/gofakes3 v0.0.0-20250825084532-6555d310c473 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -43,10 +63,10 @@ require ( github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/ronanh/intcomp v1.1.1 // indirect - github.com/rs/zerolog v1.34.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect + github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/x448/float16 v0.8.4 // indirect go.dedis.ch/fixbuf 
v1.0.3 // indirect + go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.39.0 // indirect @@ -54,6 +74,7 @@ require ( golang.org/x/sync v0.15.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/text v0.26.0 // indirect + golang.org/x/tools v0.33.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f037cc1..e7e5443 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,44 @@ github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ardanlabs/darwin/v2 v2.0.0 h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= +github.com/aws/aws-sdk-go-v2 v1.38.2 h1:QUkLO1aTW0yqW95pVzZS0LGFanL71hJ0a49w4TJLMyM= +github.com/aws/aws-sdk-go-v2 v1.38.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.4 h1:aY2IstXOfjdLtr1lDvxFBk5DpBnHgS5GS3jgR/0BmPw= +github.com/aws/aws-sdk-go-v2/config v1.31.4/go.mod h1:1IAykiegrTp6n+CbZoCpW6kks1I74fEDgl2BPQSkLSU= +github.com/aws/aws-sdk-go-v2/credentials v1.18.8 h1:0FfdP0I9gs/f1rwtEdkcEdsclTEkPB8o6zWUG2Z8+IM= +github.com/aws/aws-sdk-go-v2/credentials v1.18.8/go.mod h1:9UReQ1UmGooX93JKzHyr7PRF3F+p3r+PmRwR7+qHJYA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.5 h1:ul7hICbZ5Z/Pp9VnLVGUVe7rqYLXCyIiPU7hQ0sRkow= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.5/go.mod h1:5cIWJ0N6Gjj+72Q6l46DeaNtcxXHV42w/Uq3fIfeUl4= 
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.2 h1:eZAl6tdv3HrIHAxbpnDQByEOD84bmxyhLmgvUYJ8ggo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.2/go.mod h1:vV+YS0SWfpwbIGOUWbB5NWklaYKscfYrQRb9ggHptxs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.5 h1:d45S2DqHZOkHu0uLUW92VdBoT5v0hh3EyR+DzMEh3ag= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.5/go.mod h1:G6e/dR2c2huh6JmIo9SXysjuLuDDGWMeYGibfW2ZrXg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.5 h1:ENhnQOV3SxWHplOqNN1f+uuCNf9n4Y/PKpl6b1WRP0Q= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.5/go.mod h1:csQLMI+odbC0/J+UecSTztG70Dc4aTCOu4GyPNDNpVo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.5 h1:ovHE1XM53pMGOwINf8Mas4FMl5XRRMAihNokV1YViZ8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.5/go.mod h1:Cmu/DOSYwcr0xYTFk7sA9NJ5HF3ND0EqNUBdoK16nPI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.5 h1:gC3YW8AojITDXfI5avcKZst5iOg6v5aQEU4HIcxwAss= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.5/go.mod h1:z5OdVolKifM0NpEel6wLkM/TQ0eodWB2dmDFoj3WCbw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.5 h1:Cx1M/UUgYu9UCQnIMKaOhkVaFvLy1HneD6T4sS/DlKg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.5/go.mod h1:fTRNLgrTvPpEzGqc9QkeO4hu/3ng+mdtUbL8shUwXz4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.5 h1:IM2yO5Dd9bzCmYEvLU6Di5kduRKh4O93TjrZ47hxLhQ= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.5/go.mod 
h1:0nXagJIQFWms6GJ1jvPJLwr8r3hN6f+kTwt17Q2NrPQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.2 h1:HNAbIp6VXmtKR+JuDmywGcRc3kYoIGT9y4a2Zg9bSTQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.2/go.mod h1:6VSEglrPCTx7gi7Z7l/CtqSgbnFr1N6UJ6+Ik+vjuEo= +github.com/aws/aws-sdk-go-v2/service/sso v1.28.3 h1:z6lajFT/qGlLRB/I8V5CCklqSuWZKUkdwRAn9leIkiQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.28.3/go.mod h1:BnyjuIX0l+KXJVl2o9Ki3Zf0M4pA2hQYopFCRUj9ADU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.1 h1:8yI3jK5JZ310S8RpgdZdzwvlvBu3QbG8DP7Be/xJ6yo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.1/go.mod h1:HPzXfFgrLd02lYpcFYdDz5xZs94LOb+lWlvbAGaeMsk= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.1 h1:3kWmIg5iiWPMBJyq/I55Fki5fyfoMtrn/SkUIpxPwHQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.1/go.mod h1:yi0b3Qez6YamRVJ+Rbi19IgvjfjPODgVRhkWA6RTMUM= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= @@ -65,6 +103,8 @@ github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2 h1:B+aWVgAx+GlFLhtYjIaF0uGjU3rzpl github.com/ingonyama-zk/icicle-gnark/v3 v3.2.2/go.mod h1:CH/cwcr21pPWH+9GtK/PFaa4OGTv4CtfkCKro6GpbRE= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/johannesboyne/gofakes3 v0.0.0-20250825084532-6555d310c473 h1:dFhdS4skQ9FYVrhMYmoTFAZpdrVDKUDL+ACqEBGLZns= +github.com/johannesboyne/gofakes3 v0.0.0-20250825084532-6555d310c473/go.mod h1:zrz/yDxjXycSAS7BFibBFSxIB6DKybZh+x1Bb+hVi4U= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= 
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= @@ -113,8 +153,8 @@ github.com/ronanh/intcomp v1.1.1/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGk github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/urfave/cli/v3 v3.3.8 h1:BzolUExliMdet9NlJ/u4m5vHSotJ3PzEqSAZ1oPMa/E= @@ -149,6 +189,8 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d h1:Ns9kd1Rwzw7t0BR8XMphenji4SmIoNZPn8zhYmaVKP8= +go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d/go.mod h1:92Uoe3l++MlthCm+koNi0tcUCX3anayogF0Pa/sp24k= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -171,6 +213,8 @@ golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA= diff --git a/online/actions/server.go b/online/actions/server.go index b6dca32..edde33c 100644 --- a/online/actions/server.go +++ b/online/actions/server.go @@ -2,16 +2,16 @@ package actions import ( "context" - "fmt" + "io" "log" "os" "os/signal" "syscall" + "github.com/rs/zerolog" "github.com/urfave/cli/v3" "github.com/reilabs/trusted-setup/offline/phase1" - offline_phase2 "github.com/reilabs/trusted-setup/offline/phase2" "github.com/reilabs/trusted-setup/offline/r1cs" server_config "github.com/reilabs/trusted-setup/online/config" "github.com/reilabs/trusted-setup/online/contribution" @@ -19,6 +19,7 @@ import ( "github.com/reilabs/trusted-setup/online/server/ceremony_service" "github.com/reilabs/trusted-setup/online/server/contributors_manager" "github.com/reilabs/trusted-setup/online/server/coordinator" + "github.com/reilabs/trusted-setup/online/storage" "github.com/reilabs/trusted-setup/utils/randomness" ) @@ -41,13 +42,60 @@ func Server(_ context.Context, cmd *cli.Command) error { return err } + logFile, err := os.CreateTemp("", "") + if err != nil { + return 
err + } + defer func(logFile *os.File) { + err = logFile.Close() + if err != nil { + log.Printf("Error closing log file writer: %v", err) + } + }(logFile) + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: "2006/01/02 15:04:05"} + ceremonyLogger := zerolog.New(io.MultiWriter(consoleWriter, logFile)).With().Timestamp().Logger() + beaconProvider, err := randomness.New() if err != nil { return err } + beacon := beaconProvider.GetBeacon() + ceremonyLogger.Info().Hex("beacon", beacon).Send() + + var store storage.Storage + if !config.UseS3 { + log.Print("Ceremony artifacts will be stored in tmpfs") + store = storage.NewTmpfs(config.CeremonyName) + } else { + log.Println("Ceremony artifacts will be stored in AWS S3") + var s3Opts []storage.S3Option + if config.S3Bucket != "" { + log.Printf("\tbucket: %s", config.S3Bucket) + s3Opts = append(s3Opts, storage.WithBucket(config.S3Bucket)) + } + if config.S3Region != "" { + log.Printf("\tregion: %s", config.S3Region) + s3Opts = append(s3Opts, storage.WithRegion(config.S3Region)) + } + if config.S3Profile != "" { + log.Printf("\tprofile: %s", config.S3Profile) + s3Opts = append(s3Opts, storage.WithProfile(config.S3Profile)) + } + if config.S3CredentialsFile != "" { + log.Printf("\tcredentials file: %s", config.S3CredentialsFile) + s3Opts = append(s3Opts, storage.WithCredentialsFile(config.S3CredentialsFile)) + } + store, err = storage.NewS3(s3Opts...) 
+ if err != nil { + return err + } + } log.Print("Initializing Phase 2") - last := contribution.New(p1, ccs, beaconProvider.GetBeacon()) + last, err := contribution.New(p1, ccs, store, beacon) + if err != nil { + return err + } service := ceremony_service.New( config.CeremonyName, @@ -55,6 +103,7 @@ func Server(_ context.Context, cmd *cli.Command) error { last, contributors_manager.New(), ), + &ceremonyLogger, ) s := server.New(service) @@ -66,34 +115,33 @@ func Server(_ context.Context, cmd *cli.Command) error { sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - fmt.Println("Press Ctrl+C to end Ceremony and generate Keys") + log.Println("Press Ctrl+C to end Ceremony and generate Keys") <-sigs s.Stop() - // TODO: this is temporary, keys will go to S3 - pkTemp, err := getTempFilePath("pk") - if err != nil { - return err + if last.GetCount() > 0 { + log.Printf("Generating keys out of %d contributions...\n", last.GetCount()) + _, _, err = last.ExtractKeys() + } else { + log.Printf("No contributions received") } - vkTemp, err := getTempFilePath("vk") + + _, err = logFile.Seek(0, 0) if err != nil { - return err + log.Printf("Rewinding log file failed") + } + if _, err = store.Save("log", logFile); err != nil { + log.Printf("Storing ceremony log failed") } - fmt.Println("Generating keys...") - pk, vk := last.ExtractKeys() - return offline_phase2.PkVkToFile(pk, pkTemp, vk, vkTemp) -} -func getTempFilePath(pattern string) (string, error) { - tempFile, err := os.CreateTemp("", pattern) + log.Println("Artifacts generated in the ceremony:") + files, err := store.List() if err != nil { - return "", err + return err } - // Close immediately because we're not writing to these files, we just need paths - err = tempFile.Close() - if err != nil { - log.Printf("error closing %s", tempFile.Name()) + for _, file := range files { + log.Println("\t" + file) } - return tempFile.Name(), nil + return err } diff --git a/online/config/config.go 
b/online/config/config.go index aeca0a8..66b3170 100644 --- a/online/config/config.go +++ b/online/config/config.go @@ -9,11 +9,16 @@ import ( ) type Config struct { - CeremonyName string `json:"ceremonyName"` - Host string `json:"host"` - Port int `json:"port"` - R1cs string `json:"r1cs"` - Phase1 string `json:"phase1"` + CeremonyName string `json:"ceremonyName"` + Host string `json:"host"` + Port int `json:"port"` + R1cs string `json:"r1cs"` + Phase1 string `json:"phase1"` + UseS3 bool `json:"useS3"` + S3Bucket string `json:"s3Bucket"` + S3Region string `json:"s3Region"` + S3Profile string `json:"s3Profile"` + S3CredentialsFile string `json:"s3CredentialsFile"` } func (c *Config) Validate() error { diff --git a/online/contribution/contribution.go b/online/contribution/contribution.go index a500a20..e1cd31a 100644 --- a/online/contribution/contribution.go +++ b/online/contribution/contribution.go @@ -3,11 +3,14 @@ package contribution import ( + "fmt" "io" "github.com/consensys/gnark/backend/groth16" "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup" cs "github.com/consensys/gnark/constraint/bn254" + + "github.com/reilabs/trusted-setup/online/storage" ) type contribution struct { @@ -15,6 +18,8 @@ type contribution struct { srsCommons *mpcsetup.SrsCommons evals *mpcsetup.Phase2Evaluations beacon []byte + count int + storage storage.Storage } // Verifiable is an interface for a contribution object that is going to be verified by a verifier. @@ -57,26 +62,37 @@ func (p *contribution) WriteTo(writer io.Writer) (int64, error) { type Contribution interface { NewVerifiable() Verifiable AddContribution(next Verifiable) error - ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey) + ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey, error) WriteTo(writer io.Writer) (int64, error) + GetCount() int } // New creates a new Contribution from a Phase 1 object, R1CS and beacon. 
// // phase1 is the Phase 1 object produced either by Gnark of by the `ptau` command from a Snarkjs file. // r1cs is the R1CS constraint system object generated by Gnark for a circuit the ceremony is performed for. -// beacon is a 32-byte array of random bytes. It is used for generating Phase 1 and later for keys generation. -func New(phase1 *mpcsetup.Phase1, r1cs *cs.R1CS, beacon []byte) Contribution { +// storage is a storage service that will be used to store contribution artifacts. It can be nil, then artifacts +// will not be stored. +// beacon is a 32-byte array of random bytes. It is used for sealing Phase 1 and later for keys generation. +func New(phase1 *mpcsetup.Phase1, r1cs *cs.R1CS, storage storage.Storage, beacon []byte) (Contribution, error) { p2 := new(mpcsetup.Phase2) srsCommons := phase1.Seal(beacon) evals := p2.Initialize(r1cs, &srsCommons) + if storage != nil { + if _, err := storage.Save("srs-commons", &srsCommons); err != nil { + return nil, err + } + } + return &contribution{ p2, &srsCommons, &evals, beacon, - } + 0, + storage, + }, nil } // NewVerifiable returns a new empty contribution instance implementing Verifiable. @@ -90,17 +106,32 @@ func (p *contribution) NewVerifiable() Verifiable { // // The next object is verified against the current one. If verification fails, an error is returned. // If the contribution is valid, next considered the new state. +// +// If the storage parameter of New was populated with a storage provider and the next contribution +// is validated positively, it is saved in the storage. 
func (p *contribution) AddContribution(next Verifiable) error { err := p.contribution.Verify(next.(*contribution).contribution) if err != nil { return err } p.contribution = next.(*contribution).contribution + p.count++ + + if p.storage != nil { + if _, err = p.storage.Save(fmt.Sprintf("phase2-%d", p.count), p.contribution); err != nil { + return err + } + } + return nil } // ExtractKeys extracts the proving and verifying keys from the contribution. -func (p *contribution) ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey) { +// +// Returns the keys object and error, if occurred. +// +// If the storage parameter of New was populated with a storage provider, keys are saved in the storage. +func (p *contribution) ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey, error) { // We intentionally don't use mpcsetup.VerifyPhase2() here. // // mpcsetup.VerifyPhase2() accepts a list of all accepted contributions and runs @@ -111,5 +142,21 @@ func (p *contribution) ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey) // contribution. Our p.contribution is the result of .Verify() passing // for every submitted contribution. It is enough to implement only the last step // of mpcsetup.VerifyPhase2(), which is .Seal(). - return p.contribution.Seal(p.srsCommons, p.evals, p.beacon) + pk, vk := p.contribution.Seal(p.srsCommons, p.evals, p.beacon) + + if p.storage != nil { + if _, err := p.storage.Save("pk", pk); err != nil { + return pk, vk, err + } + if _, err := p.storage.Save("vk", vk); err != nil { + return pk, vk, err + } + } + + return pk, vk, nil +} + +// GetCount returns the number of contributions added to the contribution. 
+func (p *contribution) GetCount() int { + return p.count } diff --git a/online/contribution/contribution_test.go b/online/contribution/contribution_test.go index 4ec6d5d..e9539cd 100644 --- a/online/contribution/contribution_test.go +++ b/online/contribution/contribution_test.go @@ -41,11 +41,13 @@ func Test(t *testing.T) { ccs, p1, beacon := setup() // Initialize Phase 2 from Phase 1, circuit constraint system and random beacon - p2 := contribution.New(p1, ccs, beacon) + p2, err := contribution.New(p1, ccs, nil, beacon) + assert.NoError(t, err) + assert.Equal(t, 0, p2.GetCount()) // Serialize initial Phase 2 to a buffer var buf bytes.Buffer - _, err := p2.WriteTo(&buf) + _, err = p2.WriteTo(&buf) assert.NoError(t, err) // Recreate the initial contribution from a buffer @@ -59,9 +61,11 @@ func Test(t *testing.T) { // Submit contribution err = p2.AddContribution(contrib.(contribution.Verifiable)) assert.NoError(t, err) + assert.Equal(t, 1, p2.GetCount()) // One contribution should be enough to generate keys - pk, vk := p2.ExtractKeys() + pk, vk, err := p2.ExtractKeys() + assert.NoError(t, err) // Check that keys can be used for proof generation and verification teardown(ccs, &pk, &vk) diff --git a/online/server/ceremony_service/ceremony_service.go b/online/server/ceremony_service/ceremony_service.go index 88eaad7..e101262 100644 --- a/online/server/ceremony_service/ceremony_service.go +++ b/online/server/ceremony_service/ceremony_service.go @@ -3,8 +3,8 @@ package ceremony_service import ( "context" - "log" + "github.com/rs/zerolog" "google.golang.org/grpc/peer" "github.com/reilabs/trusted-setup/online/api" @@ -17,17 +17,25 @@ type ceremonyService struct { name string coordinator coordinator.Coordinator + log *zerolog.Logger } // New returns a new instance of CeremonyServiceServer. // -// # The returned object can be passed to the gRPC server constructor +// Accepts a name of the ceremony, ceremony coordinator instance and a logger. 
+// The ceremony name is sent to contributors after they connect. +// The coordinator keeps track of incoming contributions, accepts new contribution +// candidates and validates them. +// The logger will accept log entries with crucial steps of the ceremony, that can +// be useful during attestation or keys recovery. // -// Accepts a name of the ceremony and a ceremony coordinator instance. +// The returned object can be passed to the gRPC server constructor func New( - name string, coordinator coordinator.Coordinator, + name string, coordinator coordinator.Coordinator, log *zerolog.Logger, ) api.CeremonyServiceServer { - return &ceremonyService{name: name, coordinator: coordinator} + log.Info().Str("name", name).Msg("new ceremony started") + + return &ceremonyService{name: name, coordinator: coordinator, log: log} } func clientAddressFromContext(ctx context.Context) string { @@ -40,47 +48,65 @@ func clientAddressFromContext(ctx context.Context) string { return clientIP } -func onContributorPositionUpdate(newPosition int, clientIp string, stream api.CeremonyService_ContributeServer) { - log.Printf("contributor %s got slot %d in the queue", clientIp, newPosition) - if err := stream.Send(api.NewTurnNotification(newPosition)); err != nil { - log.Printf("failed to send position update to %s: %v", clientIp, err) - } -} - // Contribute implements the flow of a single contribution coming from a contributor client. func (s *ceremonyService) Contribute( stream api.CeremonyService_ContributeServer, ) error { + clientIp := clientAddressFromContext(stream.Context()) + s.log.Info(). + Str("ip", clientIp). 
+ Msg("new contributor connected") + err := stream.Send(api.NewHello(s.name)) if err != nil { return err } - clientIp := clientAddressFromContext(stream.Context()) waitForThisContributorsTurn := s.coordinator.AddContributor( func(newPosition int) { - onContributorPositionUpdate(newPosition, clientIp, stream) + err = stream.Send(api.NewTurnNotification(newPosition)) + s.log.Info(). + Int("newQueuePosition", newPosition). + Str("ip", clientIp). + Err(err). + Msg("contributor position update") }, ) waitForThisContributorsTurn() - log.Printf("Sending last contribution to %s", clientIp) + s.log.Info(). + Str("ip", clientIp). + Msg("sending last accepted contribution") n, err := s.coordinator.WriteLastContribution(stream_utils.NewStreamWriter(stream)) if err != nil { - log.Printf("error sending last contribution to %s", clientIp) + s.log.Info(). + Str("ip", clientIp). + Err(err) return err } - log.Printf("Sent %d bytes", n) + s.log.Info(). + Str("ip", clientIp). + Int64("size", n). + Msg("sent last accepted contribution") - log.Printf("Contribution to be received from %s", clientIp) + s.log.Info(). + Str("ip", clientIp). + Msg("receiving new contribution candidate") n, err = s.coordinator.ReadNextContribution(stream_utils.NewStreamReader(stream)) - log.Printf("Received %d bytes", n) if err != nil { - log.Printf("%s: %v", clientIp, err) + s.log.Info(). + Str("ip", clientIp). + Int64("size", n). + Err(err). + Msg("new contribution candidate rejected") return stream.Send(api.NewValidationResponse(err)) } - log.Printf("Contribution from %s accepted", clientIp) + + s.log.Info(). + Str("ip", clientIp). + Int64("size", n). 
+ Msg("new contribution candidate accepted") return stream.Send(api.NewValidationResponse(nil)) } diff --git a/online/server/coordinator/coordinator.go b/online/server/coordinator/coordinator.go index ca52265..7d78eb7 100644 --- a/online/server/coordinator/coordinator.go +++ b/online/server/coordinator/coordinator.go @@ -16,6 +16,7 @@ type Coordinator interface { AddContributor(notify contributors_manager.OnPositionUpdate) contributors_manager.PositionUpdateNotifier WriteLastContribution(client io.Writer) (int64, error) ReadNextContribution(client io.Reader) (int64, error) + GetContributionsCount() int } type coordinator struct { @@ -92,3 +93,8 @@ func (s *coordinator) ReadNextContribution(contributor io.Reader) (int64, error) return n, nil } + +// GetContributionsCount returns the number of contributions added to the contribution. +func (s *coordinator) GetContributionsCount() int { + return s.last.GetCount() +} diff --git a/online/server/coordinator/coordinator_test.go b/online/server/coordinator/coordinator_test.go index 3f297c6..88547cd 100644 --- a/online/server/coordinator/coordinator_test.go +++ b/online/server/coordinator/coordinator_test.go @@ -17,6 +17,11 @@ import ( type mockPhase2 struct { phase2 bytes.Buffer + count int +} + +func (m *mockPhase2) GetCount() int { + return m.count } func (m *mockPhase2) NewVerifiable() contribution.Verifiable { @@ -34,10 +39,11 @@ func (m *mockPhase2) AddContribution(next contribution.Verifiable) error { return errors.New("malformed contribution") } _, err := m.phase2.Write(contribBuf.Bytes()) + m.count++ return err } -func (m *mockPhase2) ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey) { +func (m *mockPhase2) ExtractKeys() (groth16.ProvingKey, groth16.VerifyingKey, error) { // Not necessary for this test panic("not implemented") } @@ -104,12 +110,14 @@ func TestWriteLastContribution(t *testing.T) { func TestReadNextContribution(t *testing.T) { coord := coordinator.New(&mockPhase2{}, &mockContributorsManager{}) 
+ assert.Equal(t, 0, coord.GetContributionsCount()) _ = coord.AddContributor(func(int) {}) goodContrib := bytes.NewBuffer(bytes.Repeat([]byte{0x21}, 0x37)) _, err := coord.ReadNextContribution(goodContrib) assert.NoError(t, err) + assert.Equal(t, 1, coord.GetContributionsCount()) // Shortcut here - during the real ceremony the contributor would be // removed from the queue, and we'd had to test bad contribution with @@ -119,4 +127,5 @@ func TestReadNextContribution(t *testing.T) { badContrib := bytes.NewBuffer([]byte{0x1, 0x01}) _, err = coord.ReadNextContribution(badContrib) assert.Error(t, err) + assert.Equal(t, 1, coord.GetContributionsCount()) } diff --git a/online/storage/s3.go b/online/storage/s3.go new file mode 100644 index 0000000..a834ddd --- /dev/null +++ b/online/storage/s3.go @@ -0,0 +1,170 @@ +package storage + +import ( + "context" + "fmt" + "io" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +type S3 struct { + client *s3.Client + uploader *manager.Uploader + bucket string +} + +type S3Option func(*s3Options) + +type s3Options struct { + bucket string + region string + profile string + credentialsFile string + endpoint string +} + +// WithBucket sets the S3 bucket +func WithBucket(bucket string) S3Option { + return func(o *s3Options) { + o.bucket = bucket + } +} + +// WithRegion sets the AWS region +func WithRegion(region string) S3Option { + return func(o *s3Options) { + o.region = region + } +} + +// WithProfile sets the AWS profile name +func WithProfile(profile string) S3Option { + return func(o *s3Options) { + o.profile = profile + } +} + +// WithCredentialsFile sets the AWS credentials file path +func WithCredentialsFile(credentialsFile string) S3Option { + return func(o *s3Options) { + o.credentialsFile = credentialsFile + } +} + +// WithEndpoint sets the custom endpoint for AWS service +func 
WithEndpoint(endpoint string) S3Option { + return func(o *s3Options) { + o.endpoint = endpoint + } +} + +// NewS3 creates a new instance of the storage backed by AWS S3. +// +// Bucket, profile, region, credentials file and endpoint can be optionally customized using With### functions. +// Otherwise, the default values are used based on the environment variables, +// shared configuration and shared credentials files. +// +// AWS S3 endpoint can be overridden with the endpoint parameter. For default endpoint use S3DefaultEndpoint. +func NewS3(opts ...S3Option) (*S3, error) { + options := &s3Options{} + + for _, opt := range opts { + opt(options) + } + + var cfgOpts []func(*config.LoadOptions) error + + if options.endpoint != "" { + cfgOpts = append(cfgOpts, config.WithBaseEndpoint(options.endpoint)) + } + if options.region != "" { + cfgOpts = append(cfgOpts, config.WithRegion(options.region)) + } + if options.profile != "" { + cfgOpts = append(cfgOpts, config.WithSharedConfigProfile(options.profile)) + } + if options.credentialsFile != "" { + cfgOpts = append(cfgOpts, config.WithSharedCredentialsFiles([]string{options.credentialsFile})) + } + + cfg, err := config.LoadDefaultConfig(context.TODO(), cfgOpts...) + if err != nil { + return nil, err + } + + client := s3.NewFromConfig(cfg) + + return &S3{ + client: client, + uploader: manager.NewUploader(client), + bucket: options.bucket, + }, nil +} + +// Save stores the object in the S3 bucket. +// +// The given id is prepended to the file name of the file. If the file already exists, it will +// be overwritten. +// +// The function returns the path to the file and an error if any. 
+func (s *S3) Save(id string, obj io.WriterTo) (string, error) { + pipeReader, pipeWriter := io.Pipe() + go func() { + defer func(pipeWriter *io.PipeWriter) { + err := pipeWriter.Close() + if err != nil { + return + } + }(pipeWriter) + _, err := obj.WriteTo(pipeWriter) + if err != nil { + err = pipeWriter.CloseWithError(err) + if err != nil { + return + } + } + }() + + _, err := s.uploader.Upload( + context.TODO(), &s3.PutObjectInput{ + Bucket: &s.bucket, + Key: &id, + Body: pipeReader, + }, + ) + if err != nil { + return "", err + } + + return fmt.Sprintf("s3://%s/%s", s.bucket, id), nil +} + +// List returns a list of all files stored with Save. +// +// The function reflects the actual state of the storage. I.e. if the file stored with Save +// is removed with AWS S3 CLI, List will not return it. +// +// Returns an array of strings where each element is a path in the format: s3:/// +// and error, if occurred. +func (s *S3) List() ([]string, error) { + output, err := s.client.ListObjectsV2( + context.TODO(), &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + }, + ) + if err != nil { + return nil, err + } + + var files []string + for _, object := range output.Contents { + files = append(files, fmt.Sprintf("s3://%s/%s", s.bucket, aws.ToString(object.Key))) + } + + return files, nil +} diff --git a/online/storage/s3_test.go b/online/storage/s3_test.go new file mode 100644 index 0000000..4f0420d --- /dev/null +++ b/online/storage/s3_test.go @@ -0,0 +1,91 @@ +package storage_test + +import ( + "fmt" + "io" + "net/http/httptest" + "os" + "testing" + + "github.com/johannesboyne/gofakes3" + "github.com/johannesboyne/gofakes3/backend/s3mem" + "github.com/stretchr/testify/assert" + + "github.com/reilabs/trusted-setup/online/storage" +) + +type testWriterTo struct { + content []byte +} + +func (e *testWriterTo) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(e.content) + return int64(n), err +} + +func TestS3(t *testing.T) { + backend := s3mem.New() + faker 
:= gofakes3.New(backend) + srv := httptest.NewServer(faker.Server()) + defer srv.Close() + + credentials, _ := os.CreateTemp("", "") + _, _ = credentials.Write( + []byte( + "[default]\n" + + "aws_access_key_id = \n" + + "aws_secret_access_key = "), + ) + _ = credentials.Close() + + s, err := storage.NewS3( + storage.WithBucket("test-bucket"), + storage.WithRegion("us-east-1"), + storage.WithProfile("default"), + storage.WithCredentialsFile(credentials.Name()), + storage.WithEndpoint(srv.URL), // Use mock S3 service + ) + assert.NoError(t, err) + + const testBucket = "test-bucket" + const testFile = "test" + obj := &testWriterTo{content: []byte("Hello world")} + + // Bucket does not exist + filePath, err := s.Save(testFile, obj) + assert.Error(t, err) + assert.Empty(t, filePath) + + // Bucket exists + err = backend.CreateBucket(testBucket) + assert.NoError(t, err) + filePath, err = s.Save(testFile, obj) + assert.NoError(t, err) + assert.Equal(t, fmt.Sprintf("s3://%s/%s", testBucket, testFile), filePath) + + // Object is visible + objs, err := s.List() + assert.NoError(t, err) + assert.Len(t, objs, 1) + assert.Equal(t, objs[0], filePath) + + // Removing objects with other means is reflected in List return value + _, err = backend.DeleteObject(testBucket, testFile) + assert.NoError(t, err) + objs, err = s.List() + assert.NoError(t, err) + assert.Empty(t, objs) + + // Saving objects is immediately reflected in List return value + for i := 0; i < 5; i++ { + path, err := s.Save(fmt.Sprintf("testfile%d", i), obj) + assert.NoError(t, err) + assert.NotEmpty(t, path) + + objs, err = s.List() + assert.NoError(t, err) + assert.Len(t, objs, i+1) + } + + _ = os.Remove(credentials.Name()) +} diff --git a/online/storage/storage.go b/online/storage/storage.go new file mode 100644 index 0000000..6688c9c --- /dev/null +++ b/online/storage/storage.go @@ -0,0 +1,13 @@ +// Package storage provides a persistent storage service with multiple backends. 
+// +// Currently supported backends are Tmpfs and AWS S3. +// +// Objects stored in the storage must implement io.WriterTo interface. +package storage + +import "io" + +type Storage interface { + Save(id string, obj io.WriterTo) (string, error) + List() ([]string, error) +} diff --git a/online/storage/tmpfs.go b/online/storage/tmpfs.go new file mode 100644 index 0000000..40192d7 --- /dev/null +++ b/online/storage/tmpfs.go @@ -0,0 +1,69 @@ +package storage + +import ( + "io" + "os" +) + +type Tmpfs struct { + prefix string + files []string +} + +// NewTmpfs creates a new instance of the storage backed by tmpfs. +// +// Prefix is a prepended to the names of the files stored with Save. +func NewTmpfs(prefix string) *Tmpfs { + return &Tmpfs{ + prefix, + []string{}, + } +} + +// Save stores the object in the temp filesystem. +// +// The given id is prepended to the file name of the file. If the file already exists, it will +// be overwritten. +// +// The function returns the path to the file and an error if any. +func (t *Tmpfs) Save(id string, obj io.WriterTo) (string, error) { + tmpFile, err := os.CreateTemp("", t.prefix+"-"+id+"-") + if err != nil { + return "", err + } + defer func(tmpFile *os.File) { + err = tmpFile.Close() + if err != nil { + return + } + }(tmpFile) + + _, err = obj.WriteTo(tmpFile) + if err != nil { + _ = os.Remove(tmpFile.Name()) + return "", err + } + + t.files = append(t.files, tmpFile.Name()) + + return tmpFile.Name(), nil +} + +// List returns a list of all files stored with Save. +// +// The function reflects the actual state of the storage. I.e. if the file stored with Save +// is removed with `rm /tmp/`, List will not return it. +// +// Returns an array of strings where each element is the id of the file. +// Error is always nil. 
+func (t *Tmpfs) List() ([]string, error) { + var existingFiles []string + for _, f := range t.files { + fi, err := os.Stat(f) + if err == nil && !fi.IsDir() { + existingFiles = append(existingFiles, f) + } + } + + return existingFiles, nil +} diff --git a/online/storage/tmpfs_test.go b/online/storage/tmpfs_test.go new file mode 100644 index 0000000..8468815 --- /dev/null +++ b/online/storage/tmpfs_test.go @@ -0,0 +1,69 @@ +package storage_test + +import ( + "bytes" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/reilabs/trusted-setup/online/storage" +) + +func TestTmpfs_Save(t *testing.T) { + tmpfs := storage.NewTmpfs("") + + content := []byte("hello world") + obj := bytes.NewBuffer(content) + + path, err := tmpfs.Save("testfile", obj) + defer func(name string) { + err = os.Remove(name) + if err != nil { + return + } + }(path) + assert.NoError(t, err) + + info, err := os.Stat(path) + assert.NoError(t, err) + assert.False(t, info.IsDir(), "Expected a file but found a directory") + + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("Failed to read saved file: %v", err) + } + assert.True(t, bytes.Equal(data, content), "File content mismatch") +} + +func TestTmpfs_List(t *testing.T) { + tmpfs := storage.NewTmpfs("") + + content := []byte("hello world") + obj := bytes.NewBuffer(content) + + files, err := tmpfs.List() + assert.NoError(t, err) + assert.Empty(t, files) + + for i := 0; i < 5; i++ { + path, err := tmpfs.Save(fmt.Sprintf("testfile%d", i), obj) + assert.NoError(t, err) + assert.NotEmpty(t, path) + + files, err = tmpfs.List() + assert.NoError(t, err) + assert.Len(t, files, i+1) + } + + files, err = tmpfs.List() + assert.NoError(t, err) + for _, f := range files { + _ = os.Remove(f) + } + + files, err = tmpfs.List() + assert.NoError(t, err) + assert.Empty(t, files) +} diff --git a/online/test/online_test.go b/online/test/online_test.go index 95d5d27..82e2f3f 100644 --- a/online/test/online_test.go +++ 
b/online/test/online_test.go @@ -2,13 +2,18 @@ package online_test import ( "bytes" + "io" "strconv" + "strings" "sync" "testing" + "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/reilabs/trusted-setup/offline/phase1" + offline_phase2 "github.com/reilabs/trusted-setup/offline/phase2" "github.com/reilabs/trusted-setup/offline/r1cs" "github.com/reilabs/trusted-setup/online/client" server_config "github.com/reilabs/trusted-setup/online/config" @@ -17,6 +22,7 @@ import ( "github.com/reilabs/trusted-setup/online/server/ceremony_service" "github.com/reilabs/trusted-setup/online/server/contributors_manager" "github.com/reilabs/trusted-setup/online/server/coordinator" + "github.com/reilabs/trusted-setup/online/storage" test_circuit "github.com/reilabs/trusted-setup/test" ) @@ -31,11 +37,14 @@ func TestOnlineCeremony(t *testing.T) { t.Run("Run contributions", testRunContributions) t.Run("Stop server", testStopServer) t.Run("Extract keys, prove and verify", testProveAndVerifyOnline) + t.Run("Extract keys, prove and verify (offline)", testProveAndVerifyFromFiles) } var serv *server.CeremonyServer var config *server_config.Config var last contribution.Contribution +var store storage.Storage +var beacon []byte func testStartServer(t *testing.T) { var err error @@ -49,11 +58,13 @@ func testStartServer(t *testing.T) { p1, err := phase1.FromFile(config.Phase1) assert.NoError(t, err) - beacon := bytes.Repeat([]byte{0x42}, 32) - - last = contribution.New(p1, ccs, beacon) + beacon = bytes.Repeat([]byte{0x42}, 32) + store = storage.NewTmpfs(config.CeremonyName) + last, err = contribution.New(p1, ccs, store, beacon) + assert.NoError(t, err) - service := ceremony_service.New(config.CeremonyName, coordinator.New(last, contributors_manager.New())) + muteLogger := zerolog.New(io.Discard) + service := ceremony_service.New(config.CeremonyName, coordinator.New(last, contributors_manager.New()), &muteLogger) 
serv = server.New(service) @@ -102,7 +113,35 @@ func testProveAndVerifyOnline(t *testing.T) { ccs, err := r1cs.FromFile(config.R1cs) assert.NoError(t, err) - pk, vk := last.ExtractKeys() + pk, vk, err := last.ExtractKeys() + assert.NoError(t, err) + + err = test_circuit.ProveAndVerify(ccs, &pk, &vk) + assert.NoError(t, err) +} + +func testProveAndVerifyFromFiles(t *testing.T) { + ccs, err := r1cs.FromFile(config.R1cs) + assert.NoError(t, err) + + var phase2s []*mpcsetup.Phase2 + var srs *mpcsetup.SrsCommons + files, err := store.List() + assert.NoError(t, err) + for _, f := range files { + if strings.Contains(f, "phase2-") { + p2, err := offline_phase2.FromFile(f) + assert.NoError(t, err) + phase2s = append(phase2s, p2) + } else if strings.Contains(f, "srs-commons") { + srs, err = offline_phase2.SrsCommonsFromFile(f) + assert.NoError(t, err) + } + } + + pk, vk, err := mpcsetup.VerifyPhase2(ccs, srs, beacon, phase2s...) + assert.NoError(t, err) + err = test_circuit.ProveAndVerify(ccs, &pk, &vk) assert.NoError(t, err) }