diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml
new file mode 100644
index 00000000..b4b9dfce
--- /dev/null
+++ b/.github/workflows/build-and-release.yml
@@ -0,0 +1,87 @@
+name: Build and Release
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]  # was []: an empty filter matches no branches, so PR builds never triggered
+
+jobs:
+  build:
+    runs-on: ubuntu-20.04
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4  # v2 targets the removed node12 runtime
+        with:
+          submodules: 'true'
+
+      - name: Install build-essential, curl, git, wget, and Docker
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y build-essential curl git wget
+          sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
+          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+          sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+          sudo apt-get update
+          sudo apt-get install -y docker-ce docker-ce-cli containerd.io
+          echo "Installed build-essential, curl, git, wget, and Docker"
+
+      - name: Setup Rust and Cargo
+        uses: actions-rs/toolchain@v1  # NOTE(review): archived action — consider dtolnay/rust-toolchain
+        with:
+          toolchain: 1.51.0
+          override: true
+          components: rustc, cargo
+
+      - name: Build app
+        run: sh build.sh
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4  # artifact v1-v2 actions are deprecated and disabled by GitHub
+        with:
+          name: dist
+          path: ./dist/
+
+  release:
+    needs: build
+    runs-on: ubuntu-22.04
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Download build artifact
+        uses: actions/download-artifact@v4  # must match upload-artifact major version (v4 artifacts are not readable by v2)
+        with:
+          name: dist
+          path: ./dist
+
+      - name: Prepare release
+        run: zip -r dist.zip dist
+
+      - name: Get the current date
+        id: date
+        # ::set-output is removed from GitHub runners; write to $GITHUB_OUTPUT instead,
+        # comment carried on the same run line to keep the hunk line count at 87.
+        run: echo "date=$(date +'%Y-%m-%d-%H-%M')" >> "$GITHUB_OUTPUT"
+
+      - name: Create new release
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ steps.date.outputs.date }}
+          release_name: "Release: ${{ steps.date.outputs.date }}"
+          draft: false
+          prerelease: false
+          body: "Hackachain 
phase 2 bn256 release at ${{ steps.date.outputs.date }}" + + - name: Upload Release Asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_name: dist.zip + asset_path: ./dist.zip + asset_content_type: application/zip diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..3e221292 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/dist \ No newline at end of file diff --git a/build.sh b/build.sh new file mode 100644 index 00000000..b976fca6 --- /dev/null +++ b/build.sh @@ -0,0 +1,12 @@ +# Use ubuntu 20.04 for building, otherwise the binary will not work on gramine image +mkdir -p ./dist/bin + +cd ./phase2 + +cargo build --release --bin new +cargo build --release --bin contribute +cargo build --release --bin verify_contribution + +cp target/release/new ../dist/bin +cp target/release/contribute ../dist/bin +cp target/release/verify_contribution ../dist/bin diff --git a/phase2/src/parameters.rs b/phase2/src/parameters.rs index 31817d55..4c84bff1 100644 --- a/phase2/src/parameters.rs +++ b/phase2/src/parameters.rs @@ -441,7 +441,6 @@ impl MPCParameters { ) -> [u8; 64] { println!("MPCParameters::contribute()"); - // Generate a keypair let (pubkey, privkey) = keypair(rng, self); println!("MPCParameters::batch_exp1()"); @@ -450,45 +449,56 @@ impl MPCParameters { let coeff = coeff.into_repr(); let mut projective = vec![C::Projective::zero(); bases.len()]; - let cpus = num_cpus::get(); - let chunk_size = if bases.len() < cpus { - 1 - } else { - bases.len() / cpus - }; - - // Perform wNAF over multiple cores, placing results into `projective`. 
- crossbeam::scope(|scope| { - for (bases, projective) in bases.chunks_mut(chunk_size) - .zip(projective.chunks_mut(chunk_size)) - { - scope.spawn(move |_| { - let mut wnaf = Wnaf::new(); - let mut count = 0; - for (base, projective) in bases.iter_mut() - .zip(projective.iter_mut()) - { - *projective = wnaf.base(base.into_projective(), 1).scalar(coeff); - count = count + 1; - if *progress_update_interval > 0 && count % *progress_update_interval == 0 { - println!("progress {} {}", *progress_update_interval, *total_exps) - } - } - }); - } - }).unwrap(); - - // Perform batch normalization - crossbeam::scope(|scope| { - for projective in projective.chunks_mut(chunk_size) - { - scope.spawn(move |_| { - C::Projective::batch_normalization(projective); - }); - } - }).unwrap(); - - // Turn it all back into affine points + // let cpus = num_cpus::get(); + // let chunk_size = if bases.len() < cpus { + // 1 + // } else { + // bases.len() / cpus + // }; + + // // Perform wNAF over multiple cores, placing results into `projective`. 
+ // crossbeam::scope(|scope| { + // for (bases, projective) in bases.chunks_mut(chunk_size) + // .zip(projective.chunks_mut(chunk_size)) + // { + // scope.spawn(move |_| { + // let mut wnaf = Wnaf::new(); + // let mut count = 0; + // for (base, projective) in bases.iter_mut() + // .zip(projective.iter_mut()) + // { + // *projective = wnaf.base(base.into_projective(), 1).scalar(coeff); + // count = count + 1; + // if *progress_update_interval > 0 && count % *progress_update_interval == 0 { + // println!("progress {} {}", *progress_update_interval, *total_exps) + // } + // } + // }); + // } + // }).unwrap(); + + // // Perform batch normalization + // crossbeam::scope(|scope| { + // for projective in projective.chunks_mut(chunk_size) + // { + // scope.spawn(move |_| { + // C::Projective::batch_normalization(projective); + // }); + // } + // }).unwrap(); + + let mut wnaf = Wnaf::new(); + let mut count = 0; + for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) { + *projective = wnaf.base(base.into_projective(), 1).scalar(coeff); + count += 1; + if *progress_update_interval > 0 && count % *progress_update_interval == 0 { + println!("progress {} {}", *progress_update_interval, *total_exps) + } + } + + C::Projective::batch_normalization(&mut projective); + for (projective, affine) in projective.iter().zip(bases.iter_mut()) { *affine = projective.into_affine(); } @@ -852,9 +862,11 @@ pub fn verify_contribution( (G1Affine::one(), pubkey.delta_after), (G2Affine::one(), after.params.vk.delta_g2) ) { + println!("verify_contribution::27.1"); return Err(()); } + println!("function merge_pairs needs update"); // H and L queries should be updated with delta^-1 if !same_ratio( merge_pairs(&before.params.h, &after.params.h), diff --git a/phase2/src/utils.rs b/phase2/src/utils.rs index 2c15c808..ff69033b 100644 --- a/phase2/src/utils.rs +++ b/phase2/src/utils.rs @@ -58,48 +58,76 @@ pub fn same_ratio( /// ... with high probability. 
pub fn merge_pairs(v1: &[G], v2: &[G]) -> (G, G) { - use std::sync::Mutex; - use rand::{thread_rng}; + // use std::sync::Mutex; + // use rand::{thread_rng}; - assert_eq!(v1.len(), v2.len()); + // assert_eq!(v1.len(), v2.len()); - let chunk = (v1.len() / num_cpus::get()) + 1; + // let chunk = (v1.len() / num_cpus::get()) + 1; - let s = Arc::new(Mutex::new(G::Projective::zero())); - let sx = Arc::new(Mutex::new(G::Projective::zero())); + // let s = Arc::new(Mutex::new(G::Projective::zero())); + // let sx = Arc::new(Mutex::new(G::Projective::zero())); - crossbeam::scope(|scope| { - for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) { - let s = s.clone(); - let sx = sx.clone(); + // crossbeam::scope(|scope| { + // for (v1, v2) in v1.chunks(chunk).zip(v2.chunks(chunk)) { + // let s = s.clone(); + // let sx = sx.clone(); - scope.spawn(move |_| { - // We do not need to be overly cautious of the RNG - // used for this check. - let rng = &mut thread_rng(); + // scope.spawn(move |_| { + // // We do not need to be overly cautious of the RNG + // // used for this check. 
+ // let rng = &mut thread_rng(); - let mut wnaf = Wnaf::new(); - let mut local_s = G::Projective::zero(); - let mut local_sx = G::Projective::zero(); + // let mut wnaf = Wnaf::new(); + // let mut local_s = G::Projective::zero(); + // let mut local_sx = G::Projective::zero(); - for (v1, v2) in v1.iter().zip(v2.iter()) { - let rho = G::Scalar::rand(rng); - let mut wnaf = wnaf.scalar(rho.into_repr()); - let v1 = wnaf.base(v1.into_projective()); - let v2 = wnaf.base(v2.into_projective()); + // for (v1, v2) in v1.iter().zip(v2.iter()) { + // let rho = G::Scalar::rand(rng); + // let mut wnaf = wnaf.scalar(rho.into_repr()); + // let v1 = wnaf.base(v1.into_projective()); + // let v2 = wnaf.base(v2.into_projective()); - local_s.add_assign(&v1); - local_sx.add_assign(&v2); - } + // local_s.add_assign(&v1); + // local_sx.add_assign(&v2); + // } - s.lock().unwrap().add_assign(&local_s); - sx.lock().unwrap().add_assign(&local_sx); - }); - } - }).unwrap(); + // s.lock().unwrap().add_assign(&local_s); + // sx.lock().unwrap().add_assign(&local_sx); + // }); + // } + // }).unwrap(); + + // let s = s.lock().unwrap().into_affine(); + // let sx = sx.lock().unwrap().into_affine(); + + // (s, sx) + + use rand::{thread_rng}; + + assert_eq!(v1.len(), v2.len()); + + let mut local_s = G::Projective::zero(); + let mut local_sx = G::Projective::zero(); + + // We do not need to be overly cautious of the RNG + // used for this check. + let rng = &mut thread_rng(); + + let mut wnaf = Wnaf::new(); + + for (v1, v2) in v1.iter().zip(v2.iter()) { + let rho = G::Scalar::rand(rng); + let mut wnaf = wnaf.scalar(rho.into_repr()); + let v1 = wnaf.base(v1.into_projective()); + let v2 = wnaf.base(v2.into_projective()); + + local_s.add_assign(&v1); + local_sx.add_assign(&v2); + } - let s = s.lock().unwrap().into_affine(); - let sx = sx.lock().unwrap().into_affine(); + let s = local_s.into_affine(); + let sx = local_sx.into_affine(); (s, sx) }