Skip to content

feat: release workflow #2

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: feat/gramine
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 87 additions & 0 deletions .github/workflows/build-and-release.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# Builds the phase2 binaries and publishes them as a timestamped GitHub release.
name: Build and Release

on:
  push:
    branches: [ main ]
  # FIX: `branches: []` matches no branch, so the pull_request trigger could
  # never fire. Filter on the same branch as the push trigger instead.
  pull_request:
    branches: [ main ]

jobs:
  build:
    # NOTE(review): the ubuntu-20.04 hosted-runner label has been retired by
    # GitHub. build.sh states 20.04 is required for gramine compatibility, so
    # this likely needs to move to an ubuntu-20.04 container on a newer
    # runner rather than simply bumping the label — confirm with the team.
    runs-on: ubuntu-20.04

    steps:
      - name: Checkout code
        # FIX: checkout@v2 runs on a deprecated Node runtime; v4 is current.
        uses: actions/checkout@v4
        with:
          submodules: 'true'

      - name: Install build-essential, curl, git, wget, and Docker
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential curl git wget
          sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
          sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
          sudo apt-get update
          sudo apt-get install -y docker-ce docker-ce-cli containerd.io
          echo "Installed build-essential, curl, git, wget, and Docker"

      - name: Setup Rust and Cargo
        # NOTE(review): actions-rs is unmaintained; the pinned 1.51.0
        # toolchain is kept as-is to avoid changing the build output.
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.51.0
          override: true
          components: rustc, cargo

      - name: Build app
        run: sh build.sh

      - name: Upload artifact
        # FIX: upload-artifact v2 has been disabled by GitHub; v4 is current.
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: ./dist/

  release:
    needs: build
    runs-on: ubuntu-22.04
    # Only cut a release for pushes; the (now working) pull_request trigger
    # must not publish releases for unreviewed PR builds.
    if: github.event_name == 'push'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download build artifact
        # Must match the upload-artifact major version (v4 <-> v4).
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: ./dist

      - name: Prepare release
        run: zip -r dist.zip dist

      - name: Get the current date
        id: date
        # FIX: the `::set-output` workflow command is deprecated and disabled;
        # step outputs are now written to the $GITHUB_OUTPUT file.
        run: echo "date=$(date +'%Y-%m-%d-%H-%M')" >> "$GITHUB_OUTPUT"

      - name: Create new release and upload asset
        # FIX: actions/create-release@v1 and upload-release-asset@v1 are
        # archived and unmaintained. The GitHub CLI (preinstalled on hosted
        # runners) creates the release and uploads the asset in one step.
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          tag="${{ steps.date.outputs.date }}"
          gh release create "$tag" ./dist.zip \
            --title "Release: $tag" \
            --notes "Hackachain phase 2 bn256 release at $tag"
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
/dist
12 changes: 12 additions & 0 deletions build.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
#!/bin/sh
# Build the phase2 binaries (new, contribute, verify_contribution) in release
# mode and stage them under ./dist/bin for packaging by the release workflow.
# Use ubuntu 20.04 for building, otherwise the binary will not work on gramine image

# FIX: abort on the first failing command; without this a failed `cargo build`
# would be ignored and stale binaries could be copied into ./dist/bin.
set -e

mkdir -p ./dist/bin

cd ./phase2

cargo build --release --bin new
cargo build --release --bin contribute
cargo build --release --bin verify_contribution

cp target/release/new ../dist/bin
cp target/release/contribute ../dist/bin
cp target/release/verify_contribution ../dist/bin
92 changes: 52 additions & 40 deletions phase2/src/parameters.rs
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,6 @@ impl MPCParameters {
) -> [u8; 64]
{
println!("MPCParameters::contribute()");
// Generate a keypair
let (pubkey, privkey) = keypair(rng, self);

println!("MPCParameters::batch_exp1()");
Expand All @@ -450,45 +449,56 @@ impl MPCParameters {
let coeff = coeff.into_repr();

let mut projective = vec![C::Projective::zero(); bases.len()];
let cpus = num_cpus::get();
let chunk_size = if bases.len() < cpus {
1
} else {
bases.len() / cpus
};

// Perform wNAF over multiple cores, placing results into `projective`.
crossbeam::scope(|scope| {
for (bases, projective) in bases.chunks_mut(chunk_size)
.zip(projective.chunks_mut(chunk_size))
{
scope.spawn(move |_| {
let mut wnaf = Wnaf::new();
let mut count = 0;
for (base, projective) in bases.iter_mut()
.zip(projective.iter_mut())
{
*projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
count = count + 1;
if *progress_update_interval > 0 && count % *progress_update_interval == 0 {
println!("progress {} {}", *progress_update_interval, *total_exps)
}
}
});
}
}).unwrap();

// Perform batch normalization
crossbeam::scope(|scope| {
for projective in projective.chunks_mut(chunk_size)
{
scope.spawn(move |_| {
C::Projective::batch_normalization(projective);
});
}
}).unwrap();

// Turn it all back into affine points
// let cpus = num_cpus::get();
// let chunk_size = if bases.len() < cpus {
// 1
// } else {
// bases.len() / cpus
// };

// // Perform wNAF over multiple cores, placing results into `projective`.
// crossbeam::scope(|scope| {
// for (bases, projective) in bases.chunks_mut(chunk_size)
// .zip(projective.chunks_mut(chunk_size))
// {
// scope.spawn(move |_| {
// let mut wnaf = Wnaf::new();
// let mut count = 0;
// for (base, projective) in bases.iter_mut()
// .zip(projective.iter_mut())
// {
// *projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
// count = count + 1;
// if *progress_update_interval > 0 && count % *progress_update_interval == 0 {
// println!("progress {} {}", *progress_update_interval, *total_exps)
// }
// }
// });
// }
// }).unwrap();

// // Perform batch normalization
// crossbeam::scope(|scope| {
// for projective in projective.chunks_mut(chunk_size)
// {
// scope.spawn(move |_| {
// C::Projective::batch_normalization(projective);
// });
// }
// }).unwrap();

let mut wnaf = Wnaf::new();
let mut count = 0;
for (base, projective) in bases.iter_mut().zip(projective.iter_mut()) {
*projective = wnaf.base(base.into_projective(), 1).scalar(coeff);
count += 1;
if *progress_update_interval > 0 && count % *progress_update_interval == 0 {
println!("progress {} {}", *progress_update_interval, *total_exps)
}
}

C::Projective::batch_normalization(&mut projective);

for (projective, affine) in projective.iter().zip(bases.iter_mut()) {
*affine = projective.into_affine();
}
Expand Down Expand Up @@ -852,9 +862,11 @@ pub fn verify_contribution(
(G1Affine::one(), pubkey.delta_after),
(G2Affine::one(), after.params.vk.delta_g2)
) {
println!("verify_contribution::27.1");
return Err(());
}

println!("function merge_pairs needs update");
// H and L queries should be updated with delta^-1
if !same_ratio(
merge_pairs(&before.params.h, &after.params.h),
Expand Down
92 changes: 60 additions & 32 deletions phase2/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,48 +58,76 @@ pub fn same_ratio<G1: CurveAffine>(
/// ... with high probability.
pub fn merge_pairs<G: CurveAffine>(v1: &[G], v2: &[G]) -> (G, G)
{
    use rand::{thread_rng};

    assert_eq!(v1.len(), v2.len());

    // FIX: deleted the large block of commented-out code (the former
    // crossbeam + Mutex multi-threaded implementation). It was dead code;
    // the serial path below is the only one that executed and its behavior
    // is preserved exactly.
    //
    // Compress the two vectors into a single pair by a random linear
    // combination:
    //   s  = sum(rho_i * v1_i),   sx = sum(rho_i * v2_i)
    // so the caller's same_ratio((s, sx), ...) check covers every pair
    // at once with high probability.

    let mut local_s = G::Projective::zero();
    let mut local_sx = G::Projective::zero();

    // We do not need to be overly cautious of the RNG
    // used for this check.
    let rng = &mut thread_rng();

    let mut wnaf = Wnaf::new();

    for (v1, v2) in v1.iter().zip(v2.iter()) {
        // Blind this pair's contribution with a fresh random scalar rho;
        // the shadowed `wnaf` reuses the precomputed scalar decomposition
        // for both base points.
        let rho = G::Scalar::rand(rng);
        let mut wnaf = wnaf.scalar(rho.into_repr());
        let v1 = wnaf.base(v1.into_projective());
        let v2 = wnaf.base(v2.into_projective());

        local_s.add_assign(&v1);
        local_sx.add_assign(&v2);
    }

    // Convert the accumulated projective sums back to affine for the caller.
    let s = local_s.into_affine();
    let sx = local_sx.into_affine();

    (s, sx)
}
Expand Down