1 change: 1 addition & 0 deletions stwo_cairo_verifier/crates/cairo_air/src/cairo_air.cairo
@@ -517,6 +517,7 @@ pub impl CairoAirNewImpl of CairoAirNewTrait {
#[cfg(not(feature: "poseidon252_verifier"))]
pub impl CairoAirImpl of Air<CairoAir> {
fn composition_log_degree_bound(self: @CairoAir) -> u32 {
// TODO(audit): Change name of field to composition_log_degree_bound. Done
*self.composition_log_degree_bound
}

7 changes: 5 additions & 2 deletions stwo_cairo_verifier/crates/cairo_air/src/lib.cairo
@@ -185,6 +185,8 @@ pub fn get_verification_output(proof: @CairoProof) -> VerificationOutput {
pub fn verify_cairo(proof: CairoProof) {
let CairoProof { claim, interaction_pow, interaction_claim, stark_proof, channel_salt } = proof;

// TODO(audit): assert that len of commitments is 4. Done

// Verify.
let pcs_config = stark_proof.commitment_scheme_proof.config;

@@ -241,11 +243,12 @@ pub fn verify_cairo(proof: CairoProof) {
commitment_scheme
.commit(interaction_trace_commitment, interaction_trace_log_sizes, ref channel);

let trace_log_size = commitment_scheme.get_trace_log_size();
let trace_log_size = *commitment_scheme.trees[1].tree_height;
assert!(trace_log_size == *commitment_scheme.trees[2].tree_height);

// The maximal constraint degree is 2, so the degree bound for the cairo air is the degree bound
// of the trace plus 1.
let cairo_air_log_degree_bound = trace_log_size - pcs_config.fri_config.log_blowup_factor + 1;
let cairo_air_log_degree_bound = tree_height - pcs_config.fri_config.log_blowup_factor + 1;
let cairo_air = CairoAirNewImpl::new(
@claim, @interaction_elements, @interaction_claim, cairo_air_log_degree_bound,
);
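The arithmetic in the comment above can be made explicit (a minimal sketch, not verifier code; it only assumes the usual relation `trace_log_size = trace_log_degree_bound + log_blowup_factor`):

```rust
fn cairo_air_log_degree_bound(trace_log_size: u32, log_blowup_factor: u32) -> u32 {
    // Strip the blowup to get the trace's log degree bound.
    let trace_log_degree_bound = trace_log_size - log_blowup_factor;
    // Constraints have degree at most 2, so the composition polynomial's degree
    // is at most twice the trace degree: one extra bit in the log.
    trace_log_degree_bound + 1
}
```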
2 changes: 2 additions & 0 deletions stwo_cairo_verifier/crates/verifier_core/src/channel.cairo
@@ -20,6 +20,8 @@ mod feature_dependent_uses {

pub use feature_dependent_uses::*;

// TODO(audit): Remove ChannelTime and n_challenges. Done

/// An interface for performing the Fiat-Shamir transformation on an interactive protocol.
/// The methods `mix_*` take a part of the protocol transcript and mix it into the state
/// of the pseudo-random oracle, so that subsequent queries are dependent on the transcript.
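As a reading aid, the mix/draw split described in this doc comment looks roughly like the following (an illustrative Rust sketch; the trait name and method set are assumptions, not the actual Cairo `ChannelTrait`):

```rust
/// A Fiat-Shamir channel: absorb transcript data, then squeeze randomness
/// that depends on everything absorbed so far.
trait FiatShamirChannel {
    /// Mix a piece of the transcript (a commitment, a public value, ...) into the state.
    fn mix_u64(&mut self, value: u64);
    /// Draw pseudo-random data; its value depends on the whole transcript mixed so far.
    fn draw_bytes(&mut self, n: usize) -> Vec<u8>;
}
```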
14 changes: 10 additions & 4 deletions stwo_cairo_verifier/crates/verifier_core/src/channel/blake2s.cairo
@@ -62,6 +62,7 @@ pub impl Blake2sChannelImpl of ChannelTrait {

for felt in felts {
// Compress whenever the buffer reaches capacity.
// TODO(audit): Add type (like in mix_felts) everywhere.
let msg_opt: Option<@Box<[u32; 16]>> = buffer.span().try_into();
if let Some(msg) = msg_opt {
state = blake2s_compress(state, byte_count, *msg);
@@ -84,6 +85,8 @@
update_digest(ref self, Blake2sHash { hash: res });
}

// TODO(audit): Delete this function. Reimplement mix_u64. Done
// TODO(audit): Consider moving the digest to the end.
fn mix_u64(ref self: Blake2sChannel, nonce: u64) {
let (q, r) = div_rem(nonce, NZ_U32_SHIFT);
let nonce_hi = upcast(q);
@@ -102,10 +105,10 @@ pub impl Blake2sChannelImpl of ChannelTrait {
let digest = self.digest.hash.unbox();

// Mix ids hash
let ids_hash = hash_memory_section_ids(section, digest);
let new_digest = hash_memory_section_ids(section, digest);

// Mix values hash
let values_hash = hash_memory_section_values(section, ids_hash.unbox());
let values_hash = hash_memory_section_values(section, new_digest.unbox());
update_digest(ref self, Blake2sHash { hash: values_hash });
}

@@ -154,7 +157,7 @@ pub impl Blake2sChannelImpl of ChannelTrait {
// Compute `POW_PREFIX || digest || workBits`.
// 1 u32 || 8 u32 || 1 u32.
let msg = BoxImpl::new(
[POW_PREFIX, d0, d1, d2, d3, d4, d5, d6, d7, n_bits, 0, 0, 0, 0, 0, 0],
[POW_PREFIX, 0, 0, 0, 0, 0, 0, d0, d1, d2, d3, d4, d5, d6, d7, n_bits],
);
let [q0, q1, q2, q3, q4, q5, q6, q7] = blake2s_finalize(
BoxImpl::new(BLAKE2S_256_INITIAL_STATE), 40, msg,
@@ -180,12 +183,13 @@ pub impl Poseidon252ChannelImpl of ChannelTrait {
/// # Panics
///
/// Panics if `n_bits` >= 64.
// TODO(audit): Don't use the digest directly. Done
fn check_leading_zeros(digest: Blake2sHash, n_bits: u32) -> bool {
const U64_2_POW_32: u64 = 0x100000000;
let [d0, d1, _, _, _, _, _, _] = digest.hash.unbox();
let v = d1.into() * U64_2_POW_32 + d0.into();

let nonzero_divisor = pow2_u64(n_bits).try_into().unwrap();
let nonzero_divisor: NonZero<u64> = pow2_u64(n_bits).try_into().unwrap();
let (_, r) = DivRem::div_rem(v, nonzero_divisor);
r == 0
}
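A minimal Rust sketch of the same test (my paraphrase, not part of the PR): the two low digest words form a little-endian `u64`, and the check passes when its `n_bits` least-significant bits are zero.

```rust
fn check_leading_zeros(digest_words: [u32; 8], n_bits: u32) -> bool {
    assert!(n_bits < 64);
    // v = d1 * 2^32 + d0, as in the Cairo code above.
    let v = ((digest_words[1] as u64) << 32) | digest_words[0] as u64;
    // Divisible by 2^n_bits  <=>  the n_bits low bits of v are zero.
    v % (1u64 << n_bits) == 0
}
```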
@@ -197,6 +201,7 @@ fn update_digest(ref channel: Blake2sChannel, new_digest: Blake2sHash) {

// TODO: Consider just returning secure felts.
fn draw_base_felts(ref channel: Blake2sChannel) -> Box<[M31; 8]> {
// TODO(audit): Make this consistent with Poseidon252Channel::draw_base_felts. We think that the loop is not necessary. Add documentation.
loop {
let [w0, w1, w2, w3, w4, w5, w6, w7] = draw_random_words(ref channel).hash.unbox();

@@ -222,5 +227,6 @@ fn draw_random_words(ref channel: Blake2sChannel) -> Blake2sHash {

// Append a zero byte for domain separation between generating randomness and mixing a
// single u32.
// TODO(audit): Add domain separation. E.g. change 36 to 37. Done
Blake2sHash { hash: blake2s_finalize(BoxImpl::new(BLAKE2S_256_INITIAL_STATE), 37, msg) }
}
@@ -33,6 +33,7 @@ impl Posidon252ChannelHelperImpl of Poseidon252ChannelHelper {
/// Helper function to mix a single `felt252` into the channel.
#[inline(always)]
fn mix_felt252(ref self: Poseidon252Channel, x: felt252) {
// TODO(audit): Fix domain separation.
let (s0, _, _) = hades_permutation(self.digest, x, 2);
update_digest(ref self, s0);
}
@@ -62,6 +63,7 @@ pub impl Poseidon252ChannelImpl of ChannelTrait {
res.append(pack_qm31(cur, y));
},
Option::None => {
// TODO(audit): Use if let.
if !felts.is_empty() {
let x = felts.pop_front().unwrap();
res.append(pack_qm31(1, *x));
@@ -194,7 +196,7 @@ pub impl Poseidon252ChannelImpl of ChannelTrait {

/// Check that `H(H(POW_PREFIX, digest, n_bits), nonce)` has `n_bits` starting zeros.
fn verify_pow_nonce(self: @Poseidon252Channel, n_bits: u32, nonce: u64) -> bool {
const POW_PREFIX: u32 = 0x012345678;
const POW_PREFIX: u32 = 0x12345678;
let prefix_hash = poseidon_hash_span(
[POW_PREFIX.into(), *self.digest, n_bits.into()].span(),
);
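Structurally, the nested-hash proof-of-work check reads as follows (a hedged Rust sketch where `hash` is a stand-in for Poseidon; the field types and the low-bits interpretation of "starting zeros" are assumptions based on `check_leading_zeros` below):

```rust
fn verify_pow_nonce(hash: impl Fn(&[u64]) -> u64, digest: u64, n_bits: u32, nonce: u64) -> bool {
    assert!(n_bits < 64);
    const POW_PREFIX: u64 = 0x12345678;
    // Inner hash binds the prefix, the channel digest and the difficulty.
    let prefix_hash = hash(&[POW_PREFIX, digest, n_bits as u64]);
    // Outer hash binds the nonce; the zero-bits condition is a divisibility check.
    let h = hash(&[prefix_hash, nonce]);
    h % (1u64 << n_bits) == 0
}
```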
@@ -209,10 +211,11 @@ pub impl Poseidon252ChannelImpl of ChannelTrait {
/// # Panics
///
/// Panics if `n_bits` >= 64.
// TODO(audit): Don't use the digest directly, use a different hash for pow. Done
fn check_leading_zeros(digest: felt252, n_bits: u32) -> bool {
let u256 { low, .. } = digest.into();
let two_pow_n_bits: u128 = pow2_u64(n_bits).into();
let nonzero_divisor = two_pow_n_bits.try_into().unwrap();
let nonzero_divisor: NonZero<u128> = two_pow_n_bits.try_into().unwrap();
let (_, r) = DivRem::div_rem(low, nonzero_divisor);
r == 0
}
@@ -227,6 +230,7 @@ fn draw_base_felts(ref channel: Poseidon252Channel) -> [M31; FELTS_PER_HASH] {
}

fn draw_secure_felt252(ref channel: Poseidon252Channel) -> felt252 {
// TODO(audit): Can take two of the outputs of hades_permutation.
let counter: felt252 = channel.n_draws.into();
// Use 3 as the capacity for domain separation between mix and draw operations.
// In all mix functions, the capacity is set to 0 or 2.
@@ -236,7 +240,9 @@ fn draw_secure_felt252(ref channel: Poseidon252Channel) -> felt252 {
}

#[inline]
// TODO(audit): Don't work with u256, work directly with u128. Make both cases of draw_secure_felts and draw_secure_felt more efficient.
fn extract_m31(ref num: u256) -> M31 {
// TODO(audit): div_rem(num, 2**31 - 1).
let (q, r) = DivRem::div_rem(num, M31_SHIFT_NZ_U256);
num = q;
M31Trait::reduce_u128(r.low)
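For intuition, here is what one extraction step does, written over `u128` in the spirit of the TODO (a sketch that only assumes `M31_SHIFT` is 2^31; not the Cairo code):

```rust
const P: u32 = (1u32 << 31) - 1; // M31 prime

/// Pull one M31 limb off `num`: remainder mod 2^31, then reduce modulo p.
fn extract_m31(num: &mut u128) -> u32 {
    let r = (*num & ((1u128 << 31) - 1)) as u32; // num mod 2^31
    *num >>= 31;                                 // num := num div 2^31
    if r == P { 0 } else { r }                   // only the value 2^31 - 1 needs reduction
}
```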
1 change: 1 addition & 0 deletions stwo_cairo_verifier/crates/verifier_core/src/fields.cairo
@@ -16,6 +16,7 @@ pub trait Invertible<T> {
}

// TODO(andrew): Consider removing in favour of inverse libfunc.
// TODO(audit): See Andrew's comment above, or consider not doing it per query.
pub trait BatchInvertible<T, +Invertible<T>, +Copy<T>, +Drop<T>, +Mul<T>> {
/// Computes all `1/arr[i]` with a single call to `inverse()` using Montgomery batch inversion.
fn batch_inverse(
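Since the doc comment only names the technique, here is a short Rust sketch of Montgomery batch inversion over M31 (an illustration of the idea, not the Cairo implementation): accumulate prefix products, invert only the total product, then recover each `1/x_i` walking backwards.

```rust
const P: u64 = (1u64 << 31) - 1; // M31 prime

/// Modular exponentiation, used once for the single inversion (Fermat's little theorem).
fn pow_mod(mut b: u64, mut e: u64) -> u64 {
    let mut acc = 1u64;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

/// Montgomery batch inversion: one `pow_mod` call for any number of nonzero inputs.
fn batch_inverse(xs: &[u64]) -> Vec<u64> {
    // prefix[i] = x_0 * x_1 * ... * x_i (mod p)
    let mut prefix = Vec::with_capacity(xs.len());
    let mut acc = 1u64;
    for &x in xs {
        acc = acc * x % P;
        prefix.push(acc);
    }
    // Invert the total product once.
    let mut inv_acc = pow_mod(acc, P - 2);
    let mut out = vec![0u64; xs.len()];
    for i in (0..xs.len()).rev() {
        let prev = if i == 0 { 1 } else { prefix[i - 1] };
        out[i] = inv_acc * prev % P;   // 1 / x_i
        inv_acc = inv_acc * xs[i] % P; // drop x_i from the running inverse
    }
    out
}
```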
@@ -115,7 +115,7 @@ pub impl CM31One of One<CM31> {
pub impl M31IntoCM31 of core::traits::Into<M31, CM31> {
#[inline]
fn into(self: M31) -> CM31 {
CM31 { a: self, b: m31(0) }
CM31 { a: self, b: Zero::zero() }
}
}

@@ -9,7 +9,7 @@ use super::super::{BatchInvertible, Invertible};
#[derive(Copy, Drop, Debug, PartialEq)]
pub struct CM31 {
// Represented using QM31, since QM31 has a dedicated opcode.
pub inner: super::super::qm31::QM31,
inner: super::super::qm31::QM31,
}

pub impl CM31InvertibleImpl of Invertible<CM31> {
@@ -37,7 +37,7 @@ pub trait QM31Trait {

/// Given a sample point `(px, py): CirclePoint<QM31>` and a domain point
/// `(dx, dy): CirclePoint<M31>` computes the quotient denominator, which has the formula:
/// Re(px - dx) * Im(py) - Re(py - dy) * Im(px)
/// Re(px - dx) * Im(py - dy) - Re(py - dy) * Im(px - dx)
/// Equivalently, this is the imaginary part of (py - dy) * conj(px - dx).
fn fused_quotient_denominator(px: @QM31, py: @QM31, dx: M31, dy: M31) -> CM31;
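A quick check of the corrected formula (my own derivation, not part of the PR): write $px - dx = a_1 + a_2 u$ and $py - dy = b_1 + b_2 u$ with CM31 components $a_i, b_i$, where conjugation flips the sign of the $u$-part. The $u$-coefficient of the product is then

$$
\operatorname{Im}\big((py - dy)\,\overline{(px - dx)}\big)
= \operatorname{Im}\big((b_1 + b_2 u)(a_1 - a_2 u)\big)
= a_1 b_2 - a_2 b_1,
$$

since the $u^2$ term lands in the real part ($u^2 \in \mathrm{CM31}$). This is exactly $\operatorname{Re}(px - dx)\operatorname{Im}(py - dy) - \operatorname{Re}(py - dy)\operatorname{Im}(px - dx)$, matching the updated doc comment.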

@@ -75,6 +75,7 @@ pub impl QM31Impl of QM31Trait {
unreduced::fused_quotient_denominator(*px, *py, dx, dy)
}

// TODO(audit): Optimize. Done?
fn from_partial_evals(evals: [QM31; QM31_EXTENSION_DEGREE]) -> QM31 {
unreduced::from_partial_evals(evals)
}
45 changes: 26 additions & 19 deletions stwo_cairo_verifier/crates/verifier_core/src/fri.cairo
@@ -88,16 +88,18 @@ pub impl FriVerifierImpl of FriVerifierTrait {
};

// Bounds are stored in descending order.
// TODO(andrew): There is no check that it's sorted. Add check.
let max_column_log_bound = *column_log_bounds.first().unwrap();

let mut layer_index = 0;
let mut inner_layers = array![];
let mut layer_log_bound = max_column_log_bound - CIRCLE_TO_LINE_FOLD_STEP;
// TODO(audit): Change to column_commitment_domain.first().fold_to_line().
let mut layer_domain = LineDomainImpl::new_unchecked(
CosetImpl::half_odds(layer_log_bound + config.log_blowup_factor),
);

// TODO(audit): Change to for loop.
// TODO(audit): Rename proof to layer_proof.
while let Some(proof) = inner_layer_proofs.pop_front() {
channel.mix_commitment(*proof.commitment);

@@ -112,11 +114,7 @@
},
);

layer_log_bound = match layer_log_bound.checked_sub(FOLD_STEP) {
Some(layer_log_bound) => layer_log_bound,
None => panic!("{}", FriVerificationError::InvalidNumFriLayers),
};

layer_log_bound -= FOLD_STEP;
layer_index += 1;
layer_domain = layer_domain.double();
}
@@ -127,7 +125,7 @@
);

assert!(
last_layer_poly.len() == pow2(config.log_last_layer_degree_bound),
last_layer_poly.log_size == config.log_last_layer_degree_bound,
"{}",
FriVerificationError::LastLayerDegreeInvalid,
);
@@ -136,6 +134,7 @@

FriVerifier {
config, first_layer, inner_layers, last_layer_domain: layer_domain, last_layer_poly,
// TODO(audit): Remove queries from the struct. Done
}
}

Expand All @@ -148,6 +147,7 @@ pub impl FriVerifierImpl of FriVerifierTrait {
self.decommit_on_queries(queries, first_layer_query_evals)
}

// TODO(audit): Inline.
fn decommit_on_queries(
self: FriVerifier, queries: Queries, first_layer_query_evals: ColumnArray<Span<QM31>>,
) {
@@ -215,9 +215,12 @@ fn decommit_inner_layers(
let mut first_layer_column_bounds = *verifier.first_layer.column_log_bounds;
let mut prev_fold_alpha = *verifier.first_layer.folding_alpha;

// TODO(audit): Swap the order of the folds. In each iteration do the circle fold and line fold
// to the same column size.
while let Some(layer) = inner_layers.pop_front() {
let circle_poly_degree_bound = *layer.log_degree_bound + CIRCLE_TO_LINE_FOLD_STEP;

// TODO(audit): Replace while with if (there's only one).
// Check for evals committed in the first layer that need to be folded into this layer.
while let Some(_) = first_layer_column_bounds.next_if_eq(@circle_poly_degree_bound) {
let (sparse_eval, column_domain) = first_layer_iter.next().unwrap();
@@ -256,17 +259,18 @@ fn decommit_last_layer(verifier: FriVerifier, mut queries: Queries, mut query_ev

// TODO(andrew): Note depending on the proof parameters, doing FFT on the last layer poly vs
// pointwise evaluation is less efficient.
let last_layer_evals = last_layer_poly.evaluate(last_layer_domain).values;
let domain_log_size = last_layer_domain.log_size();
assert!(last_layer_poly.coeffs.len() == 1);
// let last_layer_evals = last_layer_poly.evaluate(last_layer_domain).values;
// let domain_log_size = last_layer_domain.log_size();

while let (Some(query), Some(query_eval)) =
(queries.positions.pop_front(), query_evals.pop_front()) {
// TODO(andrew): Makes more sense for the proof to provide coeffs in natural order and
// the FFT return evals in bit-reversed order to prevent this unnecessary bit-reverse.
let last_layer_eval_i = bit_reverse_index(*query, domain_log_size);
// let last_layer_eval_i = bit_reverse_index(*query, domain_log_size);

assert!(
query_eval == *last_layer_evals[last_layer_eval_i],
query_eval == *last_layer_poly.coeffs[0],
"{}",
FriVerificationError::LastLayerEvaluationsInvalid,
);
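The replacement relies on the last layer being a constant polynomial: every queried evaluation must equal the single coefficient, so no FFT or bit-reversed indexing is needed. A small Rust sketch of that check (illustrative; `u64` stands in for QM31, and it assumes `log_last_layer_degree_bound == 0`, which the new assert enforces):

```rust
fn check_last_layer_constant(coeffs: &[u64], query_evals: &[u64]) -> bool {
    assert_eq!(coeffs.len(), 1, "last layer must be a constant polynomial");
    // A degree-0 polynomial evaluates to its only coefficient everywhere.
    query_evals.iter().all(|&eval| eval == coeffs[0])
}
```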
@@ -336,7 +340,7 @@ impl FriFirstLayerVerifierImpl of FriFirstLayerVerifierTrait {
) -> ColumnArray<SparseEvaluation> {
// Columns are provided in descending order by size.
let max_column_log_size = (*self.column_commitment_domains).first().unwrap().log_size();
assert!(queries.log_domain_size == max_column_log_size);
assert!(queries.log_domain_size == max_column_log_size, "...");

let mut column_queries = queries;
let mut column_commitment_domains = *self.column_commitment_domains;
@@ -347,6 +351,7 @@ impl FriFirstLayerVerifierImpl of FriFirstLayerVerifierTrait {
let mut sparse_evals_by_column = array![];
let mut decommitted_values = array![];

// TODO(audit): Remove.
// Check that the column commitment domains are sorted in descending order.
for i in 1..column_commitment_domains.len() {
assert!(
@@ -355,6 +360,7 @@ impl FriFirstLayerVerifierImpl of FriFirstLayerVerifierTrait {
);
}

// TODO(audit): Change to zip_eq.
while let (Some(column_domain), Some(column_query_evals)) =
(column_commitment_domains.pop_front(), query_evals_by_column.pop_front()) {
let column_domain_log_size = column_domain.log_size();
@@ -377,6 +383,7 @@ impl FriFirstLayerVerifierImpl of FriFirstLayerVerifierTrait {
NullableTrait::new(column_decommitment_positions),
);

// TODO(audit): Consider flattening sparse_evaluation.
for subset_eval in sparse_evaluation.subset_evals.span() {
for eval in subset_eval.span() {
// Split the QM31 into its M31 coordinate values.
@@ -511,7 +518,6 @@ fn compute_decommitment_positions_and_rebuild_evals(
fold_step: u32,
) -> (Span<usize>, SparseEvaluation) {
let fold_factor = pow2(fold_step);
let mut query_evals_iter = query_evals.into_iter();

let mut decommitment_positions = array![];
let mut subset_evals = array![];
@@ -520,12 +526,14 @@
let mut query_positions = queries.positions;
let mut folded_query_positions = queries.fold(fold_step).positions;

// TODO(audit): Change to for loop.
while let Some(folded_query_position) = folded_query_positions.pop_front() {
let subset_start = *folded_query_position * fold_factor;
let subset_end = subset_start + fold_factor;
let mut subset_decommitment_positions = (subset_start..subset_end).into_iter();
let mut subset_eval = array![];

// TODO(audit): Change to for loop.
// Extract the subset eval and decommitment positions.
while let Some(decommitment_position) = subset_decommitment_positions.next() {
decommitment_positions.append(decommitment_position);
@@ -535,11 +543,8 @@
subset_eval
.append(
*match query_positions.next_if_eq(@decommitment_position) {
Some(_) => query_evals_iter.next().unwrap(),
None => match witness_evals_iter.next() {
Some(witness_eval) => { witness_eval },
None => panic!("Insufficient Witness Error"),
},
Some(_) => query_evals.pop_front().unwrap(),
None => witness_evals_iter.next().unwrap(),
},
);
}
@@ -552,7 +557,7 @@

// Sanity check all the values have been consumed.
assert!(query_positions.is_empty());
assert!(query_evals_iter.next().is_none());
assert!(query_evals.is_empty());

let sparse_evaluation = SparseEvaluationImpl::new(
subset_evals, subset_domain_start_indices.span(),
@@ -592,6 +597,7 @@ impl SparseEvaluationImpl of SparseEvaluationTrait {
let mut domain_initials_inv = BatchInvertible::batch_inverse(domain_initials);
let mut res = array![];

// TODO(audit): Use zip_eq.
for subset_eval in self.subset_evals.span() {
let x_inv = domain_initials_inv.pop_front().unwrap();
let values: Box<[QM31; FOLD_FACTOR]> = *subset_eval.span().try_into().unwrap();
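For context, the per-subset line fold that consumes these inverted domain initials is the standard FRI fold (my summary; stwo's exact normalization may absorb the factors of 2 into $\alpha$ or the commitment):

$$
f'(x^2) = \frac{f(x) + f(-x)}{2} + \alpha \cdot \frac{f(x) - f(-x)}{2x},
$$

so each subset of `FOLD_FACTOR` evaluations needs $x^{-1}$ for its domain initial, which is why the inverses are computed with one batch inversion above.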
@@ -617,6 +623,7 @@ impl SparseEvaluationImpl of SparseEvaluationTrait {
let mut domain_initial_ys_inv = BatchInvertible::batch_inverse(domain_initial_ys);
let mut res = array![];

// TODO(audit): Use zip_eq.
for subset_eval in self.subset_evals.span() {
let y_inv = domain_initial_ys_inv.pop_front().unwrap();
let values: Box<[QM31; CIRCLE_TO_LINE_FOLD_FACTOR]> = *subset_eval