diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c6cd03bdc7..36a53aca3d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,6 +22,7 @@ jobs: args: --verbose --release --all --all-features build: + if: ${{ false }} name: Build target ${{ matrix.target }} runs-on: ubuntu-latest strategy: diff --git a/halo2_proofs/Cargo.toml b/halo2_proofs/Cargo.toml index 8b8dee7f18..a941f0437f 100644 --- a/halo2_proofs/Cargo.toml +++ b/halo2_proofs/Cargo.toml @@ -52,10 +52,20 @@ halo2curves = { git = 'https://github.com/privacy-scaling-explorations/halo2curv rand_core = { version = "0.6", default-features = false } tracing = "0.1" blake2b_simd = "1" +# pairing = { git = 'https://github.com/appliedzkp/pairing', package = "pairing_bn256", tag = "v0.1.1" } +subtle = "2.3" +cfg-if = "0.1" +poseidon = { git = 'https://github.com/appliedzkp/poseidon.git' } #, branch = 'circuit' } +num-integer = "0.1" +num-bigint = { version = "0.4", features = ["rand"] } # Developer tooling dependencies plotters = { version = "0.3.0", optional = true } tabbycat = { version = "0.1", features = ["attributes"], optional = true } +log = "0.4.17" + +# timer +ark-std = { version = "0.3.0" } [dev-dependencies] assert_matches = "1.5" @@ -68,11 +78,15 @@ rand_core = { version = "0.6", default-features = false, features = ["getrandom" getrandom = { version = "0.2", features = ["js"] } [features] -default = ["batch"] +default = ["batch", "gwc"] dev-graph = ["plotters", "tabbycat"] gadget-traces = ["backtrace"] sanity-checks = [] batch = ["rand_core/getrandom"] +shplonk = [] +gwc = [] +phase-check = [] +profile = ["ark-std/print-trace"] [lib] bench = false diff --git a/halo2_proofs/src/arithmetic.rs b/halo2_proofs/src/arithmetic.rs index 69b63502bf..e919f5d0b0 100644 --- a/halo2_proofs/src/arithmetic.rs +++ b/halo2_proofs/src/arithmetic.rs @@ -10,6 +10,8 @@ use group::{ pub use halo2curves::{CurveAffine, CurveExt, FieldExt, Group}; +pub const SPARSE_TWIDDLE_DEGREE: u32 = 10; + fn multiexp_serial(coeffs: &[C::Scalar], bases: &[C], acc: &mut C::Curve) { let coeffs: Vec<_> = coeffs.iter().map(|a| a.to_repr()).collect(); @@ -169,108 +171,250 @@ pub fn best_multiexp(coeffs: &[C::Scalar], bases: &[C]) -> C::Cu /// /// This will use multithreading if beneficial. 
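+// Split-FFT strategy (new in this patch): with t = multicore::current_num_threads(),
+// a size-n transform is decomposed into split_m = 2^floor(log2(t)) interleaved
+// sub-FFTs of size sub_n = n / split_m, one per thread; if the input is too
+// small for the split to pay off (sub_n < split_m), we fall back to the plain
+// serial radix-2 routine.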
 pub fn best_fft<G: Group>(a: &mut [G], omega: G::Scalar, log_n: u32) {
-    fn bitreverse(mut n: usize, l: usize) -> usize {
-        let mut r = 0;
-        for _ in 0..l {
-            r = (r << 1) | (n & 1);
-            n >>= 1;
-        }
-        r
-    }
-
     let threads = multicore::current_num_threads();
-    let log_threads = log2_floor(threads);
+    let log_split = log2_floor(threads) as usize;
     let n = a.len() as usize;
+    let sub_n = n >> log_split;
+    let split_m = 1 << log_split;
+
+    if sub_n < split_m {
+        serial_fft(a, omega, log_n);
+    } else {
+        parallel_fft(a, omega, log_n);
+    }
+}
+
+fn bitreverse(mut n: usize, l: usize) -> usize {
+    let mut r = 0;
+    for _ in 0..l {
+        r = (r << 1) | (n & 1);
+        n >>= 1;
+    }
+    r
+}
+
+fn serial_fft<G: Group>(a: &mut [G], omega: G::Scalar, log_n: u32) {
+    let n = a.len() as u32;
     assert_eq!(n, 1 << log_n);
 
-    for k in 0..n {
+    for k in 0..n as usize {
         let rk = bitreverse(k, log_n as usize);
         if k < rk {
-            a.swap(rk, k);
+            a.swap(rk as usize, k as usize);
         }
     }
 
-    // precompute twiddle factors
-    let twiddles: Vec<_> = (0..(n / 2) as usize)
-        .scan(G::Scalar::one(), |w, _| {
-            let tw = *w;
-            w.group_scale(&omega);
-            Some(tw)
-        })
-        .collect();
-
-    if log_n <= log_threads {
-        let mut chunk = 2_usize;
-        let mut twiddle_chunk = (n / 2) as usize;
-        for _ in 0..log_n {
-            a.chunks_mut(chunk).for_each(|coeffs| {
-                let (left, right) = coeffs.split_at_mut(chunk / 2);
-
-                // case when twiddle factor is one
-                let (a, left) = left.split_at_mut(1);
-                let (b, right) = right.split_at_mut(1);
-                let t = b[0];
-                b[0] = a[0];
-                a[0].group_add(&t);
-                b[0].group_sub(&t);
-
-                left.iter_mut()
-                    .zip(right.iter_mut())
-                    .enumerate()
-                    .for_each(|(i, (a, b))| {
-                        let mut t = *b;
-                        t.group_scale(&twiddles[(i + 1) * twiddle_chunk]);
-                        *b = *a;
-                        a.group_add(&t);
-                        b.group_sub(&t);
-                    });
-            });
-            chunk *= 2;
-            twiddle_chunk /= 2;
+    let mut m = 1;
+    for _ in 0..log_n {
+        let w_m = omega.pow_vartime(&[u64::from(n / (2 * m)), 0, 0, 0]);
+
+        let mut k = 0;
+        while k < n {
+            let mut w = G::Scalar::one();
+            for j in 0..m {
+                let mut t = a[(k + j + m) as usize];
+                t.group_scale(&w);
+                a[(k + j + m) as usize] = a[(k + j) as usize];
+                a[(k + j + m) as usize].group_sub(&t);
+                a[(k + j) as usize].group_add(&t);
+                w *= &w_m;
+            }
+
+            k += 2 * m;
         }
-    } else {
-        recursive_butterfly_arithmetic(a, n, 1, &twiddles)
+
+        m *= 2;
     }
 }
 
-/// This perform recursive butterfly arithmetic
-pub fn recursive_butterfly_arithmetic<G: Group>(
+fn serial_split_fft<G: Group>(
     a: &mut [G],
+    twiddle_lut: &[G::Scalar],
+    twiddle_scale: usize,
+    log_n: u32,
+) {
+    let n = a.len() as u32;
+    assert_eq!(n, 1 << log_n);
+
+    let mut m = 1;
+    for _ in 0..log_n {
+        let omega_idx = twiddle_scale * n as usize / (2 * m as usize); // 1/2, 1/4, 1/8, ...
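+        // The sparse LUT stores ω^i for i < 2^d in its low half and
+        // (ω^(2^d))^j in its high half, where d = SPARSE_TWIDDLE_DEGREE, so
+        // an arbitrary power costs at most one extra multiplication:
+        //     ω^idx = lut[idx mod 2^d] · lut[2^d + (idx >> d)]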
+        let low_idx = omega_idx % (1 << SPARSE_TWIDDLE_DEGREE);
+        let high_idx = omega_idx >> SPARSE_TWIDDLE_DEGREE;
+        let mut w_m = twiddle_lut[low_idx];
+        if high_idx > 0 {
+            w_m = w_m * twiddle_lut[(1 << SPARSE_TWIDDLE_DEGREE) + high_idx];
+        }
+
+        let mut k = 0;
+        while k < n {
+            let mut w = G::Scalar::one();
+            for j in 0..m {
+                let mut t = a[(k + j + m) as usize];
+                t.group_scale(&w);
+                a[(k + j + m) as usize] = a[(k + j) as usize];
+                a[(k + j + m) as usize].group_sub(&t);
+                a[(k + j) as usize].group_add(&t);
+                w *= &w_m;
+            }
+
+            k += 2 * m;
+        }
+
+        m *= 2;
+    }
+}
+
+fn split_radix_fft<G: Group>(
+    tmp: &mut [G],
+    a: &[G],
+    twiddle_lut: &[G::Scalar],
     n: usize,
-    twiddle_chunk: usize,
-    twiddles: &[G::Scalar],
+    sub_fft_offset: usize,
+    log_split: usize,
 ) {
-    if n == 2 {
-        let t = a[1];
-        a[1] = a[0];
-        a[0].group_add(&t);
-        a[1].group_sub(&t);
-    } else {
-        let (left, right) = a.split_at_mut(n / 2);
-        rayon::join(
-            || recursive_butterfly_arithmetic(left, n / 2, twiddle_chunk * 2, twiddles),
-            || recursive_butterfly_arithmetic(right, n / 2, twiddle_chunk * 2, twiddles),
-        );
+    let split_m = 1 << log_split;
+    let sub_n = n >> log_split;
+
+    // We use an out-of-place bit-reversal here; split_m <= num_threads, so the
+    // buffer space is small and data locality is good.
+    let mut t1 = vec![G::group_zero(); split_m];
+    // if unsafe code is allowed, a 10% performance improvement can be achieved
+    // let mut t1: Vec<G> = Vec::with_capacity(split_m as usize);
+    // unsafe{ t1.set_len(split_m as usize); }
+    for i in 0..split_m {
+        t1[bitreverse(i, log_split)] = a[(i * sub_n + sub_fft_offset)];
+    }
+    serial_split_fft(&mut t1, twiddle_lut, sub_n, log_split as u32);
+
+    let sparse_degree = SPARSE_TWIDDLE_DEGREE;
+    let omega_idx = sub_fft_offset as usize;
+    let low_idx = omega_idx % (1 << sparse_degree);
+    let high_idx = omega_idx >> sparse_degree;
+    let mut omega = twiddle_lut[low_idx];
+    if high_idx > 0 {
+        omega = omega * twiddle_lut[(1 << sparse_degree) + high_idx];
+    }
+    let mut w_m = G::Scalar::one();
+    for i in 0..split_m {
+        t1[i].group_scale(&w_m);
+        tmp[i] = t1[i];
+        w_m = w_m * omega;
+    }
+}
 
-        // case when twiddle factor is one
-        let (a, left) = left.split_at_mut(1);
-        let (b, right) = right.split_at_mut(1);
-        let t = b[0];
-        b[0] = a[0];
-        a[0].group_add(&t);
-        b[0].group_sub(&t);
-
-        left.iter_mut()
-            .zip(right.iter_mut())
-            .enumerate()
-            .for_each(|(i, (a, b))| {
-                let mut t = *b;
-                t.group_scale(&twiddles[(i + 1) * twiddle_chunk]);
-                *b = *a;
-                a.group_add(&t);
-                b.group_sub(&t);
-            });
+pub fn generate_twiddle_lookup_table<F: FieldExt>(
+    omega: F,
+    log_n: u32,
+    sparse_degree: u32,
+    with_last_level: bool,
+) -> Vec<F> {
+    let without_last_level = !with_last_level;
+    let is_lut_len_large = sparse_degree > log_n;
+
+    // dense
+    if is_lut_len_large {
+        let mut twiddle_lut = vec![F::zero(); (1 << log_n) as usize];
+        parallelize(&mut twiddle_lut, |twiddle_lut, start| {
+            let mut w_n = omega.pow_vartime(&[start as u64, 0, 0, 0]);
+            for twiddle_lut in twiddle_lut.iter_mut() {
+                *twiddle_lut = w_n;
+                w_n = w_n * omega;
+            }
+        });
+        return twiddle_lut;
     }
+
+    // sparse
+    let low_degree_lut_len = 1 << sparse_degree;
+    let high_degree_lut_len = 1 << (log_n - sparse_degree - without_last_level as u32);
+    let mut twiddle_lut = vec![F::zero(); (low_degree_lut_len + high_degree_lut_len) as usize];
+    parallelize(
+        &mut twiddle_lut[..low_degree_lut_len],
+        |twiddle_lut, start| {
+            let mut w_n = omega.pow_vartime(&[start as u64, 0, 0, 0]);
+            for twiddle_lut in twiddle_lut.iter_mut() {
+                *twiddle_lut = w_n;
+                w_n = w_n * omega;
} + }, + ); + let high_degree_omega = omega.pow_vartime(&[(1 << sparse_degree) as u64, 0, 0, 0]); + parallelize( + &mut twiddle_lut[low_degree_lut_len..], + |twiddle_lut, start| { + let mut w_n = high_degree_omega.pow_vartime(&[start as u64, 0, 0, 0]); + for twiddle_lut in twiddle_lut.iter_mut() { + *twiddle_lut = w_n; + w_n = w_n * high_degree_omega; + } + }, + ); + twiddle_lut +} + +pub fn parallel_fft(a: &mut [G], omega: G::Scalar, log_n: u32) { + let n = a.len() as usize; + assert_eq!(n, 1 << log_n); + + let log_split = log2_floor(multicore::current_num_threads()) as usize; + let split_m = 1 << log_split; + let sub_n = n >> log_split as usize; + let twiddle_lut = generate_twiddle_lookup_table(omega, log_n, SPARSE_TWIDDLE_DEGREE, true); + + // split fft + let mut tmp = vec![G::group_zero(); n]; + // if unsafe code is allowed, a 10% performance improvement can be achieved + // let mut tmp: Vec = Vec::with_capacity(n); + // unsafe{ tmp.set_len(n); } + multicore::scope(|scope| { + let a = &*a; + let twiddle_lut = &*twiddle_lut; + for (chunk_idx, tmp) in tmp.chunks_mut(sub_n).enumerate() { + scope.spawn(move |_| { + let split_fft_offset = chunk_idx * sub_n >> log_split; + for (i, tmp) in tmp.chunks_mut(split_m).enumerate() { + let split_fft_offset = split_fft_offset + i; + split_radix_fft(tmp, a, twiddle_lut, n, split_fft_offset, log_split); + } + }); + } + }); + + // shuffle + parallelize(a, |a, start| { + for (idx, a) in a.iter_mut().enumerate() { + let idx = start + idx; + let i = idx / sub_n; + let j = idx % sub_n; + *a = tmp[j * split_m + i]; + } + }); + + // sub fft + let new_omega = omega.pow_vartime(&[split_m as u64, 0, 0, 0]); + multicore::scope(|scope| { + for a in a.chunks_mut(sub_n) { + scope.spawn(move |_| { + serial_fft(a, new_omega, log_n - log_split as u32); + }); + } + }); + + // copy & unshuffle + let mask = (1 << log_split) - 1; + parallelize(&mut tmp, |tmp, start| { + for (idx, tmp) in tmp.iter_mut().enumerate() { + let idx = start + idx; + *tmp = a[idx]; + } + }); + parallelize(a, |a, start| { + for (idx, a) in a.iter_mut().enumerate() { + let idx = start + idx; + *a = tmp[sub_n * (idx & mask) + (idx >> log_split)]; + } + }); } /// Convert coefficient bases group elements to lagrange basis by inverse FFT. diff --git a/halo2_proofs/src/circuit.rs b/halo2_proofs/src/circuit.rs index 0f0646fa85..c48bb77459 100644 --- a/halo2_proofs/src/circuit.rs +++ b/halo2_proofs/src/circuit.rs @@ -51,8 +51,8 @@ pub trait Chip: Sized { } /// Index of a region in a layouter -#[derive(Clone, Copy, Debug)] -pub struct RegionIndex(usize); +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct RegionIndex(pub usize); impl From for RegionIndex { fn from(idx: usize) -> RegionIndex { @@ -87,14 +87,14 @@ impl std::ops::Deref for RegionStart { } /// A pointer to a cell within a circuit. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct Cell { /// Identifies the region in which this cell resides. - region_index: RegionIndex, + pub region_index: RegionIndex, /// The relative offset of this cell within its region. - row_offset: usize, + pub row_offset: usize, /// The column of this cell. - column: Column, + pub column: Column, } /// An assigned cell. 
diff --git a/halo2_proofs/src/circuit/floor_planner/single_pass.rs b/halo2_proofs/src/circuit/floor_planner/single_pass.rs
index 3798efbe54..f0e9098d12 100644
--- a/halo2_proofs/src/circuit/floor_planner/single_pass.rs
+++ b/halo2_proofs/src/circuit/floor_planner/single_pass.rs
@@ -5,6 +5,8 @@ use std::marker::PhantomData;
 
 use ff::Field;
 
+use ark_std::{end_timer, start_timer};
+
 use crate::{
     circuit::{
         layouter::{RegionColumn, RegionLayouter, RegionShape, TableLayouter},
@@ -31,8 +33,11 @@ impl FloorPlanner for SimpleFloorPlanner {
         config: C::Config,
         constants: Vec<Column<Fixed>>,
     ) -> Result<(), Error> {
+        let timer = start_timer!(|| "SimpleFloorPlanner synthesize");
         let layouter = SingleChipLayouter::new(cs, constants)?;
-        circuit.synthesize(config, layouter)
+        let result = circuit.synthesize(config, layouter);
+        end_timer!(timer);
+        result
     }
 }
 
@@ -82,21 +87,36 @@ impl<'a, F: Field, CS: Assignment<F> + 'a> Layouter<F> for SingleChipLayouter<'a
         N: Fn() -> NR,
         NR: Into<String>,
     {
+        let region_name: String = name().into();
+        let timer = start_timer!(|| format!("assign region: {}", region_name));
         let region_index = self.regions.len();
 
         // Get shape of the region.
         let mut shape = RegionShape::new(region_index.into());
         {
+            let timer_1st = start_timer!(|| format!("assign region 1st pass: {}", region_name));
            let region: &mut dyn RegionLayouter<F> = &mut shape;
            assignment(region.into())?;
+            end_timer!(timer_1st);
         }
+        log::debug!("region {} row_count: {}", region_name, shape.row_count());
 
         // Lay out this region. We implement the simplest approach here: position the
         // region starting at the earliest row for which none of the columns are in use.
         let mut region_start = 0;
         for column in &shape.columns {
-            region_start = cmp::max(region_start, self.columns.get(column).cloned().unwrap_or(0));
+            let column_start = self.columns.get(column).cloned().unwrap_or(0);
+            if column_start != 0 {
+                log::trace!(
+                    "column {:?} reused across regions. Start: {}. Region: {}",
+                    column,
+                    column_start,
+                    region_name
+                );
+            }
+            region_start = cmp::max(region_start, column_start);
         }
+        log::debug!("region {} start: {}", self.regions.len(), region_start);
         self.regions.push(region_start.into());
 
         // Update column usage information.
@@ -108,8 +128,11 @@ impl<'a, F: Field, CS: Assignment<F> + 'a> Layouter<F> for SingleChipLayouter<'a
         self.cs.enter_region(name);
         let mut region = SingleChipLayouterRegion::new(self, region_index.into());
         let result = {
+            let timer_2nd = start_timer!(|| format!("assign region 2nd pass: {}", region_name));
             let region: &mut dyn RegionLayouter<F> = &mut region;
-            assignment(region.into())
+            let result = assignment(region.into());
+            end_timer!(timer_2nd);
+            result
         }?;
         let constants_to_assign = region.constants;
         self.cs.exit_region();
@@ -143,6 +166,7 @@ impl<'a, F: Field, CS: Assignment<F> + 'a> Layouter<F> for SingleChipLayouter<'a
             }
         }
 
+        end_timer!(timer);
         Ok(result)
     }
 
diff --git a/halo2_proofs/src/circuit/floor_planner/v1.rs b/halo2_proofs/src/circuit/floor_planner/v1.rs
index 62207a91a7..1a3b5b676b 100644
--- a/halo2_proofs/src/circuit/floor_planner/v1.rs
+++ b/halo2_proofs/src/circuit/floor_planner/v1.rs
@@ -73,10 +73,19 @@ impl FloorPlanner for V1 {
                 .without_witnesses()
                 .synthesize(config.clone(), V1Pass::<_, CS>::measure(pass))?;
         }
+        for (name, shape) in &measure.regions {
+            log::debug!("region {} height: {}", name, shape.row_count())
+        }
 
         // Planning:
         // - Position the regions.
-        let (regions, column_allocations) = strategy::slot_in_biggest_advice_first(measure.regions);
+        let (regions, column_allocations) = strategy::slot_in_biggest_advice_first(
+            measure
+                .regions
+                .into_iter()
+                .map(|(_name, shape)| shape)
+                .collect(),
+        );
         plan.regions = regions;
 
         // - Determine how many rows our planned circuit will require.
@@ -170,7 +179,7 @@ impl<'p, 'a, F: Field, CS: Assignment<F> + 'a> Layouter<F> for V1Pass<'p, 'a, F,
         NR: Into<String>,
     {
         match &mut self.0 {
-            Pass::Measurement(pass) => pass.assign_region(assignment),
+            Pass::Measurement(pass) => pass.assign_region(name, assignment),
             Pass::Assignment(pass) => pass.assign_region(name, assignment),
         }
     }
@@ -230,7 +239,7 @@ impl<'p, 'a, F: Field, CS: Assignment<F> + 'a> Layouter<F> for V1Pass<'p, 'a, F,
 /// Measures the circuit.
 #[derive(Debug)]
 pub struct MeasurementPass {
-    regions: Vec<RegionShape>,
+    regions: Vec<(String, RegionShape)>,
 }
 
 impl MeasurementPass {
@@ -238,9 +247,15 @@ impl MeasurementPass {
         MeasurementPass { regions: vec![] }
     }
 
-    fn assign_region<F: Field, A, AR>(&mut self, mut assignment: A) -> Result<AR, Error>
+    fn assign_region<F: Field, A, AR, N, NR>(
+        &mut self,
+        name: N,
+        mut assignment: A,
+    ) -> Result<AR, Error>
     where
         A: FnMut(Region<'_, F>) -> Result<AR, Error>,
+        N: Fn() -> NR,
+        NR: Into<String>,
     {
         let region_index = self.regions.len();
@@ -250,7 +265,7 @@ impl MeasurementPass {
             let region: &mut dyn RegionLayouter<F> = &mut shape;
             assignment(region.into())
         }?;
-        self.regions.push(shape);
+        self.regions.push((name().into(), shape));
 
         Ok(result)
     }
 
diff --git a/halo2_proofs/src/circuit/value.rs b/halo2_proofs/src/circuit/value.rs
index e6ae26cd1b..920b70d330 100644
--- a/halo2_proofs/src/circuit/value.rs
+++ b/halo2_proofs/src/circuit/value.rs
@@ -14,7 +14,8 @@ use crate::plonk::{Assigned, Error};
 /// for improved usability.
 #[derive(Clone, Copy, Debug)]
 pub struct Value<V> {
-    inner: Option<V>,
+    /// The inner value; made public for ad hoc downstream use.
+    pub inner: Option<V>,
 }
 
 impl<V> Default for Value<V> {
@@ -49,6 +50,11 @@ impl<V> Value<V> {
         self.inner.ok_or(Error::Synthesis)
     }
 
+    /// Returns `true` if no value is present.
+    pub fn is_none(&self) -> bool {
+        self.inner.is_none()
+    }
+
     /// Converts from `&Value<V>` to `Value<&V>`.
     pub fn as_ref(&self) -> Value<&V> {
         Value {
diff --git a/halo2_proofs/src/dev.rs b/halo2_proofs/src/dev.rs
index b5b92d390f..f2155165b8 100644
--- a/halo2_proofs/src/dev.rs
+++ b/halo2_proofs/src/dev.rs
@@ -10,6 +10,9 @@ use std::time::{Duration, Instant};
 use blake2b_simd::blake2b;
 use ff::Field;
 
+use crate::plonk::sealed::SealedPhase;
+use crate::plonk::FirstPhase;
+use crate::plonk::ThirdPhase;
 use crate::{
     arithmetic::{FieldExt, Group},
     circuit,
@@ -82,7 +85,7 @@ impl Region {
 
 /// The value of a particular cell within the circuit.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-enum CellValue<F: Group + Field> {
+pub(crate) enum CellValue<F: Group + Field> {
     // An unassigned cell.
     Unassigned,
     // A cell that has been assigned a value.
@@ -291,7 +294,8 @@ pub struct MockProver<F: Group + Field> {
     // The fixed cells in the circuit, arranged as [column][row].
     fixed: Vec<Vec<CellValue<F>>>,
     // The advice cells in the circuit, arranged as [column][row].
-    advice: Vec<Vec<CellValue<F>>>,
+    pub(crate) advice: Vec<Vec<CellValue<F>>>,
+    advice_prev: Vec<Vec<CellValue<F>>>,
     // The instance cells in the circuit, arranged as [column][row].
     instance: Vec<Vec<F>>,
 
@@ -303,6 +307,8 @@ pub struct MockProver<F: Group + Field> {
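+    // Phase bookkeeping for the mock prover: `current_phase` (added below)
+    // records which synthesis pass is running, and `advice_prev` snapshots
+    // the advice values from the previous pass so the optional `phase-check`
+    // feature can flag earlier-phase cells that change between passes.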
     // A range of available rows for assignment and copies.
     usable_rows: Range<usize>,
+
+    current_phase: crate::plonk::sealed::Phase,
 }
 
 impl<F: Field + Group> Assignment<F> for MockProver<F> {
@@ -378,6 +384,10 @@ impl<F: Field + Group> Assignment<F> for MockProver<F> {
         A: FnOnce() -> AR,
         AR: Into<String>,
     {
+        if self.current_phase.0 < column.column_type().phase.0 {
+            return Ok(());
+        }
+
         if !self.usable_rows.contains(&row) {
             return Err(Error::not_enough_rows_available(self.k));
         }
@@ -391,12 +401,30 @@ impl<F: Field + Group> Assignment<F> for MockProver<F> {
             .or_default();
         }
 
+        let assigned = CellValue::Assigned(to().into_field().evaluate().assign()?);
         *self
             .advice
             .get_mut(column.index())
             .and_then(|v| v.get_mut(row))
-            .ok_or(Error::BoundsFailure)? =
-            CellValue::Assigned(to().into_field().evaluate().assign()?);
+            .ok_or(Error::BoundsFailure)? = assigned;
+
+        #[cfg(feature = "phase-check")]
+        if false && self.current_phase.0 > column.column_type().phase.0 {
+            // Some circuits assign the same cell more than once with different
+            // values, so this check can raise false alarms.
+            if !self.advice_prev.is_empty() {
+                if self.advice_prev[column.index()][row] != assigned {
+                    panic!("not same new {assigned:?} old {:?}, column idx {} row {} cur phase {:?} col phase {:?} region {:?}",
+                        self.advice_prev[column.index()][row],
+                        column.index(),
+                        row,
+                        self.current_phase,
+                        column.column_type().phase,
+                        self.current_region
+                    )
+                }
+            }
+        }
 
         Ok(())
     }
@@ -470,7 +498,10 @@ impl<F: Field + Group> Assignment<F> for MockProver<F> {
     }
 
     fn get_challenge(&self, challenge: Challenge) -> circuit::Value<F> {
-        circuit::Value::known(self.challenges[challenge.index()])
+        match self.challenges.get(challenge.index()) {
+            None => circuit::Value::unknown(),
+            Some(v) => circuit::Value::known(*v),
+        }
     }
 
     fn push_namespace<NR, N>(&mut self, _: N)
@@ -541,7 +572,7 @@ impl<F: FieldExt> MockProver<F> {
         let constants = cs.constants.clone();
 
         // Use hash chain to derive deterministic challenges for testing
-        let challenges = {
+        let challenges: Vec<F> = {
             let mut hash: [u8; 64] = blake2b(b"Halo2-MockProver").as_bytes().try_into().unwrap();
             iter::repeat_with(|| {
                 hash = blake2b(&hash).as_bytes().try_into().unwrap();
@@ -551,6 +582,63 @@ impl<F: FieldExt> MockProver<F> {
             .collect()
         };
 
+        #[cfg(feature = "phase-check")]
+        {
+            // Check 1: phase 1 must not assign expressions that involve phase-2 challenges.
+            // Check 2: phase 2 must assign the same phase-1 columns as phase 1 did.
+            let mut cur_challenges: Vec<F> = Vec::new();
+            let mut last_advice: Vec<Vec<CellValue<F>>> = Vec::new();
+            for current_phase in cs.phases() {
+                let mut prover = MockProver {
+                    k,
+                    n: n as u32,
+                    cs: cs.clone(),
+                    regions: vec![],
+                    current_region: None,
+                    fixed: fixed.clone(),
+                    advice: advice.clone(),
+                    advice_prev: last_advice.clone(),
+                    instance: instance.clone(),
+                    selectors: selectors.clone(),
+                    challenges: cur_challenges.clone(),
+                    permutation: permutation.clone(),
+                    usable_rows: 0..usable_rows,
+                    current_phase,
+                };
+                ConcreteCircuit::FloorPlanner::synthesize(
+                    &mut prover,
+                    circuit,
+                    config.clone(),
+                    constants.clone(),
+                )?;
+                for (index, phase) in cs.challenge_phase.iter().enumerate() {
+                    if current_phase == *phase {
+                        debug_assert_eq!(cur_challenges.len(), index);
+                        cur_challenges.push(challenges[index].clone());
+                    }
+                }
+                if !last_advice.is_empty() {
+                    let mut err = false;
+                    for (idx, advice_values) in prover.advice.iter().enumerate() {
+                        if cs.advice_column_phase[idx].0 < current_phase.0 {
+                            if advice_values != &last_advice[idx] {
+                                log::error!(
+                                    "PHASE ERR: column {} changed after phase {:?}",
+                                    idx,
+                                    current_phase
+                                );
+                                err = true;
+                            }
+                        }
+                    }
+                    if err {
+                        panic!("wrong phase assignment");
+                    }
+                }
+                last_advice = prover.advice;
+            }
+        }
+
         let mut prover = MockProver {
             k,
             n: n as u32,
@@ -559,13 +647,14 @@ impl<F: FieldExt> MockProver<F> {
             current_region: None,
             fixed,
             advice,
+            advice_prev: vec![],
             instance,
             selectors,
-            challenges,
+            challenges: challenges.clone(),
             permutation,
             usable_rows: 0..usable_rows,
+            current_phase: ThirdPhase.to_sealed(),
         };
-
         ConcreteCircuit::FloorPlanner::synthesize(&mut prover, circuit, config, constants)?;
 
         let (cs, selector_polys) = prover.cs.compress_selectors(prover.selectors.clone());
 
diff --git a/halo2_proofs/src/dev/failure.rs b/halo2_proofs/src/dev/failure.rs
index c3c7ab93a9..e20eb7058b 100644
--- a/halo2_proofs/src/dev/failure.rs
+++ b/halo2_proofs/src/dev/failure.rs
@@ -90,6 +90,9 @@ impl FailureLocation {
             .iter()
             .enumerate()
             .find(|(_, r)| {
+                if r.rows.is_none() {
+                    return false;
+                }
                 let (start, end) = r.rows.unwrap();
                 // We match the region if any input columns overlap, rather than all of
                 // them, because matching complex selector columns is hard. As long as
diff --git a/halo2_proofs/src/helpers.rs b/halo2_proofs/src/helpers.rs
index 297fd7b9ca..aef72b8338 100644
--- a/halo2_proofs/src/helpers.rs
+++ b/halo2_proofs/src/helpers.rs
@@ -1,7 +1,8 @@
+use ff::Field;
+use halo2curves::{CurveAffine, FieldExt};
+use num_bigint::BigUint;
 use std::io;
 
-use halo2curves::CurveAffine;
-
 pub(crate) trait CurveRead: CurveAffine {
     /// Reads a compressed element from the buffer and attempts to parse it
     /// using `from_bytes`.
@@ -14,3 +15,49 @@ pub(crate) trait CurveRead: CurveAffine {
     }
 }
 impl<C: CurveAffine> CurveRead for C {}
+
+pub fn field_to_bn<F: FieldExt>(f: &F) -> BigUint {
+    BigUint::from_bytes_le(f.to_repr().as_ref())
+}
+
+/// Input a big integer `bn`, compute a field element `f`
+/// such that `f == bn % F::MODULUS`.
+pub fn bn_to_field<F: FieldExt>(bn: &BigUint) -> F {
+    let mut buf = bn.to_bytes_le();
+    buf.resize(64, 0u8);
+
+    let mut buf_array = [0u8; 64];
+    buf_array.copy_from_slice(buf.as_ref());
+    F::from_bytes_wide(&buf_array)
+}
+
+/// Input a base field element `b`, output a scalar field
+/// element `s` such that `s == b % ScalarField::MODULUS`.
+pub(crate) fn base_to_scalar<C: CurveAffine>(base: &C::Base) -> C::Scalar {
+    let bn = field_to_bn(base);
+    // bn_to_field will perform a mod reduction
+    bn_to_field(&bn)
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use halo2curves::bn256::{Fq, G1Affine};
+    use rand_core::OsRng;
+    #[test]
+    fn test_conversion() {
+        // random numbers
+        for _ in 0..100 {
+            let b = Fq::random(OsRng);
+            let bi = field_to_bn(&b);
+            let b_rec = bn_to_field(&bi);
+            assert_eq!(b, b_rec);
+
+            let s = base_to_scalar::<G1Affine>(&b);
+            let si = field_to_bn(&s);
+            // TODO: fix me -- this test can fail with small probability,
+            // because |base field| > |scalar field|
+            assert_eq!(si, bi);
+        }
+    }
+}
diff --git a/halo2_proofs/src/lib.rs b/halo2_proofs/src/lib.rs
index c84e482675..b4bdcd5baf 100644
--- a/halo2_proofs/src/lib.rs
+++ b/halo2_proofs/src/lib.rs
@@ -19,7 +19,6 @@
 )]
 #![deny(broken_intra_doc_links)]
 #![deny(missing_debug_implementations)]
-#![deny(missing_docs)]
 #![deny(unsafe_code)]
 // Remove this once we update pasta_curves
 #![allow(unused_imports)]
diff --git a/halo2_proofs/src/plonk.rs b/halo2_proofs/src/plonk.rs
index f9a6587af6..164d68bcff 100644
--- a/halo2_proofs/src/plonk.rs
+++ b/halo2_proofs/src/plonk.rs
@@ -7,6 +7,7 @@
 use blake2b_simd::Params as Blake2bParams;
 use group::ff::Field;
+use halo2curves::pairing::Engine;
 
 use crate::arithmetic::{CurveAffine, FieldExt};
 use crate::helpers::CurveRead;
@@ -53,6 +54,37 @@ pub struct VerifyingKey<C: CurveAffine> {
 }
 
 impl<C: CurveAffine> VerifyingKey<C> {
+    /// Writes a verifying key to a buffer.
+ pub fn write(&self, writer: &mut W) -> io::Result<()> { + for commitment in &self.fixed_commitments { + writer.write_all(commitment.to_bytes().as_ref())?; + } + self.permutation.write(writer)?; + + Ok(()) + } + + /// Reads a verification key from a buffer. + pub fn read< + 'param, + R: io::Read, + ConcreteCircuit: Circuit, + E: Engine, + P: Params<'param, C>, + >( + reader: &mut R, + params: &P, + ) -> io::Result { + let (domain, cs, _) = keygen::create_domain::(params.k()); + + let fixed_commitments: Vec<_> = (0..cs.num_fixed_columns) + .map(|_| C::read(reader)) + .collect::>()?; + + let permutation = permutation::VerifyingKey::read(reader, &cs.permutation)?; + Ok(Self::from_parts(domain, fixed_commitments, permutation, cs)) + } + fn from_parts( domain: EvaluationDomain, fixed_commitments: Vec, diff --git a/halo2_proofs/src/plonk/circuit.rs b/halo2_proofs/src/plonk/circuit.rs index 7ad8d8b8e1..e94787b619 100644 --- a/halo2_proofs/src/plonk/circuit.rs +++ b/halo2_proofs/src/plonk/circuit.rs @@ -24,7 +24,7 @@ pub trait ColumnType: /// A column with an index and type #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct Column { - index: usize, + pub index: usize, column_type: C, } @@ -66,7 +66,7 @@ impl PartialOrd for Column { pub(crate) mod sealed { /// Phase of advice column #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] - pub struct Phase(pub(super) u8); + pub struct Phase(pub(crate) u8); impl Phase { pub fn prev(&self) -> Option { @@ -398,6 +398,10 @@ pub struct FixedQuery { } impl FixedQuery { + /// Index + pub fn index(&self) -> usize { + self.index + } /// Column index pub fn column_index(&self) -> usize { self.column_index @@ -423,6 +427,10 @@ pub struct AdviceQuery { } impl AdviceQuery { + /// Index + pub fn index(&self) -> usize { + self.index + } /// Column index pub fn column_index(&self) -> usize { self.column_index @@ -451,6 +459,10 @@ pub struct InstanceQuery { } impl InstanceQuery { + /// Index + pub fn index(&self) -> usize { + self.index + } /// Column index pub fn column_index(&self) -> usize { self.column_index @@ -1278,7 +1290,7 @@ impl>, Iter: IntoIterator> IntoIterato pub struct Gate { name: &'static str, constraint_names: Vec<&'static str>, - polys: Vec>, + pub polys: Vec>, /// We track queried selectors separately from other cells, so that we can use them to /// trigger debug checks on gates. queried_selectors: Vec, @@ -1312,37 +1324,36 @@ impl Gate { /// permutation arrangements. #[derive(Debug, Clone)] pub struct ConstraintSystem { - pub(crate) num_fixed_columns: usize, - pub(crate) num_advice_columns: usize, - pub(crate) num_instance_columns: usize, - pub(crate) num_selectors: usize, + pub num_fixed_columns: usize, + pub num_advice_columns: usize, + pub num_instance_columns: usize, + pub num_selectors: usize, pub(crate) num_challenges: usize, /// Contains the phase for each advice column. Should have same length as num_advice_columns. - pub(crate) advice_column_phase: Vec, + pub advice_column_phase: Vec, /// Contains the phase for each challenge. Should have same length as num_challenges. - pub(crate) challenge_phase: Vec, + pub challenge_phase: Vec, /// This is a cached vector that maps virtual selectors to the concrete /// fixed column that they were compressed into. This is just used by dev /// tooling right now. 
     pub(crate) selector_map: Vec<Column<Fixed>>,
-
-    pub(crate) gates: Vec<Gate<F>>,
-    pub(crate) advice_queries: Vec<(Column<Advice>, Rotation)>,
+    pub gates: Vec<Gate<F>>,
+    pub advice_queries: Vec<(Column<Advice>, Rotation)>,
     // Contains an integer for each advice column
     // identifying how many distinct queries it has
     // so far; should be same length as num_advice_columns.
     num_advice_queries: Vec<usize>,
-    pub(crate) instance_queries: Vec<(Column<Instance>, Rotation)>,
-    pub(crate) fixed_queries: Vec<(Column<Fixed>, Rotation)>,
+    pub instance_queries: Vec<(Column<Instance>, Rotation)>,
+    pub fixed_queries: Vec<(Column<Fixed>, Rotation)>,
 
     // Permutation argument for performing equality constraints
-    pub(crate) permutation: permutation::Argument,
+    pub permutation: permutation::Argument,
 
     // Vector of lookup arguments, where each corresponds to a sequence of
     // input expressions and a sequence of table expressions involved in the lookup.
-    pub(crate) lookups: Vec<lookup::Argument<F>>,
+    pub lookups: Vec<lookup::Argument<F>>,
 
     // Vector of fixed columns, which can be used to store constant values
     // that are copied into advice columns.
@@ -1613,7 +1624,7 @@ impl<F: Field> ConstraintSystem<F> {
         panic!("get_instance_query_index called for non-existent query");
     }
 
-    pub(crate) fn get_any_query_index(&self, column: Column<Any>, at: Rotation) -> usize {
+    pub fn get_any_query_index(&self, column: Column<Any>, at: Rotation) -> usize {
         match column.column_type() {
             Any::Advice(_) => {
                 self.get_advice_query_index(Column::<Advice>::try_from(column).unwrap(), at)
@@ -1888,7 +1899,16 @@ impl<F: Field> ConstraintSystem<F> {
         });
     }
 
-    pub(crate) fn phases(&self) -> impl Iterator<Item = sealed::Phase> {
+    /// Returns the highest phase used by any advice column.
+    pub fn max_phase(&self) -> u8 {
+        self.advice_column_phase
+            .iter()
+            .max()
+            .map(|phase| phase.0)
+            .unwrap_or_default()
+    }
+
+    pub fn phases(&self) -> impl Iterator<Item = sealed::Phase> {
         let max_phase = self
             .advice_column_phase
             .iter()
diff --git a/halo2_proofs/src/plonk/keygen.rs b/halo2_proofs/src/plonk/keygen.rs
index 39cef34c0f..e18d9c62d2 100644
--- a/halo2_proofs/src/plonk/keygen.rs
+++ b/halo2_proofs/src/plonk/keygen.rs
@@ -249,7 +249,20 @@ where
     ))
 }
 
-/// Generate a `ProvingKey` from a `VerifyingKey` and an instance of `Circuit`.
+/// Generate a `ProvingKey` from an instance of `Circuit`, building the
+/// `VerifyingKey` internally.
+pub fn keygen_pk2<'params, C, P, ConcreteCircuit>(
+    params: &P,
+    circuit: &ConcreteCircuit,
+) -> Result<ProvingKey<C>, Error>
+where
+    C: CurveAffine,
+    P: Params<'params, C>,
+    ConcreteCircuit: Circuit<C::Scalar>,
+{
+    keygen_pk_impl(params, None, circuit)
+}
+
+/// Generate a `ProvingKey` from a `VerifyingKey` and an instance of `Circuit`.
 pub fn keygen_pk<'params, C, P, ConcreteCircuit>(
     params: &P,
     vk: VerifyingKey<C>,
@@ -260,10 +273,21 @@ where
     P: Params<'params, C>,
     ConcreteCircuit: Circuit<C::Scalar>,
 {
-    let mut cs = ConstraintSystem::default();
-    let config = ConcreteCircuit::configure(&mut cs);
+    keygen_pk_impl(params, Some(vk), circuit)
+}
 
-    let cs = cs;
+/// Generate a `ProvingKey` from an optional `VerifyingKey` and an instance of
+/// `Circuit`; when no key is supplied, one is derived from the circuit itself.
+pub fn keygen_pk_impl<'params, C, P, ConcreteCircuit>( + params: &P, + vk: Option>, + circuit: &ConcreteCircuit, +) -> Result, Error> +where + C: CurveAffine, + P: Params<'params, C>, + ConcreteCircuit: Circuit, +{ + let (domain, cs, config) = create_domain::(params.k()); if (params.n() as usize) < cs.minimum_rows() { return Err(Error::not_enough_rows_available(params.k())); @@ -271,7 +295,7 @@ where let mut assembly: Assembly = Assembly { k: params.k(), - fixed: vec![vk.domain.empty_lagrange_assigned(); cs.num_fixed_columns], + fixed: vec![domain.empty_lagrange_assigned(); cs.num_fixed_columns], permutation: permutation::keygen::Assembly::new(params.n() as usize, &cs.permutation), selectors: vec![vec![false; params.n() as usize]; cs.num_selectors], usable_rows: 0..params.n() as usize - (cs.blinding_factors() + 1), @@ -291,9 +315,24 @@ where fixed.extend( selector_polys .into_iter() - .map(|poly| vk.domain.lagrange_from_vec(poly)), + .map(|poly| domain.lagrange_from_vec(poly)), ); + let vk = vk.unwrap_or_else(|| { + let permutation_vk = + assembly + .permutation + .clone() + .build_vk(params, &domain, &cs.permutation); + + let fixed_commitments = fixed + .iter() + .map(|poly| params.commit_lagrange(poly, Blind::default()).to_affine()) + .collect(); + + VerifyingKey::from_parts(domain, fixed_commitments, permutation_vk, cs.clone()) + }); + let fixed_polys: Vec<_> = fixed .iter() .map(|poly| vk.domain.lagrange_to_coeff(poly.clone())) diff --git a/halo2_proofs/src/plonk/lookup.rs b/halo2_proofs/src/plonk/lookup.rs index 68cda75d37..e0d0db5c0c 100644 --- a/halo2_proofs/src/plonk/lookup.rs +++ b/halo2_proofs/src/plonk/lookup.rs @@ -7,9 +7,9 @@ pub(crate) mod verifier; #[derive(Clone)] pub struct Argument { - pub(crate) name: &'static str, - pub(crate) input_expressions: Vec>, - pub(crate) table_expressions: Vec>, + pub name: &'static str, + pub input_expressions: Vec>, + pub table_expressions: Vec>, } impl Debug for Argument { diff --git a/halo2_proofs/src/plonk/lookup/verifier.rs b/halo2_proofs/src/plonk/lookup/verifier.rs index 88041c29b3..add4e592c9 100644 --- a/halo2_proofs/src/plonk/lookup/verifier.rs +++ b/halo2_proofs/src/plonk/lookup/verifier.rs @@ -12,27 +12,30 @@ use crate::{ }; use ff::Field; +#[derive(Debug)] pub struct PermutationCommitments { - permuted_input_commitment: C, - permuted_table_commitment: C, + pub permuted_input_commitment: C, + pub permuted_table_commitment: C, } +#[derive(Debug)] pub struct Committed { - permuted: PermutationCommitments, - product_commitment: C, + pub permuted: PermutationCommitments, + pub product_commitment: C, } +#[derive(Debug)] pub struct Evaluated { - committed: Committed, - product_eval: C::Scalar, - product_next_eval: C::Scalar, - permuted_input_eval: C::Scalar, - permuted_input_inv_eval: C::Scalar, - permuted_table_eval: C::Scalar, + pub committed: Committed, + pub product_eval: C::Scalar, + pub product_next_eval: C::Scalar, + pub permuted_input_eval: C::Scalar, + pub permuted_input_inv_eval: C::Scalar, + pub permuted_table_eval: C::Scalar, } impl Argument { - pub(in crate::plonk) fn read_permuted_commitments< + pub fn read_permuted_commitments< C: CurveAffine, E: EncodedChallenge, T: TranscriptRead, @@ -51,10 +54,7 @@ impl Argument { } impl PermutationCommitments { - pub(in crate::plonk) fn read_product_commitment< - E: EncodedChallenge, - T: TranscriptRead, - >( + pub fn read_product_commitment, T: TranscriptRead>( self, transcript: &mut T, ) -> Result, Error> { @@ -68,7 +68,7 @@ impl PermutationCommitments { } impl Committed { - 
pub(crate) fn evaluate, T: TranscriptRead>( + pub fn evaluate, T: TranscriptRead>( self, transcript: &mut T, ) -> Result, Error> { diff --git a/halo2_proofs/src/plonk/permutation.rs b/halo2_proofs/src/plonk/permutation.rs index 26a4d805d6..765a1cf975 100644 --- a/halo2_proofs/src/plonk/permutation.rs +++ b/halo2_proofs/src/plonk/permutation.rs @@ -15,7 +15,7 @@ use std::io; #[derive(Debug, Clone)] pub struct Argument { /// A sequence of columns involved in the argument. - pub(super) columns: Vec>, + pub columns: Vec>, } impl Argument { @@ -73,9 +73,9 @@ impl Argument { } /// The verifying key for a single permutation argument. -#[derive(Clone, Debug)] +#[derive(Debug, Clone)] pub struct VerifyingKey { - commitments: Vec, + pub commitments: Vec, } impl VerifyingKey { @@ -85,6 +85,23 @@ impl VerifyingKey { } } +impl VerifyingKey { + pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { + for commitment in &self.commitments { + writer.write_all(commitment.to_bytes().as_ref())?; + } + + Ok(()) + } + + pub(crate) fn read(reader: &mut R, argument: &Argument) -> io::Result { + let commitments = (0..argument.columns.len()) + .map(|_| C::read(reader)) + .collect::, _>>()?; + Ok(VerifyingKey { commitments }) + } +} + /// The proving key for a single permutation argument. #[derive(Clone, Debug)] pub(crate) struct ProvingKey { diff --git a/halo2_proofs/src/plonk/permutation/keygen.rs b/halo2_proofs/src/plonk/permutation/keygen.rs index cdb8cc02f9..3b2552f64f 100644 --- a/halo2_proofs/src/plonk/permutation/keygen.rs +++ b/halo2_proofs/src/plonk/permutation/keygen.rs @@ -3,7 +3,7 @@ use group::Curve; use super::{Argument, ProvingKey, VerifyingKey}; use crate::{ - arithmetic::{CurveAffine, FieldExt}, + arithmetic::{parallelize, CurveAffine, FieldExt}, plonk::{Any, Column, Error}, poly::{ commitment::{Blind, CommitmentScheme, Params}, @@ -11,7 +11,7 @@ use crate::{ }, }; -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) struct Assembly { columns: Vec>, pub(crate) mapping: Vec>, @@ -104,13 +104,16 @@ impl Assembly { p: &Argument, ) -> VerifyingKey { // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] - let mut omega_powers = Vec::with_capacity(params.n() as usize); + let mut omega_powers = vec![C::Scalar::zero(); params.n() as usize]; { - let mut cur = C::Scalar::one(); - for _ in 0..params.n() { - omega_powers.push(cur); - cur *= &domain.get_omega(); - } + let omega = domain.get_omega(); + parallelize(&mut omega_powers, |o, start| { + let mut cur = omega.pow_vartime(&[start as u64]); + for v in o.iter_mut() { + *v = cur; + cur *= ω + } + }) } // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] @@ -157,13 +160,16 @@ impl Assembly { p: &Argument, ) -> ProvingKey { // Compute [omega^0, omega^1, ..., omega^{params.n - 1}] - let mut omega_powers = Vec::with_capacity(params.n() as usize); + let mut omega_powers = vec![C::Scalar::zero(); params.n() as usize]; { - let mut cur = C::Scalar::one(); - for _ in 0..params.n() { - omega_powers.push(cur); - cur *= &domain.get_omega(); - } + let omega = domain.get_omega(); + parallelize(&mut omega_powers, |o, start| { + let mut cur = omega.pow_vartime(&[start as u64]); + for v in o.iter_mut() { + *v = cur; + cur *= ω + } + }) } // Compute [omega_powers * \delta^0, omega_powers * \delta^1, ..., omega_powers * \delta^m] diff --git a/halo2_proofs/src/plonk/permutation/verifier.rs b/halo2_proofs/src/plonk/permutation/verifier.rs index 2e7a707a07..b892d1720d 100644 --- a/halo2_proofs/src/plonk/permutation/verifier.rs 
+++ b/halo2_proofs/src/plonk/permutation/verifier.rs @@ -10,27 +10,31 @@ use crate::{ transcript::{EncodedChallenge, TranscriptRead}, }; +#[derive(Debug)] pub struct Committed { permutation_product_commitments: Vec, } +#[derive(Debug)] pub struct EvaluatedSet { - permutation_product_commitment: C, - permutation_product_eval: C::Scalar, - permutation_product_next_eval: C::Scalar, - permutation_product_last_eval: Option, + pub permutation_product_commitment: C, + pub permutation_product_eval: C::Scalar, + pub permutation_product_next_eval: C::Scalar, + pub permutation_product_last_eval: Option, } +#[derive(Debug)] pub struct CommonEvaluated { - permutation_evals: Vec, + pub permutation_evals: Vec, } +#[derive(Debug)] pub struct Evaluated { - sets: Vec>, + pub sets: Vec>, } impl Argument { - pub(crate) fn read_product_commitments< + pub fn read_product_commitments< C: CurveAffine, E: EncodedChallenge, T: TranscriptRead, @@ -54,7 +58,7 @@ impl Argument { } impl VerifyingKey { - pub(in crate::plonk) fn evaluate, T: TranscriptRead>( + pub fn evaluate, T: TranscriptRead>( &self, transcript: &mut T, ) -> Result, Error> { @@ -69,7 +73,7 @@ impl VerifyingKey { } impl Committed { - pub(crate) fn evaluate, T: TranscriptRead>( + pub fn evaluate, T: TranscriptRead>( self, transcript: &mut T, ) -> Result, Error> { diff --git a/halo2_proofs/src/plonk/prover.rs b/halo2_proofs/src/plonk/prover.rs index 00025169ab..bd9257c6b3 100644 --- a/halo2_proofs/src/plonk/prover.rs +++ b/halo2_proofs/src/plonk/prover.rs @@ -190,7 +190,7 @@ pub fn create_proof< AR: Into, { // Ignore assignment of advice column in different phase than current one. - if self.current_phase != column.column_type().phase { + if self.current_phase.0 < column.column_type().phase.0 { return Ok(()); } @@ -275,6 +275,9 @@ pub fn create_proof< }; instances.len() ]; + #[cfg(feature = "phase-check")] + let mut advice_assignments = + vec![vec![domain.empty_lagrange_assigned(); meta.num_advice_columns]; instances.len()]; let mut challenges = HashMap::::with_capacity(meta.num_challenges); let unusable_rows_start = params.n() as usize - (meta.blinding_factors() + 1); @@ -292,8 +295,11 @@ pub fn create_proof< }) .collect::>(); - for ((circuit, advice), instances) in - circuits.iter().zip(advice.iter_mut()).zip(instances) + for (circuit_idx, ((circuit, advice), instances)) in circuits + .iter() + .zip(advice.iter_mut()) + .zip(instances) + .enumerate() { let mut witness = WitnessCollection { k: params.k(), @@ -317,6 +323,22 @@ pub fn create_proof< meta.constants.clone(), )?; + #[cfg(feature = "phase-check")] + { + for (idx, advice_col) in witness.advice.iter().enumerate() { + if pk.vk.cs.advice_column_phase[idx].0 < current_phase.0 { + if advice_assignments[circuit_idx][idx].values != advice_col.values { + log::error!( + "advice column {}(at {:?}) changed when {:?}", + idx, + pk.vk.cs.advice_column_phase[idx], + current_phase + ); + } + } + } + } + let mut advice_values = batch_invert_assigned::( witness .advice @@ -324,6 +346,10 @@ pub fn create_proof< .enumerate() .filter_map(|(column_index, advice)| { if column_indices.contains(&column_index) { + #[cfg(feature = "phase-check")] + { + advice_assignments[circuit_idx][column_index] = advice.clone(); + } Some(advice) } else { None @@ -334,9 +360,12 @@ pub fn create_proof< // Add blinding factors to advice columns for advice_values in &mut advice_values { - for cell in &mut advice_values[unusable_rows_start..] 
{
-                *cell = Scheme::Scalar::random(&mut rng);
-            }
+            // for cell in &mut advice_values[unusable_rows_start..] {
+            //     *cell = C::Scalar::random(&mut rng);
+            //     *cell = C::Scalar::one();
+            // }
+            let idx = advice_values.len() - 1;
+            advice_values[idx] = Scheme::Scalar::one();
         }
 
         // Compute commitments to advice column polynomials
diff --git a/halo2_proofs/src/plonk/vanishing/prover.rs b/halo2_proofs/src/plonk/vanishing/prover.rs
index cc52273b59..d85f2988e4 100644
--- a/halo2_proofs/src/plonk/vanishing/prover.rs
+++ b/halo2_proofs/src/plonk/vanishing/prover.rs
@@ -34,6 +34,9 @@ pub(in crate::plonk) struct Evaluated<C: CurveAffine> {
 }
 
 impl<C: CurveAffine> Argument<C> {
+    /// This commits to the _zero polynomial_: the scheme stays binding but is
+    /// not hiding, which is fine for protocols that do not require
+    /// zero-knowledge.
     pub(in crate::plonk) fn commit<
         'params,
         P: ParamsProver<'params, C>,
         E: EncodedChallenge<C>,
         R: RngCore,
         T: TranscriptWrite<C, E>,
     >(
-        params: &P,
+        _params: &P,
         domain: &EvaluationDomain<C::Scalar>,
-        mut rng: R,
+        mut _rng: R,
         transcript: &mut T,
     ) -> Result<Committed<C>, Error> {
         // Sample a random polynomial of degree n - 1
-        let mut random_poly = domain.empty_coeff();
-        for coeff in random_poly.iter_mut() {
-            *coeff = C::Scalar::random(&mut rng);
-        }
+        let random_poly = domain.empty_coeff();
 
         // Sample a random blinding factor
-        let random_blind = Blind(C::Scalar::random(rng));
-
-        // Commit
-        let c = params.commit(&random_poly, random_blind).to_affine();
+        let random_blind = Blind(C::Scalar::zero());
+        let c = C::identity();
+        // We write the identity point to the transcript, which
+        // is the commitment to the zero polynomial.
         transcript.write_point(c)?;
 
         Ok(Committed {
diff --git a/halo2_proofs/src/poly.rs b/halo2_proofs/src/poly.rs
index ecf66a6e0b..8d1d80d2a5 100644
--- a/halo2_proofs/src/poly.rs
+++ b/halo2_proofs/src/poly.rs
@@ -63,7 +63,7 @@ impl Basis for ExtendedLagrangeCoeff {}
 /// basis.
 #[derive(Clone, Debug)]
 pub struct Polynomial<F, B> {
-    values: Vec<F>,
+    pub(crate) values: Vec<F>,
     _marker: PhantomData<B>,
 }
 
diff --git a/halo2_proofs/src/poly/kzg/commitment.rs b/halo2_proofs/src/poly/kzg/commitment.rs
index 3e8cce6d09..d7ce1cc02d 100644
--- a/halo2_proofs/src/poly/kzg/commitment.rs
+++ b/halo2_proofs/src/poly/kzg/commitment.rs
@@ -23,7 +23,7 @@ pub struct ParamsKZG<E: Engine> {
     pub(crate) k: u32,
     pub(crate) n: u64,
     pub(crate) g: Vec<E::G1Affine>,
-    pub(crate) g_lagrange: Vec<E::G1Affine>,
+    pub g_lagrange: Vec<E::G1Affine>,
     pub(crate) g2: E::G2Affine,
     pub(crate) s_g2: E::G2Affine,
 }
@@ -54,6 +54,20 @@ impl<E: Engine + Debug> ParamsKZG<E> {
     /// Initializes parameters for the curve, draws toxic secret from given rng.
     /// MUST NOT be used in production.
     pub fn setup<R: RngCore>(k: u32, rng: R) -> Self {
+        let s = <E::Scalar>::random(rng);
+        Self::unsafe_setup_with_s(k, s)
+    }
+
+    /// Initializes parameters for the curve, drawing the toxic secret from
+    /// `OsRng` inside the function. MUST NOT be used in production.
+    pub fn unsafe_setup(k: u32) -> Self {
+        let s = E::Scalar::random(OsRng);
+        Self::unsafe_setup_with_s(k, s)
+    }
+
+    /// Initializes parameters for the curve using the given secret `s`.
+    /// MUST NOT be used in production.
+    pub fn unsafe_setup_with_s(k: u32, s: <E as Engine>::Scalar) -> Self {
         // Largest root of unity exponent of the Engine is `2^E::Scalar::S`, so we can
         // only support FFTs of polynomials below degree `2^E::Scalar::S`.
         assert!(k <= E::Scalar::S);
@@ -61,8 +75,6 @@ impl<E: Engine + Debug> ParamsKZG<E> {
         // Calculate g = [G1, [s] G1, [s^2] G1, ..., [s^(n-1)] G1] in parallel.
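+        // Each `parallelize` worker starts its chunk at g1 · s^start and then
+        // steps by s, so the powers of s are filled concurrently rather than
+        // by one sequential scan.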
let g1 = E::G1Affine::generator(); - let s = ::random(rng); - let mut g_projective = vec![E::G1::group_zero(); n as usize]; parallelize(&mut g_projective, |g, start| { let mut current_g: E::G1 = g1.into(); diff --git a/halo2_proofs/src/poly/kzg/msm.rs b/halo2_proofs/src/poly/kzg/msm.rs index 19754146a0..6cc90a5103 100644 --- a/halo2_proofs/src/poly/kzg/msm.rs +++ b/halo2_proofs/src/poly/kzg/msm.rs @@ -152,15 +152,16 @@ impl<'a, E: MultiMillerLoop + Debug> DualMSM<'a, E> { let s_g2_prepared = E::G2Prepared::from(self.params.s_g2); let n_g2_prepared = E::G2Prepared::from(-self.params.g2); - let left = self.left.eval(); - let right = self.right.eval(); + let left: ::G1Affine = self.left.eval().into(); + let right: ::G1Affine = self.right.eval().into(); - let (term_1, term_2) = ( - (&left.into(), &s_g2_prepared), - (&right.into(), &n_g2_prepared), - ); + let (term_1, term_2) = ((&left, &s_g2_prepared), (&right, &n_g2_prepared)); let terms = &[term_1, term_2]; + log::debug!( + "check pairing: {:?}", + (left, right, self.params.s_g2, -self.params.g2) + ); bool::from( E::multi_miller_loop(&terms[..]) .final_exponentiation() diff --git a/halo2_proofs/src/poly/multiopen.rs b/halo2_proofs/src/poly/multiopen.rs new file mode 100644 index 0000000000..854018e90a --- /dev/null +++ b/halo2_proofs/src/poly/multiopen.rs @@ -0,0 +1,372 @@ +//! This module contains an optimisation of the polynomial commitment opening +//! scheme described in the [Halo][halo] paper. +//! +//! [halo]: https://eprint.iacr.org/2019/1021 + +use super::{commitment::ParamsVerifier, PairMSM}; +use crate::{ + arithmetic::{eval_polynomial, CurveAffine, FieldExt}, + pairing::arithmetic::{MillerLoopResult, MultiMillerLoop}, + poly::{msm::MSM, Coeff, Error, Polynomial}, +}; + +use crate::poly::Rotation; +use ff::Field; +use group::Group; +use rand::RngCore; +use std::{ + collections::{BTreeMap, BTreeSet}, + marker::PhantomData, + thread::AccessError, +}; +use subtle::Choice; + +cfg_if::cfg_if! 
{ + if #[cfg(feature = "shplonk")] { + mod shplonk; + pub use shplonk::*; + } else { + mod gwc; + pub use gwc::*; + } +} + +/// Decider performs final pairing check with given verifier params and two channel linear combination +#[derive(Debug)] +pub struct Decider { + _marker: PhantomData, +} + +impl Decider { + fn prepare(params: &ParamsVerifier) -> (E::G2Prepared, E::G2Prepared) { + let s_g2_prepared = E::G2Prepared::from(params.s_g2); + let n_g2_prepared = E::G2Prepared::from(-params.g2); + (s_g2_prepared, n_g2_prepared) + } + + fn pairing_check(terms: &[(&E::G1Affine, &E::G2Prepared); 2]) -> bool { + bool::from( + E::multi_miller_loop(&terms[..]) + .final_exponentiation() + .is_identity(), + ) + } + + /// Performs final pairing check with given verifier params and two channel linear combination + pub fn verify(params: &ParamsVerifier, msm: PairMSM) -> bool { + let (s_g2, n_g2) = Self::prepare(params); + let (left, right) = msm.eval(); + let (term_1, term_2) = ((&left, &s_g2), (&right, &n_g2)); + Self::pairing_check(&[term_1, term_2]) + } +} + +#[doc(hidden)] +#[derive(Debug, Clone, Copy)] +pub struct ProverQuery<'a, C: CurveAffine> { + /// point at which polynomial is queried + pub point: C::Scalar, + pub rotation: Rotation, + /// coefficients of polynomial + pub poly: &'a Polynomial, +} + +/// A polynomial query at a point +#[derive(Debug, Clone, Copy)] +pub struct VerifierQuery<'r, C: CurveAffine> { + /// point at which polynomial is queried + pub point: C::Scalar, + /// rotation at which polynomial is queried + rotation: Rotation, + /// commitment to polynomial + pub commitment: CommitmentReference<'r, C>, + /// evaluation of polynomial at query point + pub eval: C::Scalar, +} + +impl<'r, 'params: 'r, C: CurveAffine> VerifierQuery<'r, C> { + /// Create a new verifier query based on a commitment + pub fn new_commitment( + commitment: &'r C, + point: C::Scalar, + rotation: Rotation, + eval: C::Scalar, + ) -> Self { + VerifierQuery { + point, + rotation, + eval, + commitment: CommitmentReference::Commitment(commitment), + } + } + + /// Create a new verifier query based on a linear combination of commitments + pub fn new_msm(msm: &'r MSM, point: C::Scalar, rotation: Rotation, eval: C::Scalar) -> Self { + VerifierQuery { + point, + rotation, + eval, + commitment: CommitmentReference::MSM(msm), + } + } +} + +#[derive(Copy, Clone, Debug)] +pub enum CommitmentReference<'r, C: CurveAffine> { + Commitment(&'r C), + MSM(&'r MSM), +} + +impl<'r, 'params: 'r, C: CurveAffine> PartialEq for CommitmentReference<'r, C> { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (&CommitmentReference::Commitment(a), &CommitmentReference::Commitment(b)) => { + std::ptr::eq(a, b) + } + (&CommitmentReference::MSM(a), &CommitmentReference::MSM(b)) => std::ptr::eq(a, b), + _ => false, + } + } +} + +trait Query: Sized + Clone { + type Commitment: PartialEq + Clone; + + fn get_rotation(&self) -> Rotation; + fn get_point(&self) -> F; + fn get_eval(&self) -> F; + fn get_commitment(&self) -> Self::Commitment; +} + +#[cfg(test)] +mod tests { + + use crate::arithmetic::{eval_polynomial, FieldExt}; + use crate::pairing::bn256::{Bn256, Fr, G1Affine}; + use crate::poly::{ + commitment::{Params, ParamsVerifier}, + multiopen::{create_proof, verify_proof, Decider, ProverQuery, Query, VerifierQuery}, + Coeff, Polynomial, Rotation, + }; + use crate::transcript::{ + Blake2bRead, Blake2bWrite, Challenge255, ChallengeScalar, Transcript, TranscriptRead, + TranscriptWrite, + }; + + use ff::Field; + use rand::RngCore; 
+ use rand_core::OsRng; + use std::collections::BTreeSet; + use std::marker::PhantomData; + + fn rand_poly(n: usize, mut rng: impl RngCore) -> Polynomial { + Polynomial { + values: (0..n).into_iter().map(|_| Fr::random(&mut rng)).collect(), + _marker: PhantomData, + } + } + + #[test] + fn test_roundtrip() { + use ff::Field; + use group::Curve; + use rand_core::OsRng; + + use super::*; + use crate::arithmetic::{eval_polynomial, FieldExt}; + use crate::poly::{commitment::Params, EvaluationDomain}; + use crate::transcript::Challenge255; + + use pairing::bn256::{Bn256, Fr as Fp, G1Affine}; + + const K: u32 = 4; + + let params: Params = Params::::unsafe_setup::(K); + let params_verifier: ParamsVerifier = params.verifier(0).unwrap(); + + let domain = EvaluationDomain::new(1, K); + let rng = OsRng; + + let mut ax = domain.empty_coeff(); + for (i, a) in ax.iter_mut().enumerate() { + *a = Fp::from(10 + i as u64); + } + + let mut bx = domain.empty_coeff(); + for (i, a) in bx.iter_mut().enumerate() { + *a = Fp::from(100 + i as u64); + } + + let mut cx = domain.empty_coeff(); + for (i, a) in cx.iter_mut().enumerate() { + *a = Fp::from(100 + i as u64); + } + + let a = params.commit(&ax).to_affine(); + let b = params.commit(&bx).to_affine(); + let c = params.commit(&cx).to_affine(); + + let cur = Rotation::cur(); + let next = Rotation::next(); + let x = Fp::random(rng); + let y = domain.rotate_omega(x, next); + let avx = eval_polynomial(&ax, x); + let bvx = eval_polynomial(&bx, x); + let cvy = eval_polynomial(&cx, y); + + let mut transcript = crate::transcript::Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); + create_proof( + ¶ms, + &mut transcript, + std::iter::empty() + .chain(Some(ProverQuery { + point: x, + rotation: cur, + poly: &ax, + })) + .chain(Some(ProverQuery { + point: x, + rotation: cur, + poly: &bx, + })) + .chain(Some(ProverQuery { + point: y, + rotation: Rotation::next(), + poly: &cx, + })), + ) + .unwrap(); + let proof = transcript.finalize(); + + { + let mut proof = &proof[..]; + let mut transcript = + crate::transcript::Blake2bRead::<_, _, Challenge255<_>>::init(&mut proof); + + let pair = verify_proof( + ¶ms_verifier, + &mut transcript, + std::iter::empty() + .chain(Some(VerifierQuery::new_commitment(&a, x, cur, avx))) + .chain(Some(VerifierQuery::new_commitment(&b, x, cur, avx))) // NB: wrong! + .chain(Some(VerifierQuery::new_commitment(&c, y, next, cvy))), + ) + .unwrap(); + + // Should fail. + assert!(!Decider::verify(¶ms_verifier, pair)); + } + + { + let mut proof = &proof[..]; + + let mut transcript = + crate::transcript::Blake2bRead::<_, _, Challenge255<_>>::init(&mut proof); + + let guard = verify_proof( + ¶ms_verifier, + &mut transcript, + std::iter::empty() + .chain(Some(VerifierQuery::new_commitment(&a, x, cur, avx))) + .chain(Some(VerifierQuery::new_commitment(&b, x, cur, bvx))) + .chain(Some(VerifierQuery::new_commitment(&c, y, next, cvy))), + ) + .unwrap(); + + // Should succeed. 
+ assert!(Decider::verify(¶ms_verifier, guard)); + } + } + + #[test] + fn test_multiopen() { + const K: u32 = 3; + + let params = Params::::unsafe_setup::(K); + let params_verifier: ParamsVerifier = params.verifier(0).unwrap(); + + let rotation_sets_init = vec![ + vec![1i32, 2, 3], + vec![2, 3, 4], + vec![2, 3, 4], + vec![4, 5, 6, 7], + vec![8], + vec![9], + vec![10, 11], + vec![10, 11], + vec![10, 11], + ]; + let commitment_per_set: Vec = rotation_sets_init + .iter() + .enumerate() + .map(|(i, _)| i) + .collect(); + let rotation_sets: Vec> = rotation_sets_init + .into_iter() + .map(|rot_set| { + rot_set + .into_iter() + .map(|i| (Rotation(i), Fr::from(i as u64))) + .collect() + }) + .collect(); + + let mut prover_queries = vec![]; + let mut verifier_queries = vec![]; + + #[allow(clippy::type_complexity)] + let (polynomials, commitments): ( + Vec>>, + Vec>, + ) = rotation_sets + .iter() + .enumerate() + .map(|(i, _)| { + (0..commitment_per_set[i]) + .map(|_| { + let poly = rand_poly(params.n as usize, OsRng); + let commitment: G1Affine = params.commit(&poly).into(); + (poly, commitment) + }) + .unzip() + }) + .unzip(); + + for (i, rotation_set) in rotation_sets.iter().enumerate() { + for (rot, point) in rotation_set.iter() { + for j in 0..commitment_per_set[i] { + { + let query: ProverQuery = ProverQuery { + poly: &polynomials[i][j], + point: *point, + rotation: *rot, + }; + prover_queries.push(query); + } + + { + let poly = &polynomials[i][j]; + let commitment: &G1Affine = &commitments[i][j]; + let eval = eval_polynomial(poly, *point); + let query = VerifierQuery::new_commitment(commitment, *point, *rot, eval); + verifier_queries.push(query); + } + } + } + } + + // prover + let proof = { + let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); + create_proof(¶ms, &mut transcript, prover_queries).unwrap(); + transcript.finalize() + }; + + // verifier + { + let mut transcript = Blake2bRead::<_, G1Affine, Challenge255<_>>::init(&proof[..]); + let pair = verify_proof(¶ms_verifier, &mut transcript, verifier_queries).unwrap(); + assert!(Decider::verify(¶ms_verifier, pair)); + } + } +} diff --git a/halo2_proofs/src/transcript.rs b/halo2_proofs/src/transcript.rs deleted file mode 100644 index 5262f3c1c7..0000000000 --- a/halo2_proofs/src/transcript.rs +++ /dev/null @@ -1,329 +0,0 @@ -//! This module contains utilities and traits for dealing with Fiat-Shamir -//! transcripts. - -use blake2b_simd::{Params as Blake2bParams, State as Blake2bState}; -use group::ff::PrimeField; -use std::convert::TryInto; - -use halo2curves::{Coordinates, CurveAffine, FieldExt}; - -use std::io::{self, Read, Write}; -use std::marker::PhantomData; - -/// Prefix to a prover's message soliciting a challenge -const BLAKE2B_PREFIX_CHALLENGE: u8 = 0; - -/// Prefix to a prover's message containing a curve point -const BLAKE2B_PREFIX_POINT: u8 = 1; - -/// Prefix to a prover's message containing a scalar -const BLAKE2B_PREFIX_SCALAR: u8 = 2; - -/// Generic transcript view (from either the prover or verifier's perspective) -pub trait Transcript> { - /// Squeeze an encoded verifier challenge from the transcript. - fn squeeze_challenge(&mut self) -> E; - - /// Squeeze a typed challenge (in the scalar field) from the transcript. - fn squeeze_challenge_scalar(&mut self) -> ChallengeScalar { - ChallengeScalar { - inner: self.squeeze_challenge().get_scalar(), - _marker: PhantomData, - } - } - - /// Writing the point to the transcript without writing it to the proof, - /// treating it as a common input. 
diff --git a/halo2_proofs/src/transcript.rs b/halo2_proofs/src/transcript.rs
deleted file mode 100644
index 5262f3c1c7..0000000000
--- a/halo2_proofs/src/transcript.rs
+++ /dev/null
@@ -1,329 +0,0 @@
-//! This module contains utilities and traits for dealing with Fiat-Shamir
-//! transcripts.
-
-use blake2b_simd::{Params as Blake2bParams, State as Blake2bState};
-use group::ff::PrimeField;
-use std::convert::TryInto;
-
-use halo2curves::{Coordinates, CurveAffine, FieldExt};
-
-use std::io::{self, Read, Write};
-use std::marker::PhantomData;
-
-/// Prefix to a prover's message soliciting a challenge
-const BLAKE2B_PREFIX_CHALLENGE: u8 = 0;
-
-/// Prefix to a prover's message containing a curve point
-const BLAKE2B_PREFIX_POINT: u8 = 1;
-
-/// Prefix to a prover's message containing a scalar
-const BLAKE2B_PREFIX_SCALAR: u8 = 2;
-
-/// Generic transcript view (from either the prover or verifier's perspective)
-pub trait Transcript<C: CurveAffine, E: EncodedChallenge<C>> {
-    /// Squeeze an encoded verifier challenge from the transcript.
-    fn squeeze_challenge(&mut self) -> E;
-
-    /// Squeeze a typed challenge (in the scalar field) from the transcript.
-    fn squeeze_challenge_scalar<T>(&mut self) -> ChallengeScalar<C, T> {
-        ChallengeScalar {
-            inner: self.squeeze_challenge().get_scalar(),
-            _marker: PhantomData,
-        }
-    }
-
-    /// Writing the point to the transcript without writing it to the proof,
-    /// treating it as a common input.
-    fn common_point(&mut self, point: C) -> io::Result<()>;
-
-    /// Writing the scalar to the transcript without writing it to the proof,
-    /// treating it as a common input.
-    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
-}
-
-/// Transcript view from the perspective of a verifier that has access to an
-/// input stream of data from the prover to the verifier.
-pub trait TranscriptRead<C: CurveAffine, E: EncodedChallenge<C>>: Transcript<C, E> {
-    /// Read a curve point from the prover.
-    fn read_point(&mut self) -> io::Result<C>;
-
-    /// Read a curve scalar from the prover.
-    fn read_scalar(&mut self) -> io::Result<C::Scalar>;
-}
-
-/// Transcript view from the perspective of a prover that has access to an
-/// output stream of messages from the prover to the verifier.
-pub trait TranscriptWrite<C: CurveAffine, E: EncodedChallenge<C>>: Transcript<C, E> {
-    /// Write a curve point to the proof and the transcript.
-    fn write_point(&mut self, point: C) -> io::Result<()>;
-
-    /// Write a scalar to the proof and the transcript.
-    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
-}
-
-/// Initializes transcript at verifier side.
-pub trait TranscriptReadBuffer<R: Read, C: CurveAffine, E: EncodedChallenge<C>>:
-    TranscriptRead<C, E>
-{
-    /// Initialize a transcript given an input buffer.
-    fn init(reader: R) -> Self;
-}
-
-/// Manages begining and finising of transcript pipeline.
-pub trait TranscriptWriterBuffer<W: Write, C: CurveAffine, E: EncodedChallenge<C>>:
-    TranscriptWrite<C, E>
-{
-    /// Initialize a transcript given an output buffer.
-    fn init(writer: W) -> Self;
-
-    /// Conclude the interaction and return the output buffer (writer).
-    fn finalize(self) -> W;
-}
-
-/// We will replace BLAKE2b with an algebraic hash function in a later version.
-#[derive(Debug, Clone)]
-pub struct Blake2bRead<R: Read, C: CurveAffine, E: EncodedChallenge<C>> {
-    state: Blake2bState,
-    reader: R,
-    _marker: PhantomData<(C, E)>,
-}
-
-impl<R: Read, C: CurveAffine> TranscriptReadBuffer<R, C, Challenge255<C>>
-    for Blake2bRead<R, C, Challenge255<C>>
-{
-    /// Initialize a transcript given an input buffer.
-    fn init(reader: R) -> Self {
-        Blake2bRead {
-            state: Blake2bParams::new()
-                .hash_length(64)
-                .personal(b"Halo2-Transcript")
-                .to_state(),
-            reader,
-            _marker: PhantomData,
-        }
-    }
-}
-
-impl<R: Read, C: CurveAffine> TranscriptRead<C, Challenge255<C>>
-    for Blake2bRead<R, C, Challenge255<C>>
-{
-    fn read_point(&mut self) -> io::Result<C> {
-        let mut compressed = C::Repr::default();
-        self.reader.read_exact(compressed.as_mut())?;
-        let point: C = Option::from(C::from_bytes(&compressed)).ok_or_else(|| {
-            io::Error::new(io::ErrorKind::Other, "invalid point encoding in proof")
-        })?;
-        self.common_point(point)?;
-
-        Ok(point)
-    }
-
-    fn read_scalar(&mut self) -> io::Result<C::Scalar> {
-        let mut data = <C::Scalar as PrimeField>::Repr::default();
-        self.reader.read_exact(data.as_mut())?;
-        let scalar: C::Scalar = Option::from(C::Scalar::from_repr(data)).ok_or_else(|| {
-            io::Error::new(
-                io::ErrorKind::Other,
-                "invalid field element encoding in proof",
-            )
-        })?;
-        self.common_scalar(scalar)?;
-
-        Ok(scalar)
-    }
-}
-
-impl<R: Read, C: CurveAffine> Transcript<C, Challenge255<C>>
-    for Blake2bRead<R, C, Challenge255<C>>
-{
-    fn squeeze_challenge(&mut self) -> Challenge255<C> {
-        self.state.update(&[BLAKE2B_PREFIX_CHALLENGE]);
-        let hasher = self.state.clone();
-        let result: [u8; 64] = hasher.finalize().as_bytes().try_into().unwrap();
-        Challenge255::<C>::new(&result)
-    }
-
-    fn common_point(&mut self, point: C) -> io::Result<()> {
-        self.state.update(&[BLAKE2B_PREFIX_POINT]);
-        let coords: Coordinates<C> = Option::from(point.coordinates()).ok_or_else(|| {
-            io::Error::new(
-                io::ErrorKind::Other,
-                "cannot write points at infinity to the transcript",
-            )
-        })?;
-        self.state.update(coords.x().to_repr().as_ref());
-        self.state.update(coords.y().to_repr().as_ref());
-
-        Ok(())
-    }
-
-    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
-        self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
-        self.state.update(scalar.to_repr().as_ref());
-
-        Ok(())
-    }
-}
-
-/// We will replace BLAKE2b with an algebraic hash function in a later version.
-#[derive(Debug, Clone)]
-pub struct Blake2bWrite<W: Write, C: CurveAffine, E: EncodedChallenge<C>> {
-    state: Blake2bState,
-    writer: W,
-    _marker: PhantomData<(C, E)>,
-}
-
-impl<W: Write, C: CurveAffine> TranscriptWriterBuffer<W, C, Challenge255<C>>
-    for Blake2bWrite<W, C, Challenge255<C>>
-{
-    fn init(writer: W) -> Self {
-        Blake2bWrite {
-            state: Blake2bParams::new()
-                .hash_length(64)
-                .personal(b"Halo2-Transcript")
-                .to_state(),
-            writer,
-            _marker: PhantomData,
-        }
-    }
-
-    fn finalize(self) -> W {
-        // TODO: handle outstanding scalars? see issue #138
-        self.writer
-    }
-}
-
-impl<W: Write, C: CurveAffine> TranscriptWrite<C, Challenge255<C>>
-    for Blake2bWrite<W, C, Challenge255<C>>
-{
-    fn write_point(&mut self, point: C) -> io::Result<()> {
-        self.common_point(point)?;
-        let compressed = point.to_bytes();
-        self.writer.write_all(compressed.as_ref())
-    }
-    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
-        self.common_scalar(scalar)?;
-        let data = scalar.to_repr();
-        self.writer.write_all(data.as_ref())
-    }
-}
-
-impl<W: Write, C: CurveAffine> Transcript<C, Challenge255<C>>
-    for Blake2bWrite<W, C, Challenge255<C>>
-{
-    fn squeeze_challenge(&mut self) -> Challenge255<C> {
-        self.state.update(&[BLAKE2B_PREFIX_CHALLENGE]);
-        let hasher = self.state.clone();
-        let result: [u8; 64] = hasher.finalize().as_bytes().try_into().unwrap();
-        Challenge255::<C>::new(&result)
-    }
-
-    fn common_point(&mut self, point: C) -> io::Result<()> {
-        self.state.update(&[BLAKE2B_PREFIX_POINT]);
-        let coords: Coordinates<C> = Option::from(point.coordinates()).ok_or_else(|| {
-            io::Error::new(
-                io::ErrorKind::Other,
-                "cannot write points at infinity to the transcript",
-            )
-        })?;
-        self.state.update(coords.x().to_repr().as_ref());
-        self.state.update(coords.y().to_repr().as_ref());
-
-        Ok(())
-    }
-
-    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
-        self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
-        self.state.update(scalar.to_repr().as_ref());
-
-        Ok(())
-    }
-}
-
-/// The scalar representation of a verifier challenge.
-///
-/// The `Type` type can be used to scope the challenge to a specific context, or
-/// set to `()` if no context is required.
-#[derive(Copy, Clone, Debug)]
-pub struct ChallengeScalar<C: CurveAffine, T> {
-    inner: C::Scalar,
-    _marker: PhantomData<T>,
-}
-
-impl<C: CurveAffine, T> std::ops::Deref for ChallengeScalar<C, T> {
-    type Target = C::Scalar;
-
-    fn deref(&self) -> &Self::Target {
-        &self.inner
-    }
-}
-
-/// `EncodedChallenge<C>` defines a challenge encoding with a [`Self::Input`]
-/// that is used to derive the challenge encoding and `get_challenge` obtains
-/// the _real_ `C::Scalar` that the challenge encoding represents.
-pub trait EncodedChallenge<C: CurveAffine> {
-    /// The Input type used to derive the challenge encoding. For example,
-    /// an input from the Poseidon hash would be a base field element;
-    /// an input from the Blake2b hash would be a [u8; 64].
-    type Input;
-
-    /// Get an encoded challenge from a given input challenge.
-    fn new(challenge_input: &Self::Input) -> Self;
-
-    /// Get a scalar field element from an encoded challenge.
-    fn get_scalar(&self) -> C::Scalar;
-
-    /// Cast an encoded challenge as a typed `ChallengeScalar`.
-    fn as_challenge_scalar<T>(&self) -> ChallengeScalar<C, T> {
-        ChallengeScalar {
-            inner: self.get_scalar(),
-            _marker: PhantomData,
-        }
-    }
-}
-
-/// A 255-bit challenge.
-#[derive(Copy, Clone, Debug)]
-pub struct Challenge255<C: CurveAffine>([u8; 32], PhantomData<C>);
-
-impl<C: CurveAffine> std::ops::Deref for Challenge255<C> {
-    type Target = [u8; 32];
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-impl<C: CurveAffine> EncodedChallenge<C> for Challenge255<C> {
-    type Input = [u8; 64];
-
-    fn new(challenge_input: &[u8; 64]) -> Self {
-        Challenge255(
-            C::Scalar::from_bytes_wide(challenge_input)
-                .to_repr()
-                .as_ref()
-                .try_into()
-                .expect("Scalar fits into 256 bits"),
-            PhantomData,
-        )
-    }
-    fn get_scalar(&self) -> C::Scalar {
-        let mut repr = <C::Scalar as PrimeField>::Repr::default();
-        repr.as_mut().copy_from_slice(&self.0);
-        C::Scalar::from_repr(repr).unwrap()
-    }
-}
-
-pub(crate) fn read_n_points<C: CurveAffine, E: EncodedChallenge<C>, T: TranscriptRead<C, E>>(
-    transcript: &mut T,
-    n: usize,
-) -> io::Result<Vec<C>> {
-    (0..n).map(|_| transcript.read_point()).collect()
-}
-
-pub(crate) fn read_n_scalars<C: CurveAffine, E: EncodedChallenge<C>, T: TranscriptRead<C, E>>(
-    transcript: &mut T,
-    n: usize,
-) -> io::Result<Vec<C::Scalar>> {
-    (0..n).map(|_| transcript.read_scalar()).collect()
-}
diff --git a/halo2_proofs/src/transcript/blake2b.rs b/halo2_proofs/src/transcript/blake2b.rs
new file mode 100644
index 0000000000..99954e6efd
--- /dev/null
+++ b/halo2_proofs/src/transcript/blake2b.rs
@@ -0,0 +1,202 @@
+use super::{
+    Challenge255, EncodedChallenge, Transcript, TranscriptRead, TranscriptReadBuffer,
+    TranscriptWrite, TranscriptWriterBuffer,
+};
+use blake2b_simd::{Params as Blake2bParams, State as Blake2bState};
+use ff::Field;
+use group::ff::PrimeField;
+use halo2curves::{Coordinates, CurveAffine, FieldExt};
+use num_bigint::BigUint;
+use std::convert::TryInto;
+use std::io::{self, Read, Write};
+use std::marker::PhantomData;
+
+/// Prefix to a prover's message soliciting a challenge
+const BLAKE2B_PREFIX_CHALLENGE: u8 = 0;
+
+/// Prefix to a prover's message containing a curve point
+const BLAKE2B_PREFIX_POINT: u8 = 1;
+
+/// Prefix to a prover's message containing a scalar
+const BLAKE2B_PREFIX_SCALAR: u8 = 2;
+
+// ---------------------- Blake2bRead ----------------------
+
+/// We will replace BLAKE2b with an algebraic hash function in a later version.
+#[derive(Debug, Clone)]
+pub struct Blake2bRead<R: Read, C: CurveAffine, E: EncodedChallenge<C>> {
+    state: Blake2bState,
+    reader: R,
+    _marker: PhantomData<(C, E)>,
+}
+
+impl<R: Read, C: CurveAffine> TranscriptReadBuffer<R, C, Challenge255<C>>
+    for Blake2bRead<R, C, Challenge255<C>>
+{
+    /// Initialize a transcript given an input buffer.
+    fn init(reader: R) -> Self {
+        Blake2bRead {
+            state: Blake2bParams::new()
+                .hash_length(64)
+                .personal(b"Halo2-Transcript")
+                .to_state(),
+            reader,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<R: Read, C: CurveAffine> TranscriptRead<C, Challenge255<C>>
+    for Blake2bRead<R, C, Challenge255<C>>
+{
+    fn read_point(&mut self) -> io::Result<C> {
+        let mut compressed = C::Repr::default();
+        self.reader.read_exact(compressed.as_mut())?;
+        let point: C = match Option::from(C::from_bytes(&compressed)) {
+            Some(p) => p,
+            // TODO: check that it is actually safe to push an
+            // identity point to the transcript
+            None => C::identity(),
+        };
+        self.common_point(point)?;
+
+        Ok(point)
+    }
+
+    fn read_scalar(&mut self) -> io::Result<C::Scalar> {
+        let mut data = <C::Scalar as PrimeField>::Repr::default();
+        self.reader.read_exact(data.as_mut())?;
+        let scalar = match Option::from(C::Scalar::from_repr(data)) {
+            Some(s) => s,
+            // TODO: check that it is actually safe to push a
+            // zero scalar to the transcript
+            None => C::Scalar::zero(),
+        };
+        self.common_scalar(scalar)?;
+
+        Ok(scalar)
+    }
+}
+
+impl<R: Read, C: CurveAffine> Transcript<C, Challenge255<C>>
+    for Blake2bRead<R, C, Challenge255<C>>
+{
+    fn squeeze_challenge(&mut self) -> Challenge255<C> {
+        self.state.update(&[BLAKE2B_PREFIX_CHALLENGE]);
+        let hasher = self.state.clone();
+        let result: [u8; 64] = hasher.finalize().as_bytes().try_into().unwrap();
+        Challenge255::<C>::new(&result)
+    }
+
+    // This function is slightly modified from PSE's version:
+    // PSE's version returns an error when the input point is at infinity.
+    // Here we want to be able to absorb the point at infinity, because the
+    // randomness we use in the polynomial commitment can be 0.
+    fn common_point(&mut self, point: C) -> io::Result<()> {
+        self.state.update(&[BLAKE2B_PREFIX_POINT]);
+
+        let tmp: Option<Coordinates<C>> = Option::from(point.coordinates());
+        match tmp {
+            Some(coords) => {
+                self.state.update(coords.x().to_repr().as_ref());
+                self.state.update(coords.y().to_repr().as_ref());
+            }
+            None => {
+                // Infinity point: absorb the sentinel encoding (0, 5)
+                // instead of affine coordinates.
+                self.state.update(C::Base::zero().to_repr().as_ref());
+                self.state.update(C::Base::from(5).to_repr().as_ref());
+            }
+        }
+        Ok(())
+    }
+
+    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
+        self.state.update(scalar.to_repr().as_ref());
+
+        Ok(())
+    }
+}
+
+/// We will replace BLAKE2b with an algebraic hash function in a later version.
+#[derive(Debug, Clone)]
+pub struct Blake2bWrite<W: Write, C: CurveAffine, E: EncodedChallenge<C>> {
+    state: Blake2bState,
+    writer: W,
+    _marker: PhantomData<(C, E)>,
+}
+
+impl<W: Write, C: CurveAffine> TranscriptWriterBuffer<W, C, Challenge255<C>>
+    for Blake2bWrite<W, C, Challenge255<C>>
+{
+    fn init(writer: W) -> Self {
+        Blake2bWrite {
+            state: Blake2bParams::new()
+                .hash_length(64)
+                .personal(b"Halo2-Transcript")
+                .to_state(),
+            writer,
+            _marker: PhantomData,
+        }
+    }
+
+    fn finalize(self) -> W {
+        // TODO: handle outstanding scalars? see issue #138
+        self.writer
+    }
+}
+
+impl<W: Write, C: CurveAffine> TranscriptWrite<C, Challenge255<C>>
+    for Blake2bWrite<W, C, Challenge255<C>>
+{
+    fn write_point(&mut self, point: C) -> io::Result<()> {
+        self.common_point(point)?;
+        let compressed = point.to_bytes();
+        self.writer.write_all(compressed.as_ref())
+    }
+    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        self.common_scalar(scalar)?;
+        let data = scalar.to_repr();
+        self.writer.write_all(data.as_ref())
+    }
+}
+
+impl<W: Write, C: CurveAffine> Transcript<C, Challenge255<C>>
+    for Blake2bWrite<W, C, Challenge255<C>>
+{
+    fn squeeze_challenge(&mut self) -> Challenge255<C> {
+        self.state.update(&[BLAKE2B_PREFIX_CHALLENGE]);
+        let hasher = self.state.clone();
+        let result: [u8; 64] = hasher.finalize().as_bytes().try_into().unwrap();
+        Challenge255::<C>::new(&result)
+    }
+
+    // This function is slightly modified from PSE's version:
+    // PSE's version returns an error when the input point is at infinity.
+    // Here we want to be able to absorb the point at infinity, because the
+    // randomness we use in the polynomial commitment can be 0.
+    fn common_point(&mut self, point: C) -> io::Result<()> {
+        self.state.update(&[BLAKE2B_PREFIX_POINT]);
+        let tmp: Option<Coordinates<C>> = Option::from(point.coordinates());
+        match tmp {
+            Some(coords) => {
+                self.state.update(coords.x().to_repr().as_ref());
+                self.state.update(coords.y().to_repr().as_ref());
+            }
+            None => {
+                // Infinity point: absorb the sentinel encoding (0, 5)
+                // instead of affine coordinates.
+                self.state.update(C::Base::zero().to_repr().as_ref());
+                self.state.update(C::Base::from(5).to_repr().as_ref());
+            }
+        }
+
+        Ok(())
+    }
+
+    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
+        self.state.update(scalar.to_repr().as_ref());
+
+        Ok(())
+    }
+}
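A minimal usage sketch of the Blake2b transcript pair defined above (illustrative only, not part of the patch; assumes `G1Affine` from `halo2curves::bn256`). For Fiat-Shamir to be sound, prover and verifier must absorb the same data in the same order, so the squeezed challenges agree:

    fn transcript_roundtrip(commitment: halo2curves::bn256::G1Affine) -> std::io::Result<()> {
        use halo2_proofs::transcript::{
            Blake2bRead, Blake2bWrite, Challenge255, Transcript, TranscriptRead,
            TranscriptReadBuffer, TranscriptWrite, TranscriptWriterBuffer,
        };
        use halo2curves::bn256::G1Affine;

        // Prover side: absorb the point, derive a challenge, emit the proof bytes.
        let mut t_w = Blake2bWrite::<_, G1Affine, Challenge255<G1Affine>>::init(vec![]);
        t_w.write_point(commitment)?;
        let alpha = t_w.squeeze_challenge();
        let proof = t_w.finalize();

        // Verifier side: re-absorb the same point and derive the same challenge.
        let mut t_r = Blake2bRead::<_, G1Affine, Challenge255<G1Affine>>::init(&proof[..]);
        let p = t_r.read_point()?;
        let alpha_v = t_r.squeeze_challenge();
        assert_eq!(p, commitment);
        assert_eq!(*alpha, *alpha_v); // prover and verifier agree
        Ok(())
    }
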
diff --git a/halo2_proofs/src/transcript/mod.rs b/halo2_proofs/src/transcript/mod.rs
new file mode 100644
index 0000000000..f33cdad523
--- /dev/null
+++ b/halo2_proofs/src/transcript/mod.rs
@@ -0,0 +1,166 @@
+//! This module contains utilities and traits for dealing with Fiat-Shamir
+//! transcripts.
+
+mod blake2b;
+mod poseidon;
+
+pub use self::poseidon::{PoseidonRead, PoseidonWrite};
+pub use blake2b::{Blake2bRead, Blake2bWrite};
+
+use blake2b_simd::{Params as Blake2bParams, State as Blake2bState};
+use ff::Field;
+use group::ff::PrimeField;
+use halo2curves::{Coordinates, CurveAffine, FieldExt};
+use num_bigint::BigUint;
+use std::convert::TryInto;
+use std::io::{self, Read, Write};
+use std::marker::PhantomData;
+
+/// Generic transcript view (from either the prover or verifier's perspective)
+pub trait Transcript<C: CurveAffine, E: EncodedChallenge<C>> {
+    /// Squeeze an encoded verifier challenge from the transcript.
+    fn squeeze_challenge(&mut self) -> E;
+
+    /// Squeeze a typed challenge (in the scalar field) from the transcript.
+    fn squeeze_challenge_scalar<T>(&mut self) -> ChallengeScalar<C, T> {
+        ChallengeScalar {
+            inner: self.squeeze_challenge().get_scalar(),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Writing the point to the transcript without writing it to the proof,
+    /// treating it as a common input.
+    fn common_point(&mut self, point: C) -> io::Result<()>;
+
+    /// Writing the scalar to the transcript without writing it to the proof,
+    /// treating it as a common input.
+    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
+}
+
+/// Transcript view from the perspective of a verifier that has access to an
+/// input stream of data from the prover to the verifier.
+pub trait TranscriptRead<C: CurveAffine, E: EncodedChallenge<C>>: Transcript<C, E> {
+    /// Read a curve point from the prover.
+    fn read_point(&mut self) -> io::Result<C>;
+
+    /// Read a curve scalar from the prover.
+    fn read_scalar(&mut self) -> io::Result<C::Scalar>;
+}
+
+/// Transcript view from the perspective of a prover that has access to an
+/// output stream of messages from the prover to the verifier.
+pub trait TranscriptWrite<C: CurveAffine, E: EncodedChallenge<C>>: Transcript<C, E> {
+    /// Write a curve point to the proof and the transcript.
+    fn write_point(&mut self, point: C) -> io::Result<()>;
+
+    /// Write a scalar to the proof and the transcript.
+    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()>;
+}
+
+/// Initializes the transcript on the verifier side.
+pub trait TranscriptReadBuffer<R: Read, C: CurveAffine, E: EncodedChallenge<C>>:
+    TranscriptRead<C, E>
+{
+    /// Initialize a transcript given an input buffer.
+    fn init(reader: R) -> Self;
+}
+
+/// Manages the beginning and finishing of the transcript pipeline.
+pub trait TranscriptWriterBuffer<W: Write, C: CurveAffine, E: EncodedChallenge<C>>:
+    TranscriptWrite<C, E>
+{
+    /// Initialize a transcript given an output buffer.
+    fn init(writer: W) -> Self;
+
+    /// Conclude the interaction and return the output buffer (writer).
+    fn finalize(self) -> W;
+}
+
+/// The scalar representation of a verifier challenge.
+///
+/// The `Type` type can be used to scope the challenge to a specific context, or
+/// set to `()` if no context is required.
+#[derive(Copy, Clone, Debug)]
+pub struct ChallengeScalar<C: CurveAffine, T> {
+    inner: C::Scalar,
+    _marker: PhantomData<T>,
+}
+
+impl<C: CurveAffine, T> std::ops::Deref for ChallengeScalar<C, T> {
+    type Target = C::Scalar;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+/// `EncodedChallenge<C>` defines a challenge encoding with a [`Self::Input`]
+/// that is used to derive the challenge encoding and `get_challenge` obtains
+/// the _real_ `C::Scalar` that the challenge encoding represents.
+pub trait EncodedChallenge<C: CurveAffine> {
+    /// The Input type used to derive the challenge encoding. For example,
+    /// an input from the Poseidon hash would be a base field element;
+    /// an input from the Blake2b hash would be a [u8; 64].
+    type Input;
+
+    /// Get an encoded challenge from a given input challenge.
+    fn new(challenge_input: &Self::Input) -> Self;
+
+    /// Get a scalar field element from an encoded challenge.
+    fn get_scalar(&self) -> C::Scalar;
+
+    /// Cast an encoded challenge as a typed `ChallengeScalar`.
+    fn as_challenge_scalar<T>(&self) -> ChallengeScalar<C, T> {
+        ChallengeScalar {
+            inner: self.get_scalar(),
+            _marker: PhantomData,
+        }
+    }
+}
+
+/// A 255-bit challenge.
+#[derive(Copy, Clone, Debug)]
+pub struct Challenge255<C: CurveAffine>([u8; 32], PhantomData<C>);
+
+impl<C: CurveAffine> std::ops::Deref for Challenge255<C> {
+    type Target = [u8; 32];
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<C: CurveAffine> EncodedChallenge<C> for Challenge255<C> {
+    type Input = [u8; 64];
+
+    fn new(challenge_input: &[u8; 64]) -> Self {
+        Challenge255(
+            C::Scalar::from_bytes_wide(challenge_input)
+                .to_repr()
+                .as_ref()
+                .try_into()
+                .expect("Scalar fits into 256 bits"),
+            PhantomData,
+        )
+    }
+    fn get_scalar(&self) -> C::Scalar {
+        let mut repr = <C::Scalar as PrimeField>::Repr::default();
+        repr.as_mut().copy_from_slice(&self.0);
+        C::Scalar::from_repr(repr).unwrap()
+    }
+}
+
+/// Read `n` curve points from the transcript.
+pub fn read_n_points<C: CurveAffine, E: EncodedChallenge<C>, T: TranscriptRead<C, E>>(
+    transcript: &mut T,
+    n: usize,
+) -> io::Result<Vec<C>> {
+    (0..n).map(|_| transcript.read_point()).collect()
+}
+
+/// Read `n` field elements from the transcript.
+pub fn read_n_scalars<C: CurveAffine, E: EncodedChallenge<C>, T: TranscriptRead<C, E>>(
+    transcript: &mut T,
+    n: usize,
+) -> io::Result<Vec<C::Scalar>> {
+    (0..n).map(|_| transcript.read_scalar()).collect()
+}
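Because prover and verifier code only interact with the traits above, callers can stay generic over whichever hash instantiates the transcript. A hedged sketch (the helper `commit_and_challenge` is invented here for illustration):

    use halo2_proofs::transcript::{EncodedChallenge, TranscriptWrite};
    use halo2curves::CurveAffine;

    // Works with Blake2bWrite, PoseidonWrite, or any other TranscriptWrite impl.
    fn commit_and_challenge<C, E, T>(transcript: &mut T, point: C) -> std::io::Result<C::Scalar>
    where
        C: CurveAffine,
        E: EncodedChallenge<C>,
        T: TranscriptWrite<C, E>,
    {
        transcript.write_point(point)?;                  // absorb the commitment
        Ok(*transcript.squeeze_challenge_scalar::<()>()) // derive a field challenge
    }
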
diff --git a/halo2_proofs/src/transcript/poseidon.rs b/halo2_proofs/src/transcript/poseidon.rs
new file mode 100644
index 0000000000..d9da18a440
--- /dev/null
+++ b/halo2_proofs/src/transcript/poseidon.rs
@@ -0,0 +1,174 @@
+use super::{Challenge255, EncodedChallenge, Transcript, TranscriptRead, TranscriptWrite};
+use crate::helpers::base_to_scalar;
+use ff::Field;
+use group::ff::PrimeField;
+use halo2curves::{Coordinates, CurveAffine, FieldExt};
+use num_bigint::BigUint;
+use poseidon::Poseidon;
+use std::convert::TryInto;
+use std::io::{self, Read, Write};
+use std::marker::PhantomData;
+
+const POSEIDON_RATE: usize = 8usize;
+const POSEIDON_T: usize = POSEIDON_RATE + 1usize;
+
+/// A verifier-side transcript that uses the Poseidon hash to generate
+/// challenges.
+#[derive(Debug, Clone)]
+pub struct PoseidonRead<R: Read, C: CurveAffine, E: EncodedChallenge<C>> {
+    state: Poseidon<C::Scalar, POSEIDON_T, POSEIDON_RATE>,
+    reader: R,
+    _marker: PhantomData<(C, E)>,
+}
+
+impl<R: Read, C: CurveAffine, E: EncodedChallenge<C>> PoseidonRead<R, C, E> {
+    /// Initialize a transcript given an input buffer.
+    pub fn init(reader: R) -> Self {
+        PoseidonRead {
+            state: Poseidon::new(8usize, 63usize),
+            reader,
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<R: Read, C: CurveAffine> TranscriptRead<C, Challenge255<C>>
+    for PoseidonRead<R, C, Challenge255<C>>
+{
+    fn read_point(&mut self) -> io::Result<C> {
+        let mut compressed = C::Repr::default();
+        self.reader.read_exact(compressed.as_mut())?;
+        let point: C = Option::from(C::from_bytes(&compressed)).ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                "invalid point encoding in proof for poseidon",
+            )
+        })?;
+        self.common_point(point)?;
+
+        Ok(point)
+    }
+
+    fn read_scalar(&mut self) -> io::Result<C::Scalar> {
+        let mut data = <C::Scalar as PrimeField>::Repr::default();
+        self.reader.read_exact(data.as_mut())?;
+        let scalar: C::Scalar = Option::from(C::Scalar::from_repr(data)).ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                "invalid field element encoding in proof for poseidon",
+            )
+        })?;
+        self.common_scalar(scalar)?;
+
+        Ok(scalar)
+    }
+}
+
+impl<R: Read, C: CurveAffine> Transcript<C, Challenge255<C>>
+    for PoseidonRead<R, C, Challenge255<C>>
+{
+    fn squeeze_challenge(&mut self) -> Challenge255<C> {
+        //self.state.update(&[PREFIX_SQUEEZE]);
+        let scalar = self.state.squeeze();
+        let mut scalar_bytes = scalar.to_repr().as_ref().to_vec();
+        scalar_bytes.resize(64, 0u8);
+        Challenge255::<C>::new(&scalar_bytes.try_into().unwrap())
+    }
+
+    fn common_point(&mut self, point: C) -> io::Result<()> {
+        //self.state.update(&[PREFIX_POINT]);
+        let coords: Coordinates<C> = Option::from(point.coordinates()).ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                "cannot write points at infinity to the transcript",
+            )
+        })?;
+        let x = coords.x();
+        let y = coords.y();
+        self.state
+            .update(&[base_to_scalar::<C>(x), base_to_scalar::<C>(y)]);
+
+        Ok(())
+    }
+
+    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        //self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
+        self.state.update(&[scalar]);
+
+        Ok(())
+    }
+}
+
+/// A prover-side transcript that uses the Poseidon hash to generate
+/// challenges.
+#[derive(Debug, Clone)]
+pub struct PoseidonWrite<W: Write, C: CurveAffine, E: EncodedChallenge<C>> {
+    state: Poseidon<C::Scalar, POSEIDON_T, POSEIDON_RATE>,
+    writer: W,
+    _marker: PhantomData<(C, E)>,
+}
+
+impl<W: Write, C: CurveAffine, E: EncodedChallenge<C>> PoseidonWrite<W, C, E> {
+    /// Initialize a transcript given an output buffer.
+    pub fn init(writer: W) -> Self {
+        PoseidonWrite {
+            state: Poseidon::new(8usize, 63usize),
+            writer,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Conclude the interaction and return the output buffer (writer).
+    pub fn finalize(self) -> W {
+        // TODO: handle outstanding scalars? see issue #138
+        self.writer
+    }
+}
+
+impl<W: Write, C: CurveAffine> TranscriptWrite<C, Challenge255<C>>
+    for PoseidonWrite<W, C, Challenge255<C>>
+{
+    fn write_point(&mut self, point: C) -> io::Result<()> {
+        self.common_point(point)?;
+        let compressed = point.to_bytes();
+        self.writer.write_all(compressed.as_ref())
+    }
+    fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        self.common_scalar(scalar)?;
+        let data = scalar.to_repr();
+        self.writer.write_all(data.as_ref())
+    }
+}
+
+impl<W: Write, C: CurveAffine> Transcript<C, Challenge255<C>>
+    for PoseidonWrite<W, C, Challenge255<C>>
+{
+    fn squeeze_challenge(&mut self) -> Challenge255<C> {
+        //self.state.update(&[PREFIX_SQUEEZE]);
+        let scalar = self.state.squeeze();
+        let mut scalar_bytes = scalar.to_repr().as_ref().to_vec();
+        scalar_bytes.resize(64, 0u8);
+        Challenge255::<C>::new(&scalar_bytes.try_into().unwrap())
+    }
+
+    fn common_point(&mut self, point: C) -> io::Result<()> {
+        //self.state.update(&[PREFIX_POINT]);
+        let coords: Coordinates<C> = Option::from(point.coordinates()).ok_or_else(|| {
+            io::Error::new(
+                io::ErrorKind::Other,
+                "cannot write points at infinity to the transcript",
+            )
+        })?;
+        let x = coords.x();
+        let y = coords.y();
+        self.state
+            .update(&[base_to_scalar::<C>(x), base_to_scalar::<C>(y)]);
+
+        Ok(())
+    }
+
+    fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> {
+        //self.state.update(&[BLAKE2B_PREFIX_SCALAR]);
+        self.state.update(&[scalar]);
+
+        Ok(())
+    }
+}
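A matching usage sketch for the Poseidon transcripts (illustrative only, not part of the patch; assumes `halo2curves::bn256`). Note that, unlike `Blake2bRead`/`Blake2bWrite`, `PoseidonRead` and `PoseidonWrite` expose inherent `init`/`finalize` methods rather than implementing the `TranscriptReadBuffer`/`TranscriptWriterBuffer` traits:

    fn poseidon_roundtrip(eval: halo2curves::bn256::Fr) -> std::io::Result<()> {
        use halo2_proofs::transcript::{
            Challenge255, PoseidonRead, PoseidonWrite, TranscriptRead, TranscriptWrite,
        };
        use halo2curves::bn256::G1Affine;

        // Prover side: absorb an evaluation and emit the proof bytes.
        let mut t_w = PoseidonWrite::<_, G1Affine, Challenge255<G1Affine>>::init(vec![]);
        t_w.write_scalar(eval)?;
        let proof = t_w.finalize();

        // Verifier side: read back the same evaluation.
        let mut t_r = PoseidonRead::<_, G1Affine, Challenge255<G1Affine>>::init(&proof[..]);
        let eval_v = t_r.read_scalar()?;
        assert_eq!(eval, eval_v);
        Ok(())
    }
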
diff --git a/halo2_proofs/tests/plonk_api.rs b/halo2_proofs/tests/plonk_api.rs
index af63b5fb30..913762c0da 100644
--- a/halo2_proofs/tests/plonk_api.rs
+++ b/halo2_proofs/tests/plonk_api.rs
@@ -1020,7 +1020,6 @@ fn plonk_api() {
             );
         }
     }
-    test_plonk_api_ipa();
     test_plonk_api_gwc();
     test_plonk_api_shplonk();