From ab4e36bd7a6084cd1faf9cc4f0a30f2a6cece59b Mon Sep 17 00:00:00 2001 From: Han Date: Wed, 27 Sep 2023 22:48:49 +0800 Subject: [PATCH] Implement `SolidityGenerator` (#2) * feat: implement `SolidityGenerator` to generate vk and verifier separately * refactor: rename `generate` into `render` * feat: implement `render` with test * refactor: add `ConstraintSystemMeta` and `Data` * refactor: better `Evaluator` * refactor: fmt-able * refactor: better `pcs` * refactor: generally * refactor: more `for_loop` * refactor: docs * refactor: cheaper than `snark-verifier` * feat: add ci * refactor: typo * refactor: less generic * refactor: `Word` * feat: allow more instances and add document with example * refactor: avoid patching `halo2_proofs` * refactor: add more comments and reduce `encode_calldata` verbosity * ci: install `svm-rs` with its `Cargo.lock` * feat: handle `solc` related error in `compile_solidity` more properly * docs: add warning on `README.md` --- .github/workflows/ci.yml | 53 +++ .gitignore | 3 + Cargo.toml | 31 ++ README.md | 48 ++- askama.toml | 3 + examples/separately.rs | 242 +++++++++++++ src/codegen.rs | 334 +++++++++++++++++ src/codegen/evaluator.rs | 386 ++++++++++++++++++++ src/codegen/pcs.rs | 592 ++++++++++++++++++++++++++++++ src/codegen/template.rs | 83 +++++ src/codegen/util.rs | 614 ++++++++++++++++++++++++++++++++ src/evm.rs | 217 +++++++++++ src/lib.rs | 21 ++ src/test.rs | 612 +++++++++++++++++++++++++++++++ src/transcript.rs | 192 ++++++++++ templates/Halo2Verifier.sol | 541 ++++++++++++++++++++++++++++ templates/Halo2VerifyingKey.sol | 25 ++ 17 files changed, 3995 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/ci.yml create mode 100644 Cargo.toml create mode 100644 askama.toml create mode 100644 examples/separately.rs create mode 100644 src/codegen.rs create mode 100644 src/codegen/evaluator.rs create mode 100644 src/codegen/pcs.rs create mode 100644 src/codegen/template.rs create mode 100644 src/codegen/util.rs create 
mode 100644 src/evm.rs create mode 100644 src/lib.rs create mode 100644 src/test.rs create mode 100644 src/transcript.rs create mode 100644 templates/Halo2Verifier.sol create mode 100644 templates/Halo2VerifyingKey.sol diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..03e508c --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,53 @@ +name: CI + +on: + pull_request: + push: + branches: + - main + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + + - uses: Swatinem/rust-cache@v1 + with: + cache-on-failure: true + + - name: Install solc + run: (hash svm 2>/dev/null || cargo install --locked --git https://github.com/alloy-rs/svm-rs) && svm install 0.8.21 && solc --version + + - name: Run test + run: cargo test --workspace --all-features --all-targets -- --nocapture + + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + profile: minimal + components: rustfmt, clippy + + - uses: Swatinem/rust-cache@v1 + with: + cache-on-failure: true + + - name: Run fmt + run: cargo fmt --all -- --check + + - name: Run clippy + run: cargo clippy --workspace --all-features --all-targets -- -D warnings diff --git a/.gitignore b/.gitignore index 6985cf1..7e85a55 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,6 @@ Cargo.lock # MSVC Windows builds of rustc generate these, which store debugging information *.pdb + +.vscode +generated/ \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..9871a99 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "halo2_solidity_verifier" +version = "0.1.0" +edition = "2021" + +[dependencies] +halo2_proofs = { git = 
"https://github.com/privacy-scaling-explorations/halo2", tag = "v2023_04_20" } +askama = { version = "0.12.0", features = ["config"], default-features = false } +hex = "0.4.3" +ruint = "1.10.1" +sha3 = "0.10" +itertools = "0.11.0" + +# Remove when `vk.transcript_repr()` is ready for usage. +blake2b_simd = "1" + +# For feature = "evm" +revm = { version = "3.3.0", optional = true } + +[dev-dependencies] +rand = "0.8.5" +revm = "3.3.0" +halo2_maingate = { git = "https://github.com/privacy-scaling-explorations/halo2wrong", tag = "v2023_04_20", package = "maingate" } + +[features] +default = [] +evm = ["dep:revm"] + +[[example]] +name = "separately" +required-features = ["evm"] diff --git a/README.md b/README.md index bf38cd0..2ec93b6 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,46 @@ -# halo2_solidity_verifier -A set of tooling related to halo2 circuits verification inside Solidity contracts +# Halo2 Solidity Verifier + +> ⚠️ This repo has NOT been audited and is NOT intended for a production environment yet. + +Solidity verifier generator for [`halo2`](http://github.com/privacy-scaling-explorations/halo2) proof with KZG polynomial commitment scheme on BN254 + +## Usage + +### Generate verifier and verifying key separately as 2 solidity contracts + +```rust +let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, num_instances); +let (verifier_solidity, vk_solidity) = generator.render_separately().unwrap(); +``` + +Check [`examples/separately.rs`](./examples/separately.rs) for more details. + +### Generate verifier and verifying key in a single solidity contract + +```rust +let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, num_instances); +let verifier_solidity = generator.render().unwrap(); +``` + +### Encode proof into calldata to invoke `verifyProof` + +```rust +let calldata = encode_calldata(vk_address, &proof, &instances); +``` + +Note that function selector is already included. 
+ +## Limitations + +- It only allows circuit with **exact 1 instance column** and **no rotated query to this instance column**. +- Option `--via-ir` seems necessary when compiling the generated contract, otherwise it'd cause stack too deep error. However, `--via-ir` is not allowed to be used with `--standard-json`, not sure how to work around this yet. +- Even the `configure` is same, the [selector compression](https://github.com/privacy-scaling-explorations/halo2/blob/7a2165617195d8baa422ca7b2b364cef02380390/halo2_proofs/src/plonk/circuit/compress_selectors.rs#L51) might lead to different configuration when selector assignments are different. To avoid this we might need to update halo2 to support disabling selector compression. +- Now it only supports BDFG21 batch open scheme (aka SHPLONK), GWC19 is not yet implemented. + +## Compatibility + +The [`Keccak256Transcript`](./src/transcript.rs#L19) behaves exactly same as the `EvmTranscript` in `snark-verifier`. + +## Acknowledgement + +The template is heavily inspired by Aztec's [`BaseUltraVerifier.sol`](https://github.com/AztecProtocol/barretenberg/blob/4c456a2b196282160fd69bead6a1cea85289af37/sol/src/ultra/BaseUltraVerifier.sol). 
diff --git a/askama.toml b/askama.toml new file mode 100644 index 0000000..44f2f2f --- /dev/null +++ b/askama.toml @@ -0,0 +1,3 @@ +[[escaper]] +path = "askama::Text" +extensions = ["sol"] diff --git a/examples/separately.rs b/examples/separately.rs new file mode 100644 index 0000000..5a4f9a5 --- /dev/null +++ b/examples/separately.rs @@ -0,0 +1,242 @@ +use application::StandardPlonk; +use prelude::*; + +use halo2_solidity_verifier::{ + compile_solidity, encode_calldata, BatchOpenScheme::Bdfg21, Evm, Keccak256Transcript, + SolidityGenerator, +}; + +const K_RANGE: Range = 10..17; + +fn main() { + let mut rng = seeded_std_rng(); + + let params = setup(K_RANGE, &mut rng); + + let vk = keygen_vk(¶ms[&K_RANGE.start], &StandardPlonk::default()).unwrap(); + let generator = SolidityGenerator::new(¶ms[&K_RANGE.start], &vk, Bdfg21, 0); + let (verifier_solidity, _) = generator.render_separately().unwrap(); + save_solidity("Halo2Verifier.sol", &verifier_solidity); + + let verifier_creation_code = compile_solidity(&verifier_solidity); + let verifier_creation_code_size = verifier_creation_code.len(); + println!("Verifier creation code size: {verifier_creation_code_size}"); + + let mut evm = Evm::default(); + let verifier_address = evm.create(verifier_creation_code); + + let deployed_verifier_solidity = verifier_solidity; + + for k in K_RANGE { + let num_instances = k as usize; + let circuit = StandardPlonk::rand(num_instances, &mut rng); + + let vk = keygen_vk(¶ms[&k], &circuit).unwrap(); + let pk = keygen_pk(¶ms[&k], vk, &circuit).unwrap(); + let generator = SolidityGenerator::new(¶ms[&k], pk.get_vk(), Bdfg21, num_instances); + let (verifier_solidity, vk_solidity) = generator.render_separately().unwrap(); + save_solidity(format!("Halo2VerifyingKey-{k}.sol"), &vk_solidity); + + assert_eq!(deployed_verifier_solidity, verifier_solidity); + + let vk_creation_code = compile_solidity(&vk_solidity); + let vk_address = evm.create(vk_creation_code); + + let calldata = { + let instances 
= circuit.instances(); + let proof = create_proof_checked(¶ms[&k], &pk, circuit, &instances, &mut rng); + encode_calldata(vk_address.0.into(), &proof, &instances) + }; + let (gas_cost, output) = evm.call(verifier_address, calldata); + assert_eq!(output, [vec![0; 31], vec![1]].concat()); + println!("Gas cost of verifying standard Plonk with 2^{k} rows: {gas_cost}"); + } +} + +fn save_solidity(name: impl AsRef, solidity: &str) { + const DIR_GENERATED: &str = "./generated"; + + create_dir_all(DIR_GENERATED).unwrap(); + File::create(format!("{DIR_GENERATED}/{}", name.as_ref())) + .unwrap() + .write_all(solidity.as_bytes()) + .unwrap(); +} + +fn setup(k_range: Range, mut rng: impl RngCore) -> HashMap> { + k_range + .clone() + .zip(k_range.map(|k| ParamsKZG::::setup(k, &mut rng))) + .collect() +} + +fn create_proof_checked( + params: &ParamsKZG, + pk: &ProvingKey, + circuit: impl Circuit, + instances: &[Fr], + mut rng: impl RngCore, +) -> Vec { + use halo2_proofs::{ + poly::kzg::{ + multiopen::{ProverSHPLONK, VerifierSHPLONK}, + strategy::SingleStrategy, + }, + transcript::TranscriptWriterBuffer, + }; + + let proof = { + let mut transcript = Keccak256Transcript::new(Vec::new()); + create_proof::<_, ProverSHPLONK<_>, _, _, _, _>( + params, + pk, + &[circuit], + &[&[instances]], + &mut rng, + &mut transcript, + ) + .unwrap(); + transcript.finalize() + }; + + let result = { + let mut transcript = Keccak256Transcript::new(proof.as_slice()); + verify_proof::<_, VerifierSHPLONK<_>, _, _, SingleStrategy<_>>( + params, + pk.get_vk(), + SingleStrategy::new(params), + &[&[instances]], + &mut transcript, + ) + }; + assert!(result.is_ok()); + + proof +} + +mod application { + use crate::prelude::*; + + #[derive(Clone)] + pub struct StandardPlonkConfig { + selectors: [Column; 5], + wires: [Column; 3], + } + + impl StandardPlonkConfig { + fn configure(meta: &mut ConstraintSystem) -> Self { + let [w_l, w_r, w_o] = [(); 3].map(|_| meta.advice_column()); + let [q_l, q_r, q_o, q_m, q_c] = 
[(); 5].map(|_| meta.fixed_column()); + let pi = meta.instance_column(); + [w_l, w_r, w_o].map(|column| meta.enable_equality(column)); + meta.create_gate( + "q_l·w_l + q_r·w_r + q_o·w_o + q_m·w_l·w_r + q_c + pi = 0", + |meta| { + let [w_l, w_r, w_o] = + [w_l, w_r, w_o].map(|column| meta.query_advice(column, Rotation::cur())); + let [q_l, q_r, q_o, q_m, q_c] = [q_l, q_r, q_o, q_m, q_c] + .map(|column| meta.query_fixed(column, Rotation::cur())); + let pi = meta.query_instance(pi, Rotation::cur()); + Some( + q_l * w_l.clone() + + q_r * w_r.clone() + + q_o * w_o + + q_m * w_l * w_r + + q_c + + pi, + ) + }, + ); + StandardPlonkConfig { + selectors: [q_l, q_r, q_o, q_m, q_c], + wires: [w_l, w_r, w_o], + } + } + } + + #[derive(Clone, Debug, Default)] + pub struct StandardPlonk(Vec); + + impl StandardPlonk { + pub fn rand(num_instances: usize, mut rng: R) -> Self { + Self((0..num_instances).map(|_| F::random(&mut rng)).collect()) + } + + pub fn instances(&self) -> Vec { + self.0.clone() + } + } + + impl Circuit for StandardPlonk { + type Config = StandardPlonkConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + unimplemented!() + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + meta.set_minimum_degree(4); + StandardPlonkConfig::configure(meta) + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let [q_l, q_r, q_o, q_m, q_c] = config.selectors; + let [w_l, w_r, w_o] = config.wires; + layouter.assign_region( + || "", + |mut region| { + for (offset, instance) in self.0.iter().enumerate() { + region.assign_advice(|| "", w_l, offset, || Value::known(*instance))?; + region.assign_fixed(|| "", q_l, offset, || Value::known(-F::ONE))?; + } + let offset = self.0.len(); + let a = region.assign_advice(|| "", w_l, offset, || Value::known(F::ONE))?; + a.copy_advice(|| "", &mut region, w_r, offset)?; + a.copy_advice(|| "", &mut region, w_o, offset)?; + let offset = 
offset + 1; + region.assign_advice(|| "", w_l, offset, || Value::known(-F::from(5)))?; + for (column, idx) in [q_l, q_r, q_o, q_m, q_c].iter().zip(1..) { + region.assign_fixed( + || "", + *column, + offset, + || Value::known(F::from(idx)), + )?; + } + Ok(()) + }, + ) + } + } +} + +mod prelude { + pub use halo2_proofs::{ + circuit::{Layouter, SimpleFloorPlanner, Value}, + halo2curves::{ + bn256::{Bn256, Fr, G1Affine}, + ff::PrimeField, + }, + plonk::*, + poly::{commitment::Params, kzg::commitment::ParamsKZG, Rotation}, + }; + pub use rand::{ + rngs::{OsRng, StdRng}, + RngCore, SeedableRng, + }; + pub use std::{ + collections::HashMap, + fs::{create_dir_all, File}, + io::Write, + ops::Range, + }; + + pub fn seeded_std_rng() -> impl RngCore { + StdRng::seed_from_u64(OsRng.next_u64()) + } +} diff --git a/src/codegen.rs b/src/codegen.rs new file mode 100644 index 0000000..c359958 --- /dev/null +++ b/src/codegen.rs @@ -0,0 +1,334 @@ +use crate::codegen::{ + evaluator::Evaluator, + pcs::{ + bdfg21_computations, queries, rotation_sets, + BatchOpenScheme::{Bdfg21, Gwc19}, + }, + template::{Halo2Verifier, Halo2VerifyingKey}, + util::{fr_to_u256, g1_to_u256s, g2_to_u256s, ConstraintSystemMeta, Data, Ptr}, +}; +use halo2_proofs::{ + halo2curves::{bn256, ff::Field}, + plonk::VerifyingKey, + poly::{commitment::ParamsProver, kzg::commitment::ParamsKZG, Rotation}, +}; +use itertools::{chain, Itertools}; +use ruint::aliases::U256; +use std::fmt::{self, Debug}; + +mod evaluator; +mod pcs; +mod template; +pub(crate) mod util; + +pub use pcs::BatchOpenScheme; + +/// Solidity verifier generator for [`halo2`] proof with KZG polynomial commitment scheme on BN254. +#[derive(Debug)] +pub struct SolidityGenerator<'a> { + params: &'a ParamsKZG, + vk: &'a VerifyingKey, + scheme: BatchOpenScheme, + num_instances: usize, + acc_encoding: Option, + meta: ConstraintSystemMeta, +} + +/// KZG accumulator encoding information. 
+/// Limbs of each field element are assumed to be least significant limb first. +/// +/// Given instances and `AccumulatorEncoding`, the accumulator will be interpreted as below: +/// ```rust +/// use halo2_proofs::halo2curves::{bn256, ff::{Field, PrimeField}, CurveAffine}; +/// +/// fn accumulator_from_limbs( +/// instances: &[bn256::Fr], +/// offset: usize, +/// num_limbs: usize, +/// num_limb_bits: usize, +/// ) -> (bn256::G1Affine, bn256::G1Affine) { +/// let limbs = |offset| &instances[offset..offset + num_limbs]; +/// let acc_lhs_x = fe_from_limbs(limbs(offset), num_limb_bits); +/// let acc_lhs_y = fe_from_limbs(limbs(offset + num_limbs), num_limb_bits); +/// let acc_rhs_x = fe_from_limbs(limbs(offset + 2 * num_limbs), num_limb_bits); +/// let acc_rhs_y = fe_from_limbs(limbs(offset + 3 * num_limbs), num_limb_bits); +/// let acc_lhs = bn256::G1Affine::from_xy(acc_lhs_x, acc_lhs_y).unwrap(); +/// let acc_rhs = bn256::G1Affine::from_xy(acc_rhs_x, acc_rhs_y).unwrap(); +/// (acc_lhs, acc_rhs) +/// } +/// +/// fn fe_from_limbs(limbs: &[bn256::Fr], num_limb_bits: usize) -> bn256::Fq { +/// limbs.iter().rev().fold(bn256::Fq::ZERO, |acc, limb| { +/// acc * bn256::Fq::from(2).pow_vartime([num_limb_bits as u64]) +/// + bn256::Fq::from_repr_vartime(limb.to_repr()).unwrap() +/// }) +/// } +/// ``` +/// +/// In the end of `verifyProof`, the accumulator will be used to do batched pairing with the +/// pairing input of incoming proof. +#[derive(Clone, Copy, Debug)] +pub struct AccumulatorEncoding { + /// Offset of accumulator limbs in instances. + pub offset: usize, + /// Number of limbs per base field element. + pub num_limbs: usize, + /// Number of bits per limb. + pub num_limb_bits: usize, +} + +impl AccumulatorEncoding { + /// Return a new `AccumulatorEncoding`. 
+ pub fn new(offset: usize, num_limbs: usize, num_limb_bits: usize) -> Self { + Self { + offset, + num_limbs, + num_limb_bits, + } + } +} + +impl<'a> SolidityGenerator<'a> { + /// Return a new `SolidityGenerator`. + pub fn new( + params: &'a ParamsKZG, + vk: &'a VerifyingKey, + scheme: BatchOpenScheme, + num_instances: usize, + ) -> Self { + assert_ne!(vk.cs().num_advice_columns(), 0); + assert_eq!( + vk.cs().num_instance_columns(), + 1, + "Multiple instance columns is not yet implemented" + ); + assert!( + !vk.cs() + .instance_queries() + .iter() + .any(|(_, rotation)| *rotation != Rotation::cur()), + "Rotated query to instance column is not yet implemented" + ); + assert_eq!( + scheme, + BatchOpenScheme::Bdfg21, + "BatchOpenScheme::Gwc19 is not yet implemented" + ); + + Self { + params, + vk, + scheme, + num_instances, + acc_encoding: None, + meta: ConstraintSystemMeta::new(vk.cs()), + } + } + + /// Set `AccumulatorEncoding`. + pub fn set_acc_encoding(mut self, acc_encoding: Option) -> Self { + self.acc_encoding = acc_encoding; + self + } +} + +impl<'a> SolidityGenerator<'a> { + /// Render `Halo2Verifier.sol` with verifying key embedded into writer. + pub fn render_into(&self, verifier_writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { + self.generate_verifier(false).render(verifier_writer) + } + + /// Render `Halo2Verifier.sol` with verifying key embedded and return it as `String`. + pub fn render(&self) -> Result { + let mut verifier_output = String::new(); + self.render_into(&mut verifier_output)?; + Ok(verifier_output) + } + + /// Render `Halo2Verifier.sol` and `Halo2VerifyingKey.sol` into writers. + pub fn render_separately_into( + &self, + verifier_writer: &mut impl fmt::Write, + vk_writer: &mut impl fmt::Write, + ) -> Result<(), fmt::Error> { + self.generate_verifier(true).render(verifier_writer)?; + self.generate_vk().render(vk_writer)?; + Ok(()) + } + + /// Render `Halo2Verifier.sol` and `Halo2VerifyingKey.sol` and return them as `String`. 
+ pub fn render_separately(&self) -> Result<(String, String), fmt::Error> { + let mut verifier_output = String::new(); + let mut vk_output = String::new(); + self.render_separately_into(&mut verifier_output, &mut vk_output)?; + Ok((verifier_output, vk_output)) + } + + fn generate_vk(&self) -> Halo2VerifyingKey { + let constants = { + let domain = self.vk.get_domain(); + let vk_digest = fr_to_u256(vk_transcript_repr(self.vk)); + let k = U256::from(domain.k()); + let n_inv = fr_to_u256(bn256::Fr::from(1 << domain.k()).invert().unwrap()); + let omega = fr_to_u256(domain.get_omega()); + let omega_inv = fr_to_u256(domain.get_omega_inv()); + let omega_inv_to_l = { + let l = self.meta.rotation_last.unsigned_abs() as u64; + fr_to_u256(domain.get_omega_inv().pow_vartime([l])) + }; + let num_instances = U256::from(self.num_instances); + let has_accumulator = U256::from(self.acc_encoding.is_some()); + let acc_offset = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.offset)) + .unwrap_or_default(); + let num_acc_limbs = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.num_limbs)) + .unwrap_or_default(); + let num_acc_limb_bits = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.num_limb_bits)) + .unwrap_or_default(); + let g1 = self.params.get_g()[0]; + let g1 = g1_to_u256s(g1); + let g2 = g2_to_u256s(self.params.g2()); + let neg_s_g2 = g2_to_u256s(-self.params.s_g2()); + vec![ + ("vk_digest", vk_digest), + ("k", k), + ("n_inv", n_inv), + ("omega", omega), + ("omega_inv", omega_inv), + ("omega_inv_to_l", omega_inv_to_l), + ("num_instances", num_instances), + ("has_accumulator", has_accumulator), + ("acc_offset", acc_offset), + ("num_acc_limbs", num_acc_limbs), + ("num_acc_limb_bits", num_acc_limb_bits), + ("g1_x", g1[0]), + ("g1_y", g1[1]), + ("g2_x_1", g2[0]), + ("g2_x_2", g2[1]), + ("g2_y_1", g2[2]), + ("g2_y_2", g2[3]), + ("neg_s_g2_x_1", neg_s_g2[0]), + ("neg_s_g2_x_2", neg_s_g2[1]), + ("neg_s_g2_y_1", neg_s_g2[2]), + 
("neg_s_g2_y_2", neg_s_g2[3]), + ] + }; + let fixed_comms = chain![self.vk.fixed_commitments()] + .flat_map(g1_to_u256s) + .tuples() + .collect(); + let permutation_comms = chain![self.vk.permutation().commitments()] + .flat_map(g1_to_u256s) + .tuples() + .collect(); + Halo2VerifyingKey { + constants, + fixed_comms, + permutation_comms, + } + } + + fn generate_verifier(&self, separate: bool) -> Halo2Verifier { + let proof_cptr = Ptr::calldata(if separate { 0x84 } else { 0x64 }); + + let vk = self.generate_vk(); + let vk_len = vk.len(); + let vk_mptr = Ptr::memory(self.estimate_static_working_memory_size(&vk, proof_cptr)); + let data = Data::new(&self.meta, &vk, vk_mptr, proof_cptr); + + let evaluator = Evaluator::new(self.vk.cs(), &self.meta, &data); + let quotient_eval_numer_computations = chain![ + evaluator.gate_computations(), + evaluator.permutation_computations(), + evaluator.lookup_computations() + ] + .enumerate() + .map(|(idx, (mut lines, var))| { + let line = if idx == 0 { + format!("quotient_eval_numer := {var}") + } else { + format!( + "quotient_eval_numer := addmod(mulmod(quotient_eval_numer, y, r), {var}, r)" + ) + }; + lines.push(line); + lines + }) + .collect(); + + let pcs_computations = match self.scheme { + Bdfg21 => bdfg21_computations(&self.meta, &data), + Gwc19 => unimplemented!(), + }; + + Halo2Verifier { + scheme: self.scheme, + vk: (!separate).then_some(vk), + vk_len, + vk_mptr, + num_neg_lagranges: self.meta.rotation_last.unsigned_abs() as usize, + num_advices: self.meta.num_advices(), + num_challenges: self.meta.num_challenges(), + num_evals: self.meta.num_evals, + num_quotients: self.meta.num_quotients, + proof_cptr, + quotient_comm_cptr: data.quotient_comm_cptr, + proof_len: self.meta.proof_len(self.scheme), + challenge_mptr: data.challenge_mptr, + theta_mptr: data.theta_mptr, + quotient_eval_numer_computations, + pcs_computations, + } + } + + fn estimate_static_working_memory_size( + &self, + vk: &Halo2VerifyingKey, + proof_cptr: Ptr, 
+ ) -> usize { + let pcs_computation = match self.scheme { + Bdfg21 => { + let mock_vk_mptr = Ptr::memory(0x100000); + let mock = Data::new(&self.meta, vk, mock_vk_mptr, proof_cptr); + let (superset, sets) = rotation_sets(&queries(&self.meta, &mock)); + let num_coeffs = sets.iter().map(|set| set.rots().len()).sum::(); + 2 * (1 + num_coeffs) + 6 + 2 * superset.len() + 1 + 3 * sets.len() + } + Gwc19 => unimplemented!(), + }; + + itertools::max(chain![ + // Hashing advice commitments + chain![self.meta.num_advices().into_iter()].map(|n| n * 2 + 1), + // Hashing evaluations + [self.meta.num_evals + 1], + // PCS computation + [pcs_computation], + // Pairing + [12], + ]) + .unwrap() + * 0x20 + } +} + +// Remove when `vk.transcript_repr()` is ready for usage. +fn vk_transcript_repr(vk: &VerifyingKey) -> bn256::Fr { + use blake2b_simd::Params; + use halo2_proofs::halo2curves::ff::FromUniformBytes; + + let fmtted_pinned_vk = format!("{:?}", vk.pinned()); + let mut hasher = Params::new() + .hash_length(64) + .personal(b"Halo2-Verify-Key") + .to_state(); + hasher + .update(&(fmtted_pinned_vk.len() as u64).to_le_bytes()) + .update(fmtted_pinned_vk.as_bytes()); + FromUniformBytes::from_uniform_bytes(hasher.finalize().as_array()) +} diff --git a/src/codegen/evaluator.rs b/src/codegen/evaluator.rs new file mode 100644 index 0000000..f558987 --- /dev/null +++ b/src/codegen/evaluator.rs @@ -0,0 +1,386 @@ +#![allow(clippy::useless_format)] + +use crate::codegen::util::{code_block, fe_to_u256, ConstraintSystemMeta, Data}; +use halo2_proofs::{ + halo2curves::ff::PrimeField, + plonk::{ + Advice, AdviceQuery, Any, Challenge, ConstraintSystem, Expression, Fixed, FixedQuery, Gate, + InstanceQuery, + }, +}; +use itertools::{chain, izip, Itertools}; +use ruint::aliases::U256; +use std::{borrow::Borrow, cell::RefCell, cmp::Ordering, collections::HashMap, iter}; + +#[derive(Debug)] +pub(crate) struct Evaluator<'a, F: PrimeField> { + cs: &'a ConstraintSystem, + meta: &'a ConstraintSystemMeta, 
+ data: &'a Data, + var_counter: RefCell, + var_cache: RefCell>, +} + +impl<'a, F> Evaluator<'a, F> +where + F: PrimeField, +{ + pub(crate) fn new( + cs: &'a ConstraintSystem, + meta: &'a ConstraintSystemMeta, + data: &'a Data, + ) -> Self { + Self { + cs, + meta, + data, + var_counter: Default::default(), + var_cache: Default::default(), + } + } + + pub fn gate_computations(&self) -> Vec<(Vec, String)> { + self.cs + .gates() + .iter() + .flat_map(Gate::polynomials) + .map(|expression| self.evaluate_and_reset(expression)) + .collect() + } + + pub fn permutation_computations(&self) -> Vec<(Vec, String)> { + let Self { meta, data, .. } = self; + let last_chunk_idx = meta.num_permutation_zs - 1; + chain![ + data.permutation_z_evals.first().map(|(z, _, _)| { + vec![ + format!("let l_0 := mload(L_0_MPTR)"), + format!("let eval := addmod(l_0, sub(r, mulmod(l_0, {z}, r)), r)"), + ] + }), + data.permutation_z_evals.last().map(|(z, _, _)| { + let item = "addmod(mulmod(perm_z_last, perm_z_last, r), sub(r, perm_z_last), r)"; + vec![ + format!("let perm_z_last := {z}"), + format!("let eval := mulmod(mload(L_LAST_MPTR), {item}, r)"), + ] + }), + data.permutation_z_evals.iter().tuple_windows().map( + |((_, _, z_i_last), (z_j, _, _))| { + let item = format!("addmod({z_j}, sub(r, {z_i_last}), r)"); + vec![format!("let eval := mulmod(mload(L_0_MPTR), {item}, r)")] + } + ), + izip!( + meta.permutation_columns.chunks(meta.permutation_chunk_len), + &data.permutation_z_evals, + ) + .enumerate() + .map(|(chunk_idx, (columns, evals))| { + let last_column_idx = columns.len() - 1; + chain![ + [ + format!("let gamma := mload(GAMMA_MPTR)"), + format!("let beta := mload(BETA_MPTR)"), + format!("let lhs := {}", evals.1), + format!("let rhs := {}", evals.0), + ], + columns.iter().flat_map(|column| { + let perm_eval = &data.permutation_evals[column]; + let eval = self.eval(*column.column_type(), column.index(), 0); + let item = format!("mulmod(beta, {perm_eval}, r)"); + [format!( + "lhs := 
mulmod(lhs, addmod(addmod({eval}, {item}, r), gamma, r), r)" + )] + }), + (chunk_idx == 0) + .then(|| "mstore(0x00, mulmod(beta, mload(X_MPTR), r))".to_string()), + columns.iter().enumerate().flat_map(|(idx, column)| { + let eval = self.eval(*column.column_type(), column.index(), 0); + let item = format!("addmod(addmod({eval}, mload(0x00), r), gamma, r)"); + chain![ + [format!("rhs := mulmod(rhs, {item}, r)")], + (!(chunk_idx == last_chunk_idx && idx == last_column_idx)) + .then(|| "mstore(0x00, mulmod(mload(0x00), delta, r))".to_string()), + ] + }), + { + let item = format!("addmod(mload(L_LAST_MPTR), mload(L_BLIND_MPTR), r)"); + let item = format!("sub(r, mulmod(left_sub_right, {item}, r))"); + [ + format!("let left_sub_right := addmod(lhs, sub(r, rhs), r)"), + format!("let eval := addmod(left_sub_right, {item}, r)"), + ] + } + ] + .collect_vec() + }) + ] + .zip(iter::repeat("eval".to_string())) + .collect() + } + + pub fn lookup_computations(&self) -> Vec<(Vec, String)> { + let input_tables = self + .cs + .lookups() + .iter() + .map(|lookup| { + let [(input_lines, inputs), (table_lines, tables)] = + [lookup.input_expressions(), lookup.table_expressions()].map(|expressions| { + let (lines, inputs) = expressions + .iter() + .map(|expression| self.evaluate(expression)) + .fold((Vec::new(), Vec::new()), |mut acc, result| { + acc.0.extend(result.0); + acc.1.push(result.1); + acc + }); + self.reset(); + (lines, inputs) + }); + (input_lines, inputs, table_lines, tables) + }) + .collect_vec(); + izip!(input_tables, &self.data.lookup_evals) + .flat_map(|(input_table, evals)| { + let (input_lines, inputs, table_lines, tables) = input_table; + let (input_0, rest_inputs) = inputs.split_first().unwrap(); + let (table_0, rest_tables) = tables.split_first().unwrap(); + let (z, z_next, p_input, p_input_prev, p_table) = evals; + [ + vec![ + format!("let l_0 := mload(L_0_MPTR)"), + format!("let eval := addmod(l_0, mulmod(l_0, sub(r, {z}), r), r)"), + ], + { + let item = 
format!("addmod(mulmod({z}, {z}, r), sub(r, {z}), r)"); + vec![ + format!("let l_last := mload(L_LAST_MPTR)"), + format!("let eval := mulmod(l_last, {item}, r)"), + ] + }, + chain![ + ["let theta := mload(THETA_MPTR)", "let input"].map(str::to_string), + code_block::<1, false>(chain![ + input_lines, + [format!("input := {input_0}")], + rest_inputs.iter().map(|input| format!( + "input := addmod(mulmod(input, theta, r), {input}, r)" + )) + ]), + ["let table"].map(str::to_string), + code_block::<1, false>(chain![ + table_lines, + [format!("table := {table_0}")], + rest_tables.iter().map(|table| format!( + "table := addmod(mulmod(table, theta, r), {table}, r)" + )) + ]), + { + let lhs = format!("addmod({p_input}, beta, r)"); + let rhs = format!("addmod({p_table}, gamma, r)"); + let permuted = format!("mulmod({lhs}, {rhs}, r)"); + let input = + "mulmod(addmod(input, beta, r), addmod(table, gamma, r), r)"; + [ + format!("let beta := mload(BETA_MPTR)"), + format!("let gamma := mload(GAMMA_MPTR)"), + format!("let lhs := mulmod({z_next}, {permuted}, r)"), + format!("let rhs := mulmod({z}, {input}, r)"), + ] + }, + { + let l_inactive = "addmod(mload(L_BLIND_MPTR), mload(L_LAST_MPTR), r)"; + let l_active = format!("addmod(1, sub(r, {l_inactive}), r)"); + [format!( + "let eval := mulmod({l_active}, addmod(lhs, sub(r, rhs), r), r)" + )] + }, + ] + .collect_vec(), + { + let l_0 = "mload(L_0_MPTR)"; + let item = format!("addmod({p_input}, sub(r, {p_table}), r)"); + vec![format!("let eval := mulmod({l_0}, {item}, r)")] + }, + { + let l_inactive = "addmod(mload(L_BLIND_MPTR), mload(L_LAST_MPTR), r)"; + let l_active = format!("addmod(1, sub(r, {l_inactive}), r)"); + let lhs = format!("addmod({p_input}, sub(r, {p_table}), r)"); + let rhs = format!("addmod({p_input}, sub(r, {p_input_prev}), r)"); + vec![format!( + "let eval := mulmod({l_active}, mulmod({lhs}, {rhs}, r), r)" + )] + }, + ] + }) + .zip(iter::repeat("eval".to_string())) + .collect_vec() + } + + fn eval(&self, column_type: 
impl Into, column_index: usize, rotation: i32) -> String { + match column_type.into() { + Any::Advice(_) => self.data.advice_evals[&(column_index, rotation)].to_string(), + Any::Fixed => self.data.fixed_evals[&(column_index, rotation)].to_string(), + Any::Instance => self.data.instance_eval.to_string(), + } + } + + fn reset(&self) { + *self.var_counter.borrow_mut() = Default::default(); + *self.var_cache.borrow_mut() = Default::default(); + } + + fn evaluate_and_reset(&self, expression: &Expression) -> (Vec, String) { + let result = self.evaluate(expression); + self.reset(); + result + } + + fn evaluate(&self, expression: &Expression) -> (Vec, String) { + evaluate( + expression, + &|constant| { + let constant = u256_string(constant); + self.init_var(constant, None) + }, + &|query| { + self.init_var( + self.eval(Fixed, query.column_index(), query.rotation().0), + Some(fixed_eval_var(query)), + ) + }, + &|query| { + self.init_var( + self.eval(Advice::default(), query.column_index(), query.rotation().0), + Some(advice_eval_var(query)), + ) + }, + &|_| self.init_var(self.data.instance_eval, Some("i_eval".to_string())), + &|challenge| { + self.init_var( + self.data.challenges[challenge.index()], + Some(format!("c_{}", challenge.index())), + ) + }, + &|(mut acc, var)| { + let (lines, var) = self.init_var(format!("sub(r, {var})"), None); + acc.extend(lines); + (acc, var) + }, + &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { + let (lines, var) = self.init_var(format!("addmod({lhs_var}, {rhs_var}, r)"), None); + lhs_acc.extend(rhs_acc); + lhs_acc.extend(lines); + (lhs_acc, var) + }, + &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { + let (lines, var) = self.init_var(format!("mulmod({lhs_var}, {rhs_var}, r)"), None); + lhs_acc.extend(rhs_acc); + lhs_acc.extend(lines); + (lhs_acc, var) + }, + &|(mut acc, var), scalar| { + let scalar = u256_string(scalar); + let (lines, var) = self.init_var(format!("mulmod({var}, {scalar}, r)"), None); + acc.extend(lines); + (acc, var) + }, + 
) + } + + fn init_var(&self, value: impl ToString, var: Option) -> (Vec, String) { + let value = value.to_string(); + if self.var_cache.borrow().contains_key(&value) { + (vec![], self.var_cache.borrow()[&value].clone()) + } else { + let var = var.unwrap_or_else(|| self.next_var()); + self.var_cache + .borrow_mut() + .insert(value.clone(), var.clone()); + (vec![format!("let {var} := {value}")], var) + } + } + + fn next_var(&self) -> String { + let count = *self.var_counter.borrow(); + *self.var_counter.borrow_mut() += 1; + format!("var{count}") + } +} + +fn u256_string(value: U256) -> String { + if value.bit_len() < 64 { + format!("0x{:x}", value.as_limbs()[0]) + } else { + format!("0x{value:x}") + } +} + +fn fixed_eval_var(fixed_query: FixedQuery) -> String { + let column_index = fixed_query.column_index(); + let rotation = fixed_query.rotation().0; + match rotation.cmp(&0) { + Ordering::Less => { + format!("f_{}_prev_{}", column_index, rotation.abs()) + } + Ordering::Equal => { + format!("f_{}", column_index) + } + Ordering::Greater => { + format!("f_{}_next_{}", column_index, rotation) + } + } +} + +fn advice_eval_var(advice_query: AdviceQuery) -> String { + let column_index = advice_query.column_index(); + let rotation = advice_query.rotation().0; + match rotation.cmp(&0) { + Ordering::Less => { + format!("a_{}_prev_{}", column_index, rotation.abs()) + } + Ordering::Equal => { + format!("a_{}", column_index) + } + Ordering::Greater => { + format!("a_{}_next_{}", column_index, rotation) + } + } +} + +#[allow(clippy::too_many_arguments)] +fn evaluate( + expression: &Expression, + constant: &impl Fn(U256) -> T, + fixed: &impl Fn(FixedQuery) -> T, + advice: &impl Fn(AdviceQuery) -> T, + instance: &impl Fn(InstanceQuery) -> T, + challenge: &impl Fn(Challenge) -> T, + negated: &impl Fn(T) -> T, + sum: &impl Fn(T, T) -> T, + product: &impl Fn(T, T) -> T, + scaled: &impl Fn(T, U256) -> T, +) -> T +where + F: PrimeField, +{ + let evaluate = |expr| { + evaluate( + expr, 
constant, fixed, advice, instance, challenge, negated, sum, product, scaled, + ) + }; + match expression.borrow() { + Expression::Constant(scalar) => constant(fe_to_u256(*scalar)), + Expression::Selector(_) => unreachable!(), + Expression::Fixed(query) => fixed(*query), + Expression::Advice(query) => advice(*query), + Expression::Instance(query) => instance(*query), + Expression::Challenge(value) => challenge(*value), + Expression::Negated(value) => negated(evaluate(value)), + Expression::Sum(lhs, rhs) => sum(evaluate(lhs), evaluate(rhs)), + Expression::Product(lhs, rhs) => product(evaluate(lhs), evaluate(rhs)), + Expression::Scaled(value, scalar) => scaled(evaluate(value), fe_to_u256(*scalar)), + } +} diff --git a/src/codegen/pcs.rs b/src/codegen/pcs.rs new file mode 100644 index 0000000..ee96f85 --- /dev/null +++ b/src/codegen/pcs.rs @@ -0,0 +1,592 @@ +#![allow(clippy::useless_format)] + +use crate::codegen::util::{for_loop, ConstraintSystemMeta, Data, EcPoint, Location, Ptr, Word}; +use itertools::{chain, izip, Itertools}; +use std::collections::{BTreeMap, BTreeSet}; + +/// KZG batch open schemes in `halo2`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BatchOpenScheme { + /// Batch open scheme in [Plonk] paper. + /// Corresponding to `halo2_proofs::poly::kzg::multiopen::ProverGWC` + /// + /// [Plonk]: https://eprint.iacr.org/2019/953.pdf + Gwc19, + /// Batch open scheme in [BDFG21] paper. 
+ /// Corresponding to `halo2_proofs::poly::kzg::multiopen::ProverSHPLONK` + /// + /// [BDFG21]: https://eprint.iacr.org/2020/081.pdf + Bdfg21, +} + +#[derive(Debug)] +pub(crate) struct Query { + comm: EcPoint, + rot: i32, + eval: Word, +} + +impl Query { + fn new(comm: EcPoint, rot: i32, eval: Word) -> Self { + Self { comm, rot, eval } + } +} + +pub(crate) fn queries(meta: &ConstraintSystemMeta, data: &Data) -> Vec { + chain![ + meta.advice_queries.iter().map(|query| { + let comm = data.advice_comms[query.0]; + let eval = data.advice_evals[query]; + Query::new(comm, query.1, eval) + }), + izip!(&data.permutation_z_comms, &data.permutation_z_evals).flat_map(|(&comm, evals)| { + [Query::new(comm, 0, evals.0), Query::new(comm, 1, evals.1)] + }), + izip!(&data.permutation_z_comms, &data.permutation_z_evals) + .rev() + .skip(1) + .map(|(&comm, evals)| Query::new(comm, meta.rotation_last, evals.2)), + izip!( + &data.lookup_permuted_comms, + &data.lookup_z_comms, + &data.lookup_evals + ) + .flat_map(|(permuted_comms, &z_comm, evals)| { + [ + Query::new(z_comm, 0, evals.0), + Query::new(permuted_comms.0, 0, evals.2), + Query::new(permuted_comms.1, 0, evals.4), + Query::new(permuted_comms.0, -1, evals.3), + Query::new(z_comm, 1, evals.1), + ] + }), + meta.fixed_queries.iter().map(|query| { + let comm = data.fixed_comms[query.0]; + let eval = data.fixed_evals[query]; + Query::new(comm, query.1, eval) + }), + meta.permutation_columns.iter().map(|column| { + let comm = data.permutation_comms[column]; + let eval = data.permutation_evals[column]; + Query::new(comm, 0, eval) + }), + [ + Query::new(data.computed_quotient_comm, 0, data.computed_quotient_eval), + Query::new(data.random_comm, 0, data.random_eval), + ] + ] + .collect() +} + +#[derive(Debug)] +pub(crate) struct RotationSet { + rots: BTreeSet, + diffs: BTreeSet, + comms: Vec, + evals: Vec>, +} + +impl RotationSet { + pub(crate) fn rots(&self) -> &BTreeSet { + &self.rots + } + + pub(crate) fn diffs(&self) -> &BTreeSet { 
+ &self.diffs + } + + pub(crate) fn comms(&self) -> &[EcPoint] { + &self.comms + } + + pub(crate) fn evals(&self) -> &[Vec] { + &self.evals + } +} + +pub(crate) fn rotation_sets(queries: &[Query]) -> (BTreeSet, Vec) { + let mut superset = BTreeSet::new(); + let comm_queries = queries.iter().fold( + Vec::<(EcPoint, BTreeMap)>::new(), + |mut comm_queries, query| { + superset.insert(query.rot); + if let Some(pos) = comm_queries + .iter() + .position(|(comm, _)| comm == &query.comm) + { + let (_, queries) = &mut comm_queries[pos]; + assert!(!queries.contains_key(&query.rot)); + queries.insert(query.rot, query.eval); + } else { + comm_queries.push((query.comm, BTreeMap::from_iter([(query.rot, query.eval)]))); + } + comm_queries + }, + ); + let superset = superset; + let sets = + comm_queries + .into_iter() + .fold(Vec::::new(), |mut sets, (comm, queries)| { + if let Some(pos) = sets + .iter() + .position(|set| itertools::equal(&set.rots, queries.keys())) + { + let set = &mut sets[pos]; + if !set.comms.contains(&comm) { + set.comms.push(comm); + set.evals.push(queries.into_values().collect_vec()); + } + } else { + let diffs = BTreeSet::from_iter( + superset + .iter() + .filter(|rot| !queries.contains_key(rot)) + .copied(), + ); + let set = RotationSet { + rots: BTreeSet::from_iter(queries.keys().copied()), + diffs, + comms: vec![comm], + evals: vec![queries.into_values().collect()], + }; + sets.push(set); + } + sets + }); + (superset, sets) +} + +pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> Vec> { + let queries = queries(meta, data); + let (superset, sets) = rotation_sets(&queries); + let min_rot = *superset.first().unwrap(); + let max_rot = *superset.last().unwrap(); + let num_coeffs = sets.iter().map(|set| set.rots().len()).sum::(); + + let w = EcPoint::from(data.w_cptr); + let w_prime = EcPoint::from(data.w_cptr + 2); + + let diff_0 = Word::from(Ptr::memory(0x00)); + let coeffs = sets + .iter() + .scan(diff_0.ptr() + 1, |state, set| { 
+ let ptrs = Word::range(*state).take(set.rots().len()).collect_vec(); + *state = *state + set.rots().len(); + Some(ptrs) + }) + .collect_vec(); + + let first_batch_invert_end = diff_0.ptr() + 1 + num_coeffs; + let second_batch_invert_end = diff_0.ptr() + sets.len(); + let free_mptr = diff_0.ptr() + 2 * (1 + num_coeffs) + 6; + + let point_mptr = free_mptr; + let mu_minus_point_mptr = point_mptr + superset.len(); + let vanishing_0_mptr = mu_minus_point_mptr + superset.len(); + let diff_mptr = vanishing_0_mptr + 1; + let r_eval_mptr = diff_mptr + sets.len(); + let sum_mptr = r_eval_mptr + sets.len(); + + let point_vars = + izip!(&superset, (0..).map(|idx| format!("point_{idx}"))).collect::>(); + let points = izip!(&superset, Word::range(point_mptr)).collect::>(); + let mu_minus_points = + izip!(&superset, Word::range(mu_minus_point_mptr)).collect::>(); + let vanishing_0 = Word::from(vanishing_0_mptr); + let diffs = Word::range(diff_mptr).take(sets.len()).collect_vec(); + let r_evals = Word::range(r_eval_mptr).take(sets.len()).collect_vec(); + let sums = Word::range(sum_mptr).take(sets.len()).collect_vec(); + + let point_computations = chain![ + [ + "let x := mload(X_MPTR)", + "let omega := mload(OMEGA_MPTR)", + "let omega_inv := mload(OMEGA_INV_MPTR)", + "let x_pow_of_omega := mulmod(x, omega, r)" + ] + .map(str::to_string), + (1..=max_rot).flat_map(|rot| { + chain![ + points + .get(&rot) + .map(|point| format!("mstore({}, x_pow_of_omega)", point.ptr())), + (rot != max_rot) + .then(|| { "x_pow_of_omega := mulmod(x_pow_of_omega, omega, r)".to_string() }) + ] + }), + [ + format!("mstore({}, x)", points[&0].ptr()), + format!("x_pow_of_omega := mulmod(x, omega_inv, r)") + ], + (min_rot..0).rev().flat_map(|rot| { + chain![ + points + .get(&rot) + .map(|point| format!("mstore({}, x_pow_of_omega)", point.ptr())), + (rot != min_rot).then(|| { + "x_pow_of_omega := mulmod(x_pow_of_omega, omega_inv, r)".to_string() + }) + ] + }) + ] + .collect_vec(); + + let 
vanishing_computations = chain![ + ["let mu := mload(MU_MPTR)".to_string()], + { + let mptr = mu_minus_points.first_key_value().unwrap().1.ptr(); + let mptr_end = mptr + mu_minus_points.len(); + for_loop( + [ + format!("let mptr := {mptr}"), + format!("let mptr_end := {mptr_end}"), + format!("let point_mptr := {free_mptr}"), + ], + "lt(mptr, mptr_end)", + [ + "mptr := add(mptr, 0x20)", + "point_mptr := add(point_mptr, 0x20)", + ] + .map(str::to_string), + ["mstore(mptr, addmod(mu, sub(r, mload(point_mptr)), r))".to_string()], + ) + }, + ["let s".to_string()], + chain![ + [format!( + "s := {}", + mu_minus_points[sets[0].rots().first().unwrap()] + )], + chain![sets[0].rots().iter().skip(1)] + .map(|rot| { format!("s := mulmod(s, {}, r)", mu_minus_points[rot]) }), + [format!("mstore({}, s)", vanishing_0.ptr())], + ], + ["let diff".to_string()], + izip!(0.., &sets, &diffs).flat_map(|(set_idx, set, diff)| { + chain![ + [set.diffs() + .first() + .map(|rot| format!("diff := {}", mu_minus_points[rot])) + .unwrap_or_else(|| "diff := 1".to_string())], + chain![set.diffs().iter().skip(1)] + .map(|rot| { format!("diff := mulmod(diff, {}, r)", mu_minus_points[rot]) }), + [format!("mstore({}, diff)", diff.ptr())], + (set_idx == 0).then(|| format!("mstore({}, diff)", diff_0.ptr())), + ] + }) + ] + .collect_vec(); + + let coeff_computations = izip!(&sets, &coeffs) + .map(|(set, coeffs)| { + let coeff_points = set + .rots() + .iter() + .map(|rot| &point_vars[rot]) + .enumerate() + .map(|(i, rot_i)| { + set.rots() + .iter() + .map(|rot| &point_vars[rot]) + .enumerate() + .filter_map(|(j, rot_j)| (i != j).then_some((rot_i, rot_j))) + .collect_vec() + }) + .collect_vec(); + chain![ + set.rots() + .iter() + .map(|rot| { format!("let {} := {}", &point_vars[rot], points[rot]) }), + ["let coeff".to_string()], + izip!(set.rots(), &coeff_points, coeffs).flat_map( + |(rot_i, coeff_points, coeff)| chain![ + [coeff_points + .first() + .map(|(point_i, point_j)| { + format!("coeff := 
addmod({point_i}, sub(r, {point_j}), r)") + }) + .unwrap_or_else(|| { "coeff := 1".to_string() })], + coeff_points.iter().skip(1).map(|(point_i, point_j)| { + let item = format!("addmod({point_i}, sub(r, {point_j}), r)"); + format!("coeff := mulmod(coeff, {item}, r)") + }), + [ + format!("coeff := mulmod(coeff, {}, r)", mu_minus_points[rot_i]), + format!("mstore({}, coeff)", coeff.ptr()) + ], + ] + ) + ] + .collect_vec() + }) + .collect_vec(); + + let normalized_coeff_computations = chain![ + [ + format!("success := batch_invert(success, 0, {first_batch_invert_end}, r)"), + format!("let diff_0_inv := {diff_0}"), + format!("mstore({}, diff_0_inv)", diffs[0].ptr()), + ], + for_loop( + [ + format!("let mptr := {}", diffs[0].ptr() + 1), + format!("let mptr_end := {}", diffs[0].ptr() + sets.len()), + ], + "lt(mptr, mptr_end)", + ["mptr := add(mptr, 0x20)".to_string()], + ["mstore(mptr, mulmod(mload(mptr), diff_0_inv, r))".to_string()], + ), + ] + .collect_vec(); + + let r_evals_computations = izip!(0.., &sets, &coeffs, &diffs, &r_evals).map( + |(set_idx, set, coeffs, set_coeff, r_eval)| { + let is_single_rot_set = set.rots().len() == 1; + chain![ + is_single_rot_set.then(|| format!("let coeff := {}", coeffs[0])), + ["let zeta := mload(ZETA_MPTR)", "let r_eval := 0"].map(str::to_string), + if is_single_rot_set { + let eval_groups = set.evals().iter().rev().fold( + Vec::>::new(), + |mut eval_groups, evals| { + let eval = &evals[0]; + if let Some(last_group) = eval_groups.last_mut() { + let last_eval = **last_group.last().unwrap(); + if last_eval.ptr().value().is_integer() + && last_eval.ptr() - 1 == eval.ptr() + { + last_group.push(eval) + } else { + eval_groups.push(vec![eval]) + } + eval_groups + } else { + vec![vec![eval]] + } + }, + ); + chain![eval_groups.iter().enumerate()] + .flat_map(|(group_idx, evals)| { + if evals.len() < 3 { + chain![evals.iter().enumerate()] + .flat_map(|(eval_idx, eval)| { + let is_first_eval = group_idx == 0 && eval_idx == 0; + let item = 
format!("mulmod(coeff, {eval}, r)"); + chain![ + (!is_first_eval).then(|| format!( + "r_eval := mulmod(r_eval, zeta, r)" + )), + [format!("r_eval := addmod(r_eval, {item}, r)")], + ] + }) + .collect_vec() + } else { + let item = "mulmod(coeff, calldataload(mptr), r)"; + for_loop( + [ + format!("let mptr := {}", evals[0].ptr()), + format!("let mptr_end := {}", evals[0].ptr() - evals.len()), + ], + "lt(mptr_end, mptr)".to_string(), + ["mptr := sub(mptr, 0x20)".to_string()], + [format!( + "r_eval := addmod(mulmod(r_eval, zeta, r), {item}, r)" + )], + ) + } + }) + .collect_vec() + } else { + chain![set.evals().iter().enumerate().rev()] + .flat_map(|(idx, evals)| { + chain![ + izip!(evals, coeffs).map(|(eval, coeff)| { + let item = format!("mulmod({coeff}, {eval}, r)"); + format!("r_eval := addmod(r_eval, {item}, r)") + }), + (idx != 0).then(|| format!("r_eval := mulmod(r_eval, zeta, r)")), + ] + }) + .collect_vec() + }, + (set_idx != 0).then(|| format!("r_eval := mulmod(r_eval, {set_coeff}, r)")), + [format!("mstore({}, r_eval)", r_eval.ptr())], + ] + .collect_vec() + }, + ); + + let coeff_sums_computation = izip!(&coeffs, &sums).map(|(coeffs, sum)| { + let (coeff_0, rest_coeffs) = coeffs.split_first().unwrap(); + chain![ + [format!("let sum := {coeff_0}")], + rest_coeffs + .iter() + .map(|coeff_mptr| format!("sum := addmod(sum, {coeff_mptr}, r)")), + [format!("mstore({}, sum)", sum.ptr())], + ] + .collect_vec() + }); + + let r_eval_computations = chain![ + for_loop( + [ + format!("let mptr := 0x00"), + format!("let mptr_end := {}", second_batch_invert_end), + format!("let sum_mptr := {}", sums[0].ptr()), + ], + "lt(mptr, mptr_end)", + ["mptr := add(mptr, 0x20)", "sum_mptr := add(sum_mptr, 0x20)"].map(str::to_string), + ["mstore(mptr, mload(sum_mptr))".to_string()], + ), + [ + format!("success := batch_invert(success, 0, {second_batch_invert_end}, r)"), + format!( + "let r_eval := mulmod(mload({}), {}, r)", + second_batch_invert_end - 1, + r_evals.last().unwrap() + ) + 
], + for_loop( + [ + format!("let sum_inv_mptr := {}", second_batch_invert_end - 2), + format!("let sum_inv_mptr_end := {}", second_batch_invert_end), + format!("let r_eval_mptr := {}", r_evals[r_evals.len() - 2].ptr()), + ], + "lt(sum_inv_mptr, sum_inv_mptr_end)", + [ + "sum_inv_mptr := sub(sum_inv_mptr, 0x20)", + "r_eval_mptr := sub(r_eval_mptr, 0x20)" + ] + .map(str::to_string), + [ + "r_eval := mulmod(r_eval, mload(NU_MPTR), r)", + "r_eval := addmod(r_eval, mulmod(mload(sum_inv_mptr), mload(r_eval_mptr), r), r)" + ] + .map(str::to_string), + ), + ["mstore(R_EVAL_MPTR, r_eval)".to_string()], + ] + .collect_vec(); + + let pairing_input_computations = chain![ + ["let nu := mload(NU_MPTR)".to_string()], + izip!(0.., &sets, &diffs).flat_map(|(set_idx, set, set_coeff)| { + let is_first_set = set_idx == 0; + let is_last_set = set_idx == sets.len() - 1; + + let ec_add = &format!("ec_add_{}", if is_first_set { "acc" } else { "tmp" }); + let ec_mul = &format!("ec_mul_{}", if is_first_set { "acc" } else { "tmp" }); + let acc_x = Ptr::memory(0x00) + if is_first_set { 0 } else { 4 }; + let acc_y = acc_x + 1; + + let comm_groups = set.comms().iter().rev().skip(1).fold( + Vec::<(Location, Vec<&EcPoint>)>::new(), + |mut comm_groups, comm| { + if let Some(last_group) = comm_groups.last_mut() { + let last_comm = **last_group.1.last().unwrap(); + if last_group.0 == comm.loc() + && last_comm.x().ptr().value().is_integer() + && last_comm.x().ptr() - 2 == comm.x().ptr() + { + last_group.1.push(comm) + } else { + comm_groups.push((comm.loc(), vec![comm])) + } + comm_groups + } else { + vec![(comm.loc(), vec![comm])] + } + }, + ); + + chain![ + set.comms() + .last() + .map(|comm| { + [ + format!("mstore({acc_x}, {})", comm.x()), + format!("mstore({acc_y}, {})", comm.y()), + ] + }) + .into_iter() + .flatten(), + comm_groups.into_iter().flat_map(move |(loc, comms)| { + if comms.len() < 3 { + comms + .iter() + .flat_map(|comm| { + let (x, y) = (comm.x(), comm.y()); + [ + format!("success 
:= {ec_mul}(success, mload(ZETA_MPTR))"), + format!("success := {ec_add}(success, {x}, {y})"), + ] + }) + .collect_vec() + } else { + let mptr = comms.first().unwrap().x().ptr(); + let mptr_end = mptr - 2 * comms.len(); + let x = Word::from(Ptr::new(loc, "mptr")); + let y = Word::from(Ptr::new(loc, "add(mptr, 0x20)")); + for_loop( + [ + format!("let mptr := {mptr}"), + format!("let mptr_end := {mptr_end}"), + ], + "lt(mptr_end, mptr)", + ["mptr := sub(mptr, 0x40)".to_string()], + [ + format!("success := {ec_mul}(success, mload(ZETA_MPTR))"), + format!("success := {ec_add}(success, {x}, {y})"), + ], + ) + } + }), + (!is_first_set) + .then(|| { + let scalar = format!("mulmod(nu, {set_coeff}, r)"); + chain![ + [ + format!("success := ec_mul_tmp(success, {scalar})"), + format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), + ], + (!is_last_set).then(|| format!("nu := mulmod(nu, mload(NU_MPTR), r)")) + ] + }) + .into_iter() + .flatten(), + ] + .collect_vec() + }), + [ + format!("mstore(0x80, mload(G1_X_MPTR))"), + format!("mstore(0xa0, mload(G1_Y_MPTR))"), + format!("success := ec_mul_tmp(success, sub(r, mload(R_EVAL_MPTR)))"), + format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), + format!("mstore(0x80, {})", w.x()), + format!("mstore(0xa0, {})", w.y()), + format!("success := ec_mul_tmp(success, sub(r, {vanishing_0}))"), + format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), + format!("mstore(0x80, {})", w_prime.x()), + format!("mstore(0xa0, {})", w_prime.y()), + format!("success := ec_mul_tmp(success, mload(MU_MPTR))"), + format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), + format!("mstore(PAIRING_LHS_X_MPTR, mload(0x00))"), + format!("mstore(PAIRING_LHS_Y_MPTR, mload(0x20))"), + format!("mstore(PAIRING_RHS_X_MPTR, {})", w_prime.x()), + format!("mstore(PAIRING_RHS_Y_MPTR, {})", w_prime.y()), + ], + ] + .collect_vec(); + + chain![ + [point_computations, vanishing_computations], + coeff_computations, + 
[normalized_coeff_computations], + r_evals_computations, + coeff_sums_computation, + [r_eval_computations, pairing_input_computations], + ] + .collect_vec() +} diff --git a/src/codegen/template.rs b/src/codegen/template.rs new file mode 100644 index 0000000..218ac63 --- /dev/null +++ b/src/codegen/template.rs @@ -0,0 +1,83 @@ +use crate::codegen::{ + pcs::BatchOpenScheme::{self, Bdfg21, Gwc19}, + util::Ptr, +}; +use askama::{Error, Template}; +use ruint::aliases::U256; +use std::fmt; + +#[derive(Template)] +#[template(path = "Halo2VerifyingKey.sol")] +pub(crate) struct Halo2VerifyingKey { + pub(crate) constants: Vec<(&'static str, U256)>, + pub(crate) fixed_comms: Vec<(U256, U256)>, + pub(crate) permutation_comms: Vec<(U256, U256)>, +} + +impl Halo2VerifyingKey { + pub(crate) fn len(&self) -> usize { + (self.constants.len() * 0x20) + + (self.fixed_comms.len() + self.permutation_comms.len()) * 0x40 + } +} + +#[derive(Template)] +#[template(path = "Halo2Verifier.sol")] +pub(crate) struct Halo2Verifier { + pub(crate) scheme: BatchOpenScheme, + pub(crate) vk: Option, + pub(crate) vk_len: usize, + pub(crate) proof_len: usize, + pub(crate) vk_mptr: Ptr, + pub(crate) challenge_mptr: Ptr, + pub(crate) theta_mptr: Ptr, + pub(crate) proof_cptr: Ptr, + pub(crate) quotient_comm_cptr: Ptr, + pub(crate) num_neg_lagranges: usize, + pub(crate) num_advices: Vec, + pub(crate) num_challenges: Vec, + pub(crate) num_evals: usize, + pub(crate) num_quotients: usize, + pub(crate) quotient_eval_numer_computations: Vec>, + pub(crate) pcs_computations: Vec>, +} + +impl Halo2VerifyingKey { + pub(crate) fn render(&self, writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { + self.render_into(writer).map_err(|err| match err { + Error::Fmt(err) => err, + _ => unreachable!(), + }) + } +} + +impl Halo2Verifier { + pub(crate) fn render(&self, writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { + self.render_into(writer).map_err(|err| match err { + Error::Fmt(err) => err, + _ => 
unreachable!(), + }) + } +} + +mod filters { + use std::fmt::LowerHex; + + pub fn hex(value: impl LowerHex) -> ::askama::Result { + let value = format!("{value:x}"); + Ok(if value.len() % 2 == 1 { + format!("0x0{value}") + } else { + format!("0x{value}") + }) + } + + pub fn hex_padded(value: impl LowerHex, pad: usize) -> ::askama::Result { + let string = format!("0x{value:0pad$x}", pad = pad); + if string == "0x0" { + Ok(format!("0x{}", "0".repeat(pad))) + } else { + Ok(string) + } + } +} diff --git a/src/codegen/util.rs b/src/codegen/util.rs new file mode 100644 index 0000000..9d3aea9 --- /dev/null +++ b/src/codegen/util.rs @@ -0,0 +1,614 @@ +use crate::codegen::{ + template::Halo2VerifyingKey, + BatchOpenScheme::{self, Bdfg21, Gwc19}, +}; +use halo2_proofs::{ + halo2curves::{bn256, ff::PrimeField, CurveAffine}, + plonk::{Any, Column, ConstraintSystem}, +}; +use itertools::{chain, izip, Itertools}; +use ruint::{aliases::U256, UintTryFrom}; +use std::{ + borrow::Borrow, + collections::HashMap, + fmt::{self, Display, Formatter}, + ops::{Add, Sub}, +}; + +#[derive(Debug)] +pub(crate) struct ConstraintSystemMeta { + pub(crate) num_fixeds: usize, + pub(crate) permutation_columns: Vec>, + pub(crate) permutation_chunk_len: usize, + pub(crate) num_lookup_permuteds: usize, + pub(crate) num_permutation_zs: usize, + pub(crate) num_lookup_zs: usize, + pub(crate) num_quotients: usize, + pub(crate) advice_queries: Vec<(usize, i32)>, + pub(crate) fixed_queries: Vec<(usize, i32)>, + pub(crate) num_evals: usize, + pub(crate) num_user_advices: Vec, + pub(crate) num_user_challenges: Vec, + pub(crate) advice_indices: Vec, + pub(crate) challenge_indices: Vec, + pub(crate) rotation_last: i32, +} + +impl ConstraintSystemMeta { + pub(crate) fn new(cs: &ConstraintSystem) -> Self { + let num_fixeds = cs.num_fixed_columns(); + let permutation_columns = cs.permutation().get_columns(); + let permutation_chunk_len = cs.degree() - 2; + let num_lookup_permuteds = 2 * cs.lookups().len(); + let 
num_permutation_zs = cs + .permutation() + .get_columns() + .chunks(cs.degree() - 2) + .count(); + let num_lookup_zs = cs.lookups().len(); + let num_quotients = cs.degree() - 1; + let advice_queries = cs + .advice_queries() + .iter() + .map(|(column, rotation)| (column.index(), rotation.0)) + .collect_vec(); + let fixed_queries = cs + .fixed_queries() + .iter() + .map(|(column, rotation)| (column.index(), rotation.0)) + .collect_vec(); + let num_evals = advice_queries.len() + + fixed_queries.len() + + 1 + + cs.permutation().get_columns().len() + + (3 * num_permutation_zs - 1) + + 5 * cs.lookups().len(); + let num_phase = *cs.advice_column_phase().iter().max().unwrap_or(&0) as usize + 1; + // Indices of advice and challenge are not same as their position in calldata/memory, + // because we support multiple phases, we need to remap them and find their actual indices. + let remapping = |phase: Vec| { + let nums = phase.iter().fold(vec![0; num_phase], |mut nums, phase| { + nums[*phase as usize] += 1; + nums + }); + let offsets = nums + .iter() + .take(num_phase - 1) + .fold(vec![0], |mut offsets, n| { + offsets.push(offsets.last().unwrap() + n); + offsets + }); + let index = phase + .iter() + .scan(offsets, |state, phase| { + let index = state[*phase as usize]; + state[*phase as usize] += 1; + Some(index) + }) + .collect::>(); + (nums, index) + }; + let (num_user_advices, advice_indices) = remapping(cs.advice_column_phase()); + let (num_user_challenges, challenge_indices) = remapping(cs.challenge_phase()); + let rotation_last = -(cs.blinding_factors() as i32 + 1); + Self { + num_fixeds, + permutation_columns, + permutation_chunk_len, + num_lookup_permuteds, + num_permutation_zs, + num_lookup_zs, + num_quotients, + advice_queries, + fixed_queries, + num_evals, + num_user_advices, + num_user_challenges, + advice_indices, + challenge_indices, + rotation_last, + } + } + + pub(crate) fn num_advices(&self) -> Vec { + chain![ + self.num_user_advices.iter().cloned(), + 
(self.num_lookup_permuteds != 0).then_some(self.num_lookup_permuteds), // lookup permuted + [ + self.num_permutation_zs + self.num_lookup_zs + 1, // permutation and lookup grand products, random + self.num_quotients, // quotients + ], + ] + .collect() + } + + pub(crate) fn num_challenges(&self) -> Vec { + let mut num_challenges = self.num_user_challenges.clone(); + // If there is no lookup used, merge also beta and gamma into the last user phase, to avoid + // squeezing challenge from nothing. + // Otherwise, merge theta into last user phase since they are originally adjacent. + if self.num_lookup_permuteds == 0 { + *num_challenges.last_mut().unwrap() += 3; // theta, beta, gamma + num_challenges.extend([ + 1, // y + 1, // x + ]); + } else { + *num_challenges.last_mut().unwrap() += 1; // theta + num_challenges.extend([ + 2, // beta, gamma + 1, // y + 1, // x + ]); + } + num_challenges + } + + pub(crate) fn num_permutations(&self) -> usize { + self.permutation_columns.len() + } + + pub(crate) fn num_lookups(&self) -> usize { + self.num_lookup_zs + } + + pub(crate) fn proof_len(&self, scheme: BatchOpenScheme) -> usize { + self.num_advices().iter().sum::() * 0x40 + + self.num_evals * 0x20 + + self.batch_open_proof_len(scheme) + } + + pub(crate) fn batch_open_proof_len(&self, scheme: BatchOpenScheme) -> usize { + match scheme { + Bdfg21 => 2 * 0x40, + Gwc19 => { + unimplemented!() + } + } + } +} + +#[derive(Debug)] +pub(crate) struct Data { + pub(crate) challenge_mptr: Ptr, + pub(crate) theta_mptr: Ptr, + + pub(crate) quotient_comm_cptr: Ptr, + pub(crate) w_cptr: Ptr, + + pub(crate) fixed_comms: Vec, + pub(crate) permutation_comms: HashMap, EcPoint>, + pub(crate) advice_comms: Vec, + pub(crate) lookup_permuted_comms: Vec<(EcPoint, EcPoint)>, + pub(crate) permutation_z_comms: Vec, + pub(crate) lookup_z_comms: Vec, + pub(crate) random_comm: EcPoint, + + pub(crate) challenges: Vec, + + pub(crate) instance_eval: Word, + pub(crate) advice_evals: HashMap<(usize, i32), Word>, 
+ pub(crate) fixed_evals: HashMap<(usize, i32), Word>, + pub(crate) random_eval: Word, + pub(crate) permutation_evals: HashMap, Word>, + pub(crate) permutation_z_evals: Vec<(Word, Word, Word)>, + pub(crate) lookup_evals: Vec<(Word, Word, Word, Word, Word)>, + + pub(crate) computed_quotient_comm: EcPoint, + pub(crate) computed_quotient_eval: Word, +} + +impl Data { + pub(crate) fn new( + meta: &ConstraintSystemMeta, + vk: &Halo2VerifyingKey, + vk_mptr: Ptr, + proof_cptr: Ptr, + ) -> Self { + let fixed_comm_mptr = vk_mptr + vk.constants.len(); + let permutation_comm_mptr = fixed_comm_mptr + 2 * vk.fixed_comms.len(); + let challenge_mptr = permutation_comm_mptr + 2 * vk.permutation_comms.len(); + let theta_mptr = challenge_mptr + meta.challenge_indices.len(); + + let advice_comm_start = proof_cptr; + let lookup_permuted_comm_start = advice_comm_start + 2 * meta.advice_indices.len(); + let permutation_z_comm_start = lookup_permuted_comm_start + 2 * meta.num_lookup_permuteds; + let lookup_z_comm_start = permutation_z_comm_start + 2 * meta.num_permutation_zs; + let random_comm_start = lookup_z_comm_start + 2 * meta.num_lookup_zs; + let quotient_comm_start = random_comm_start + 2; + + let eval_cptr = quotient_comm_start + 2 * meta.num_quotients; + let advice_eval_cptr = eval_cptr; + let fixed_eval_cptr = advice_eval_cptr + meta.advice_queries.len(); + let random_eval_cptr = fixed_eval_cptr + meta.fixed_queries.len(); + let permutation_eval_cptr = random_eval_cptr + 1; + let permutation_z_eval_cptr = permutation_eval_cptr + meta.num_permutations(); + let lookup_eval_cptr = permutation_z_eval_cptr + 3 * meta.num_permutation_zs - 1; + let w_cptr = lookup_eval_cptr + 5 * meta.num_lookups(); + + let fixed_comms = EcPoint::range(fixed_comm_mptr) + .take(meta.num_fixeds) + .collect(); + let permutation_comms = izip!( + meta.permutation_columns.iter().cloned(), + EcPoint::range(permutation_comm_mptr) + ) + .collect(); + let advice_comms = meta + .advice_indices + .iter() + 
.map(|idx| advice_comm_start + 2 * idx) + .map_into() + .collect(); + let lookup_permuted_comms = EcPoint::range(lookup_permuted_comm_start) + .take(meta.num_lookup_permuteds) + .tuples() + .collect(); + let permutation_z_comms = EcPoint::range(permutation_z_comm_start) + .take(meta.num_permutation_zs) + .collect(); + let lookup_z_comms = EcPoint::range(lookup_z_comm_start) + .take(meta.num_lookup_zs) + .collect(); + let random_comm = random_comm_start.into(); + let computed_quotient_comm = EcPoint::new( + Ptr::memory("QUOTIENT_X_MPTR"), + Ptr::memory("QUOTIENT_Y_MPTR"), + ); + + let challenges = meta + .challenge_indices + .iter() + .map(|idx| challenge_mptr + *idx) + .map_into() + .collect_vec(); + let instance_eval = Ptr::memory("INSTANCE_EVAL_MPTR").into(); + let advice_evals = izip!( + meta.advice_queries.iter().cloned(), + Word::range(advice_eval_cptr) + ) + .collect(); + let fixed_evals = izip!( + meta.fixed_queries.iter().cloned(), + Word::range(fixed_eval_cptr) + ) + .collect(); + let random_eval = random_eval_cptr.into(); + let permutation_evals = izip!( + meta.permutation_columns.iter().cloned(), + Word::range(permutation_eval_cptr) + ) + .collect(); + let permutation_z_evals = Word::range(permutation_z_eval_cptr) + .take(3 * meta.num_permutation_zs) + .tuples() + .collect_vec(); + let lookup_evals = Word::range(lookup_eval_cptr) + .take(5 * meta.num_lookup_zs) + .tuples() + .collect_vec(); + let computed_quotient_eval = Ptr::memory("QUOTIENT_EVAL_MPTR").into(); + + Self { + challenge_mptr, + theta_mptr, + quotient_comm_cptr: quotient_comm_start, + w_cptr, + + fixed_comms, + permutation_comms, + advice_comms, + lookup_permuted_comms, + permutation_z_comms, + lookup_z_comms, + random_comm, + computed_quotient_comm, + + challenges, + + instance_eval, + advice_evals, + fixed_evals, + permutation_evals, + permutation_z_evals, + lookup_evals, + random_eval, + computed_quotient_eval, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum 
/// Location of an EVM word referenced by generated Yul: either the proof
/// calldata or the verifier's scratch memory.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum Location {
    Calldata,
    Memory,
}

impl Location {
    /// Yul opcode that loads a word from this location.
    fn opcode(&self) -> &'static str {
        match self {
            Location::Calldata => "calldataload",
            Location::Memory => "mload",
        }
    }
}

/// Value of a pointer: either a concrete integer offset, or the name of a
/// Solidity constant (identifier) to be emitted verbatim.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum Value {
    Integer(usize),
    Identifier(&'static str),
}

impl Value {
    /// Whether this value is a concrete integer (as opposed to an identifier).
    pub(crate) fn is_integer(&self) -> bool {
        matches!(self, Value::Integer(_))
    }

    /// Concrete integer value.
    ///
    /// # Panics
    /// Panics if the value is an `Identifier`.
    pub(crate) fn as_usize(&self) -> usize {
        match self {
            Value::Integer(int) => *int,
            Value::Identifier(_) => unreachable!(),
        }
    }
}

impl Default for Value {
    fn default() -> Self {
        Self::Integer(0)
    }
}

impl From<&'static str> for Value {
    fn from(ident: &'static str) -> Self {
        Value::Identifier(ident)
    }
}

impl From<usize> for Value {
    fn from(int: usize) -> Self {
        Value::Integer(int)
    }
}

impl Display for Value {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Value::Integer(int) => {
                // Pad to an even number of hex digits so the literal is
                // byte-aligned (e.g. `0x0123` instead of `0x123`).
                let hex = format!("{int:x}");
                if hex.len() % 2 == 1 {
                    write!(f, "0x0{hex}")
                } else {
                    write!(f, "0x{hex}")
                }
            }
            Value::Identifier(ident) => write!(f, "{ident}"),
        }
    }
}

impl Add<usize> for Value {
    type Output = Value;

    // Word-addressed arithmetic: advancing by 1 moves 0x20 bytes.
    // Panics (via `as_usize`) on `Identifier` values.
    fn add(self, rhs: usize) -> Self::Output {
        (self.as_usize() + rhs * 0x20).into()
    }
}

impl Sub<usize> for Value {
    type Output = Value;

    // Word-addressed arithmetic: see `Add` above.
    fn sub(self, rhs: usize) -> Self::Output {
        (self.as_usize() - rhs * 0x20).into()
    }
}
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct Ptr { + loc: Location, + value: Value, +} + +impl Ptr { + pub(crate) fn new(loc: Location, value: impl Into) -> Self { + Self { + loc, + value: value.into(), + } + } + + pub(crate) fn memory(value: impl Into) -> Self { + Self::new(Location::Memory, value.into()) + } + + pub(crate) fn calldata(value: impl Into) -> Self { + Self::new(Location::Calldata, value.into()) + } + + pub(crate) fn loc(&self) -> Location { + self.loc + } + + pub(crate) fn value(&self) -> Value { + self.value + } +} + +impl Display for Ptr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.value) + } +} + +impl Add for Ptr { + type Output = Ptr; + + fn add(mut self, rhs: usize) -> Self::Output { + self.value = self.value + rhs; + self + } +} + +impl Sub for Ptr { + type Output = Ptr; + + fn sub(mut self, rhs: usize) -> Self::Output { + self.value = self.value - rhs; + self + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct Word(Ptr); + +impl Word { + pub(crate) fn range(word: impl Into) -> impl Iterator { + let ptr = word.into().ptr(); + (0..).map(move |idx| ptr + idx).map_into() + } + + pub(crate) fn ptr(&self) -> Ptr { + self.0 + } +} + +impl Display for Word { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({})", self.0.loc.opcode(), self.0.value) + } +} + +impl From for Word { + fn from(ptr: Ptr) -> Self { + Self(ptr) + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) struct EcPoint { + x: Word, + y: Word, +} + +impl EcPoint { + pub(crate) fn new(x: impl Into, y: impl Into) -> Self { + Self { + x: x.into(), + y: y.into(), + } + } + + pub(crate) fn range(ec_point: impl Into) -> impl Iterator { + let ptr = ec_point.into().x.ptr(); + (0..).map(move |idx| ptr + 2 * idx).map_into() + } + + pub(crate) fn loc(&self) -> Location { + self.x.ptr().loc() + } + + pub(crate) fn x(&self) -> Word { + self.x + } + + pub(crate) fn y(&self) -> Word { + 
self.y + } +} + +impl From for EcPoint { + fn from(ptr: Ptr) -> Self { + Self::new(ptr, ptr + 1) + } +} + +/// Add indention to given lines by `4 * N` spaces. +pub(crate) fn indent(lines: impl IntoIterator) -> Vec { + lines + .into_iter() + .map(|line| format!("{}{line}", " ".repeat(N * 4))) + .collect() +} + +/// Create a code block for given lines with indention. +/// +/// If `PACKED` is true, single line code block will be packed into single line. +pub(crate) fn code_block( + lines: impl IntoIterator, +) -> Vec { + let lines = lines.into_iter().collect_vec(); + let bracket_indent = " ".repeat((N - 1) * 4); + match lines.len() { + 0 => vec![format!("{bracket_indent}{{}}")], + 1 if PACKED => vec![format!("{bracket_indent}{{ {} }}", lines[0])], + _ => chain![ + [format!("{bracket_indent}{{")], + indent::(lines), + [format!("{bracket_indent}}}")], + ] + .collect(), + } +} + +/// Create a for loop with proper indention. +pub(crate) fn for_loop( + initialization: impl IntoIterator, + condition: impl Into, + advancement: impl IntoIterator, + body: impl IntoIterator, +) -> Vec { + chain![ + ["for".to_string()], + code_block::<2, true>(initialization), + indent::<1>([condition.into()]), + code_block::<2, true>(advancement), + code_block::<1, false>(body), + ] + .collect() +} + +pub(crate) fn g1_to_u256s(ec_point: impl Borrow) -> [U256; 2] { + let coords = ec_point.borrow().coordinates().unwrap(); + [coords.x(), coords.y()].map(fq_to_u256) +} + +pub(crate) fn g2_to_u256s(ec_point: impl Borrow) -> [U256; 4] { + let coords = ec_point.borrow().coordinates().unwrap(); + let x = coords.x().to_repr(); + let y = coords.y().to_repr(); + [ + U256::try_from_le_slice(&x.as_ref()[0x20..]).unwrap(), + U256::try_from_le_slice(&x.as_ref()[..0x20]).unwrap(), + U256::try_from_le_slice(&y.as_ref()[0x20..]).unwrap(), + U256::try_from_le_slice(&y.as_ref()[..0x20]).unwrap(), + ] +} + +pub(crate) fn fq_to_u256(fe: impl Borrow) -> U256 { + fe_to_u256(fe) +} + +pub(crate) fn fr_to_u256(fe: impl 
Borrow) -> U256 { + fe_to_u256(fe) +} + +pub(crate) fn fe_to_u256(fe: impl Borrow) -> U256 +where + F: PrimeField, +{ + U256::from_le_bytes(fe.borrow().to_repr()) +} + +pub(crate) fn to_u256_be_bytes(value: T) -> [u8; 32] +where + U256: UintTryFrom, +{ + U256::from(value).to_be_bytes() +} diff --git a/src/evm.rs b/src/evm.rs new file mode 100644 index 0000000..e67594e --- /dev/null +++ b/src/evm.rs @@ -0,0 +1,217 @@ +use crate::codegen::util::{fr_to_u256, to_u256_be_bytes}; +use halo2_proofs::halo2curves::bn256; +use itertools::chain; +use ruint::aliases::U256; + +/// Function signature of `verifyProof(bytes,uint256[])`. +pub const FN_SIG_VERIFY_PROOF: [u8; 4] = [0x1e, 0x8e, 0x1e, 0x13]; + +/// Function signature of `verifyProof(address,bytes,uint256[])`. +pub const FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS: [u8; 4] = [0xaf, 0x83, 0xa1, 0x8d]; + +/// Encode proof into calldata to invoke `Halo2Verifier.verifyProof`. +/// +/// For `vk_address`: +/// - Pass `None` if verifying key is embedded in `Halo2Verifier` +/// - Pass `Some(vk_address)` if verifying key is separated and deployed at `vk_address` +pub fn encode_calldata( + vk_address: Option<[u8; 20]>, + proof: &[u8], + instances: &[bn256::Fr], +) -> Vec { + let (fn_sig, offset) = if vk_address.is_some() { + (FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS, 0x60) + } else { + (FN_SIG_VERIFY_PROOF, 0x40) + }; + let vk_address = if let Some(vk_address) = vk_address { + U256::try_from_be_slice(&vk_address) + .unwrap() + .to_be_bytes::<0x20>() + .to_vec() + } else { + Vec::new() + }; + let num_instances = instances.len(); + chain![ + fn_sig, // function signature + vk_address, // verifying key address + to_u256_be_bytes(offset), // offset of proof + to_u256_be_bytes(offset + 0x20 + proof.len()), // offset of instances + to_u256_be_bytes(proof.len()), // length of proof + proof.iter().cloned(), // proof + to_u256_be_bytes(num_instances), // length of instances + instances.iter().map(fr_to_u256).flat_map(to_u256_be_bytes), // instances + 
] + .collect() +} + +#[cfg(any(test, feature = "evm"))] +pub(crate) mod test { + use revm::{ + primitives::{Address, CreateScheme, ExecutionResult, Output, TransactTo, TxEnv}, + InMemoryDB, EVM, + }; + use std::{ + fmt::{self, Debug, Formatter}, + io::{self, Write}, + process::{Command, Stdio}, + str, + }; + + /// Compile solidity with `--via-ir` flag, then return creation bytecode. + /// + /// # Panics + /// Panics if executable `solc` can not be found, or compilation fails. + pub fn compile_solidity(solidity: impl AsRef<[u8]>) -> Vec { + let mut process = match Command::new("solc") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .arg("--bin") + .arg("--via-ir") + .arg("-") + .spawn() + { + Ok(process) => process, + Err(err) if err.kind() == io::ErrorKind::NotFound => { + panic!("Command 'solc' not found"); + } + Err(err) => { + panic!("Failed to spwan process with command 'solc':\n{err}"); + } + }; + process + .stdin + .take() + .unwrap() + .write_all(solidity.as_ref()) + .unwrap(); + let output = process.wait_with_output().unwrap(); + let stdout = str::from_utf8(&output.stdout).unwrap(); + if let Some(binary) = find_binary(stdout) { + binary + } else { + panic!( + "Compilation fails:\n{}", + str::from_utf8(&output.stderr).unwrap() + ) + } + } + + fn find_binary(stdout: &str) -> Option> { + let start = stdout.find("Binary:")? + 8; + Some(hex::decode(&stdout[start..stdout.len() - 1]).unwrap()) + } + + /// Evm runner. + pub struct Evm { + evm: EVM, + } + + impl Debug for Evm { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut debug_struct = f.debug_struct("Evm"); + debug_struct + .field("env", &self.evm.env) + .field("db", &self.evm.db.as_ref().unwrap()) + .finish() + } + } + + impl Default for Evm { + fn default() -> Self { + Self { + evm: EVM { + env: Default::default(), + db: Some(Default::default()), + }, + } + } + } + + impl Evm { + /// Return code_size of given address. 
+ /// + /// # Panics + /// Panics if given address doesn't have bytecode. + pub fn code_size(&mut self, address: Address) -> usize { + self.evm.db.as_ref().unwrap().accounts[&address] + .info + .code + .as_ref() + .unwrap() + .len() + } + + /// Apply create transaction with given `bytecode` as creation bytecode. + /// Return created `address`. + /// + /// # Panics + /// Panics if execution reverts or halts unexpectedly. + pub fn create(&mut self, bytecode: Vec) -> Address { + let (_, output) = self.transact_success_or_panic(TxEnv { + gas_limit: u64::MAX, + transact_to: TransactTo::Create(CreateScheme::Create), + data: bytecode.into(), + ..Default::default() + }); + match output { + Output::Create(_, Some(address)) => address, + _ => unreachable!(), + } + } + + /// Apply call transaction to given `address` with `calldata`. + /// Returns `gas_used` and `return_data`. + /// + /// # Panics + /// Panics if execution reverts or halts unexpectedly. + pub fn call(&mut self, address: Address, calldata: Vec) -> (u64, Vec) { + let (gas_used, output) = self.transact_success_or_panic(TxEnv { + gas_limit: u64::MAX, + transact_to: TransactTo::Call(address), + data: calldata.into(), + ..Default::default() + }); + match output { + Output::Call(output) => (gas_used, output.into()), + _ => unreachable!(), + } + } + + fn transact_success_or_panic(&mut self, tx: TxEnv) -> (u64, Output) { + self.evm.env.tx = tx; + let result = self.evm.transact_commit().unwrap(); + self.evm.env.tx = Default::default(); + match result { + ExecutionResult::Success { + gas_used, + output, + logs, + .. 
+ } => { + if !logs.is_empty() { + println!("--- logs from {} ---", logs[0].address); + for (log_idx, log) in logs.iter().enumerate() { + println!("log#{log_idx}"); + for (topic_idx, topic) in log.topics.iter().enumerate() { + println!(" topic{topic_idx}: {:?}", topic); + } + } + println!("--- end ---"); + } + (gas_used, output) + } + ExecutionResult::Revert { gas_used, output } => panic!( + "Transaction reverts with gas_used {gas_used} and output {:#x}", + output + ), + ExecutionResult::Halt { reason, gas_used } => panic!( + "Transaction halts unexpectedly with gas_used {gas_used} and reason {:?}", + reason + ), + } + } + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..47d1d29 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,21 @@ +//! Solidity verifier generator for [`halo2`] proof with KZG polynomial commitment scheme on BN254. +//! +//! [`halo2`]: http://github.com/privacy-scaling-explorations/halo2 + +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![deny(rustdoc::broken_intra_doc_links)] + +mod codegen; +mod evm; +mod transcript; + +#[cfg(test)] +mod test; + +pub use codegen::{AccumulatorEncoding, BatchOpenScheme, SolidityGenerator}; +pub use evm::{encode_calldata, FN_SIG_VERIFY_PROOF, FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS}; +pub use transcript::Keccak256Transcript; + +#[cfg(feature = "evm")] +pub use evm::test::{compile_solidity, Evm}; diff --git a/src/test.rs b/src/test.rs new file mode 100644 index 0000000..416664a --- /dev/null +++ b/src/test.rs @@ -0,0 +1,612 @@ +use crate::{ + codegen::{AccumulatorEncoding, BatchOpenScheme::Bdfg21, SolidityGenerator}, + encode_calldata, + evm::test::{compile_solidity, Evm}, + FN_SIG_VERIFY_PROOF, FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS, +}; +use halo2_proofs::halo2curves::bn256::{Bn256, Fr}; +use rand::{rngs::StdRng, RngCore, SeedableRng}; +use sha3::Digest; +use std::{fs::File, io::Write}; + +#[test] +fn function_signature() { + for (fn_name, fn_sig) in [ + 
("verifyProof(bytes,uint256[])", FN_SIG_VERIFY_PROOF), + ( + "verifyProof(address,bytes,uint256[])", + FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS, + ), + ] { + assert_eq!( + <[u8; 32]>::from(sha3::Keccak256::digest(fn_name))[..4], + fn_sig, + ); + } +} + +#[test] +fn render_huge() { + run_render::>() +} + +#[test] +fn render_maingate() { + run_render::>() +} + +#[test] +fn render_separately_huge() { + run_render_separately::>() +} + +#[test] +fn render_separately_maingate() { + run_render_separately::>() +} + +fn run_render>() { + let acc_encoding = AccumulatorEncoding::new(0, 4, 68).into(); + let (params, vk, instances, proof) = + halo2::create_testdata_bdfg21::(C::min_k(), acc_encoding, std_rng()); + + let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, instances.len()) + .set_acc_encoding(acc_encoding); + let verifier_solidity = generator.render().unwrap(); + let verifier_creation_code = compile_solidity(verifier_solidity); + let verifier_creation_code_size = verifier_creation_code.len(); + + let mut evm = Evm::default(); + let verifier_address = evm.create(verifier_creation_code); + let verifier_runtime_code_size = evm.code_size(verifier_address); + + println!("Verifier creation code size: {verifier_creation_code_size}"); + println!("Verifier runtime code size: {verifier_runtime_code_size}"); + + let (gas_cost, output) = evm.call(verifier_address, encode_calldata(None, &proof, &instances)); + assert_eq!(output, [vec![0; 31], vec![1]].concat()); + println!("Gas cost: {gas_cost}"); +} + +fn run_render_separately>() { + let acc_encoding = AccumulatorEncoding::new(0, 4, 68).into(); + let (params, vk, instances, _) = + halo2::create_testdata_bdfg21::(C::min_k(), acc_encoding, std_rng()); + + let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, instances.len()) + .set_acc_encoding(acc_encoding); + let (verifier_solidity, _vk_solidity) = generator.render_separately().unwrap(); + let verifier_creation_code = compile_solidity(&verifier_solidity); + let 
verifier_creation_code_size = verifier_creation_code.len(); + + let mut evm = Evm::default(); + let verifier_address = evm.create(verifier_creation_code); + let verifier_runtime_code_size = evm.code_size(verifier_address); + + println!("Verifier creation code size: {verifier_creation_code_size}"); + println!("Verifier runtime code size: {verifier_runtime_code_size}"); + + let deployed_verifier_solidity = verifier_solidity; + + for k in C::min_k()..C::min_k() + 4 { + let (params, vk, instances, proof) = + halo2::create_testdata_bdfg21::(k, acc_encoding, std_rng()); + let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, instances.len()) + .set_acc_encoding(acc_encoding); + + let (verifier_solidity, vk_solidity) = generator.render_separately().unwrap(); + assert_eq!(deployed_verifier_solidity, verifier_solidity); + + let vk_creation_code = compile_solidity(&vk_solidity); + let vk_address = evm.create(vk_creation_code); + + let (gas_cost, output) = evm.call( + verifier_address, + encode_calldata(vk_address.0.into(), &proof, &instances), + ); + assert_eq!(output, [vec![0; 31], vec![1]].concat()); + println!("Gas cost: {gas_cost}"); + } +} + +fn std_rng() -> impl RngCore + Clone { + StdRng::seed_from_u64(0) +} + +#[allow(dead_code)] +fn save_generated(verifier: &str, vk: Option<&str>) { + const DIR_GENERATED: &str = "./target/generated"; + + std::fs::create_dir_all(DIR_GENERATED).unwrap(); + File::create(format!("{DIR_GENERATED}/Halo2Verifier.sol")) + .unwrap() + .write_all(verifier.as_bytes()) + .unwrap(); + if let Some(vk) = vk { + File::create(format!("{DIR_GENERATED}/Halo2VerifyingKey.sol")) + .unwrap() + .write_all(vk.as_bytes()) + .unwrap(); + } +} + +mod halo2 { + use crate::{codegen::AccumulatorEncoding, transcript::Keccak256Transcript}; + use halo2_proofs::{ + arithmetic::CurveAffine, + halo2curves::{ + bn256, + ff::{Field, PrimeField}, + group::{prime::PrimeCurveAffine, Curve, Group}, + pairing::{MillerLoopResult, MultiMillerLoop}, + }, + 
plonk::{create_proof, keygen_pk, keygen_vk, verify_proof, Circuit, VerifyingKey}, + poly::kzg::{ + commitment::ParamsKZG, + multiopen::{ProverSHPLONK, VerifierSHPLONK}, + strategy::SingleStrategy, + }, + transcript::TranscriptWriterBuffer, + }; + use itertools::Itertools; + use rand::RngCore; + use ruint::aliases::U256; + use std::borrow::Borrow; + + pub trait TestCircuit: Circuit { + fn min_k() -> u32; + + fn new(acc_encoding: Option, rng: impl RngCore) -> Self; + + fn instances(&self) -> Vec; + } + + #[allow(clippy::type_complexity)] + pub fn create_testdata_bdfg21>( + k: u32, + acc_encoding: Option, + mut rng: impl RngCore + Clone, + ) -> ( + ParamsKZG, + VerifyingKey, + Vec, + Vec, + ) { + let circuit = C::new(acc_encoding, rng.clone()); + let instances = circuit.instances(); + + let params = ParamsKZG::::setup(k, &mut rng); + let vk = keygen_vk(¶ms, &circuit).unwrap(); + let pk = keygen_pk(¶ms, vk.clone(), &circuit).unwrap(); + + let proof = { + let mut transcript = Keccak256Transcript::new(Vec::new()); + create_proof::<_, ProverSHPLONK<_>, _, _, _, _>( + ¶ms, + &pk, + &[circuit], + &[&[&instances]], + &mut rng, + &mut transcript, + ) + .unwrap(); + transcript.finalize() + }; + + let result = { + let mut transcript = Keccak256Transcript::new(proof.as_slice()); + verify_proof::<_, VerifierSHPLONK<_>, _, _, SingleStrategy<_>>( + ¶ms, + pk.get_vk(), + SingleStrategy::new(¶ms), + &[&[&instances]], + &mut transcript, + ) + }; + assert!(result.is_ok()); + + (params, vk, instances, proof) + } + + fn random_accumulator_limbs( + acc_encoding: AccumulatorEncoding, + mut rng: impl RngCore, + ) -> Vec + where + M: MultiMillerLoop, + ::Base: PrimeField, + M::Scalar: PrimeField, + { + let s = M::Scalar::random(&mut rng); + let g1 = M::G1Affine::generator(); + let g2 = M::G2Affine::generator(); + let neg_s_g2 = (g2 * -s).to_affine(); + let lhs_scalar = M::Scalar::random(&mut rng); + let rhs_scalar = lhs_scalar * s.invert().unwrap(); + let [lhs, rhs] = [lhs_scalar, 
rhs_scalar].map(|scalar| (g1 * scalar).to_affine()); + + assert!(bool::from( + M::multi_miller_loop(&[(&lhs, &g2.into()), (&rhs, &neg_s_g2.into())]) + .final_exponentiation() + .is_identity() + )); + + [lhs, rhs] + .into_iter() + .flat_map(|ec_point| ec_point_to_limbs(ec_point, acc_encoding.num_limb_bits)) + .collect() + } + + fn ec_point_to_limbs(ec_point: impl Borrow, num_limb_bits: usize) -> Vec + where + C: CurveAffine, + C::Base: PrimeField, + C::Scalar: PrimeField, + { + let coords = ec_point.borrow().coordinates().unwrap(); + [*coords.x(), *coords.y()] + .into_iter() + .flat_map(|coord| fe_to_limbs(coord, num_limb_bits)) + .collect() + } + + fn fe_to_limbs(fe: impl Borrow, num_limb_bits: usize) -> Vec + where + F1: PrimeField, + F2: PrimeField, + { + let big = U256::from_le_bytes(fe.borrow().to_repr()); + let mask = &((U256::from(1) << num_limb_bits) - U256::from(1)); + (0usize..) + .step_by(num_limb_bits) + .map(|shift| fe_from_u256((big >> shift) & mask)) + .take((F1::NUM_BITS as usize + num_limb_bits - 1) / num_limb_bits) + .collect_vec() + } + + fn fe_from_u256(u256: impl Borrow) -> F + where + F: PrimeField, + { + let bytes = u256.borrow().to_le_bytes::<32>(); + F::from_repr_vartime(bytes).unwrap() + } + + pub mod huge { + use crate::{ + codegen::AccumulatorEncoding, + test::halo2::{random_accumulator_limbs, TestCircuit}, + }; + use halo2_proofs::{ + arithmetic::CurveAffine, + circuit::{Layouter, SimpleFloorPlanner, Value}, + halo2curves::{ + ff::{Field, PrimeField}, + pairing::MultiMillerLoop, + }, + plonk::{ + self, Advice, Circuit, Column, ConstraintSystem, Expression, FirstPhase, Fixed, + Instance, SecondPhase, Selector, ThirdPhase, + }, + poly::Rotation, + }; + use itertools::{izip, Itertools}; + use rand::RngCore; + use std::{array, fmt::Debug, iter, mem}; + + #[derive(Clone, Debug, Default)] + pub struct HugeCircuit(Vec); + + impl TestCircuit for HugeCircuit + where + M: MultiMillerLoop, + ::Base: PrimeField, + M::Scalar: PrimeField, + { + fn 
min_k() -> u32 { + 6 + } + + fn new(acc_encoding: Option, mut rng: impl RngCore) -> Self { + let instances = if let Some(acc_encoding) = acc_encoding { + random_accumulator_limbs::(acc_encoding, rng) + } else { + iter::repeat_with(|| M::Scalar::random(&mut rng)) + .take(10) + .collect() + }; + Self(instances) + } + + fn instances(&self) -> Vec { + self.0.clone() + } + } + + impl Circuit for HugeCircuit + where + M::Scalar: PrimeField, + { + type Config = ( + [Selector; 10], + [Selector; 10], + [Column; 10], + [Column; 10], + Column, + ); + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "halo2_circuit_params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + unimplemented!() + } + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + let selectors = [(); 10].map(|_| meta.selector()); + let complex_selectors = [(); 10].map(|_| meta.complex_selector()); + let fixeds = [(); 10].map(|_| meta.fixed_column()); + let (advices, challenges) = (0..10) + .map(|idx| match idx % 3 { + 0 => ( + meta.advice_column_in(FirstPhase), + meta.challenge_usable_after(FirstPhase), + ), + 1 => ( + meta.advice_column_in(SecondPhase), + meta.challenge_usable_after(SecondPhase), + ), + 2 => ( + meta.advice_column_in(ThirdPhase), + meta.challenge_usable_after(ThirdPhase), + ), + _ => unreachable!(), + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); + let advices: [_; 10] = advices.try_into().unwrap(); + let challenges: [_; 10] = challenges.try_into().unwrap(); + let instance = meta.instance_column(); + + meta.create_gate("", |meta| { + let selectors = selectors.map(|selector| meta.query_selector(selector)); + let advices: [Expression; 10] = array::from_fn(|idx| { + let rotation = Rotation((idx as i32 - advices.len() as i32) / 2); + meta.query_advice(advices[idx], rotation) + }); + let challenges = challenges.map(|challenge| meta.query_challenge(challenge)); + + izip!( + selectors, + advices.iter().cloned(), + advices[1..].iter().cloned(), + 
advices[2..].iter().cloned(), + challenges.iter().cloned(), + challenges[1..].iter().cloned(), + challenges[2..].iter().cloned(), + ) + .map(|(q, a1, a2, a3, c1, c2, c3)| q * a1 * a2 * a3 * c1 * c2 * c3) + .collect_vec() + }); + + for ((q1, q2, q3), (f1, f2, f3), (a1, a2, a3)) in izip!( + complex_selectors.iter().tuple_windows(), + fixeds.iter().tuple_windows(), + advices.iter().tuple_windows() + ) { + meta.lookup_any("", |meta| { + izip!([q1, q2, q3], [f1, f2, f3], [a1, a2, a3]) + .map(|(q, f, a)| { + let q = meta.query_selector(*q); + let f = meta.query_fixed(*f, Rotation::cur()); + let a = meta.query_advice(*a, Rotation::cur()); + (q * a, f) + }) + .collect_vec() + }); + } + + fixeds.map(|column| meta.enable_equality(column)); + advices.map(|column| meta.enable_equality(column)); + meta.enable_equality(instance); + + (selectors, complex_selectors, fixeds, advices, instance) + } + + fn synthesize( + &self, + (selectors, complex_selectors, fixeds, advices, instance): Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), plonk::Error> { + let assigneds = layouter.assign_region( + || "", + |mut region| { + let offset = &mut 10; + let mut next_offset = || mem::replace(offset, *offset + 1); + + for q in selectors { + q.enable(&mut region, next_offset())?; + } + for q in complex_selectors { + q.enable(&mut region, next_offset())?; + } + for (idx, column) in izip!(1.., fixeds) { + let value = Value::known(M::Scalar::from(idx)); + region.assign_fixed(|| "", column, next_offset(), || value)?; + } + izip!(advices, &self.0) + .map(|(column, value)| { + let value = Value::known(*value); + region.assign_advice(|| "", column, next_offset(), || value) + }) + .try_collect::<_, Vec<_>, _>() + }, + )?; + for (idx, assigned) in izip!(0.., assigneds) { + layouter.constrain_instance(assigned.cell(), instance, idx)?; + } + Ok(()) + } + } + } + + pub mod maingate { + use crate::{ + codegen::AccumulatorEncoding, + test::halo2::{random_accumulator_limbs, TestCircuit}, + }; + use 
halo2_maingate::{ + MainGate, MainGateConfig, MainGateInstructions, RangeChip, RangeConfig, + RangeInstructions, RegionCtx, + }; + use halo2_proofs::{ + arithmetic::CurveAffine, + circuit::{Layouter, SimpleFloorPlanner, Value}, + halo2curves::{ + ff::{Field, PrimeField}, + pairing::MultiMillerLoop, + }, + plonk::{Circuit, ConstraintSystem, Error}, + }; + use itertools::Itertools; + use rand::RngCore; + use std::iter; + + #[derive(Clone)] + pub struct MainGateWithRangeConfig { + main_gate_config: MainGateConfig, + range_config: RangeConfig, + } + + impl MainGateWithRangeConfig { + fn configure( + meta: &mut ConstraintSystem, + composition_bits: Vec, + overflow_bits: Vec, + ) -> Self { + let main_gate_config = MainGate::::configure(meta); + let range_config = RangeChip::::configure( + meta, + &main_gate_config, + composition_bits, + overflow_bits, + ); + MainGateWithRangeConfig { + main_gate_config, + range_config, + } + } + + fn main_gate(&self) -> MainGate { + MainGate::new(self.main_gate_config.clone()) + } + + fn range_chip(&self) -> RangeChip { + RangeChip::new(self.range_config.clone()) + } + } + + #[derive(Clone, Default)] + pub struct MainGateWithRange { + instances: Vec, + } + + impl TestCircuit for MainGateWithRange + where + M: MultiMillerLoop, + ::Base: PrimeField, + M::Scalar: PrimeField, + { + fn min_k() -> u32 { + 9 + } + + fn new(acc_encoding: Option, mut rng: impl RngCore) -> Self { + let instances = if let Some(acc_encoding) = acc_encoding { + random_accumulator_limbs::(acc_encoding, rng) + } else { + iter::repeat_with(|| M::Scalar::random(&mut rng)) + .take(10) + .collect() + }; + Self { instances } + } + + fn instances(&self) -> Vec { + self.instances.clone() + } + } + + impl Circuit for MainGateWithRange + where + M::Scalar: PrimeField, + { + type Config = MainGateWithRangeConfig; + type FloorPlanner = SimpleFloorPlanner; + #[cfg(feature = "halo2_circuit_params")] + type Params = (); + + fn without_witnesses(&self) -> Self { + unimplemented!() + 
} + + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + MainGateWithRangeConfig::configure(meta, vec![8], vec![4, 7]) + } + + fn synthesize( + &self, + config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let main_gate = config.main_gate(); + let range_chip = config.range_chip(); + range_chip.load_table(&mut layouter)?; + + let advices = layouter.assign_region( + || "", + |region| { + let mut ctx = RegionCtx::new(region, 0); + + let advices = self + .instances + .iter() + .map(|value| main_gate.assign_value(&mut ctx, Value::known(*value))) + .try_collect::<_, Vec<_>, _>()?; + + // Dummy gates to make all fixed column with values + range_chip.decompose( + &mut ctx, + Value::known(M::Scalar::from(u64::MAX)), + 8, + 64, + )?; + range_chip.decompose( + &mut ctx, + Value::known(M::Scalar::from(u32::MAX as u64)), + 8, + 39, + )?; + let a = &advices[0]; + let b = main_gate.sub_sub_with_constant( + &mut ctx, + a, + a, + a, + M::Scalar::from(2), + )?; + let cond = main_gate.assign_bit(&mut ctx, Value::known(M::Scalar::ONE))?; + main_gate.select(&mut ctx, a, &b, &cond)?; + + Ok(advices) + }, + )?; + + for (offset, advice) in advices.into_iter().enumerate() { + main_gate.expose_public(layouter.namespace(|| ""), advice, offset)? + } + + Ok(()) + } + } + } +} diff --git a/src/transcript.rs b/src/transcript.rs new file mode 100644 index 0000000..f125329 --- /dev/null +++ b/src/transcript.rs @@ -0,0 +1,192 @@ +use halo2_proofs::{ + halo2curves::{ff::PrimeField, Coordinates, CurveAffine}, + transcript::{ + EncodedChallenge, Transcript, TranscriptRead, TranscriptReadBuffer, TranscriptWrite, + TranscriptWriterBuffer, + }, +}; +use itertools::{chain, Itertools}; +use ruint::aliases::U256; +use sha3::{Digest, Keccak256}; +use std::{ + io::{self, Read, Write}, + marker::PhantomData, + mem, +}; + +/// Transcript using Keccak256 as hash function in Fiat-Shamir transformation. 
+#[derive(Debug, Default)] +pub struct Keccak256Transcript { + stream: S, + buf: Vec, + _marker: PhantomData, +} + +impl Keccak256Transcript { + /// Return a `Keccak256Transcript` with empty buffer. + pub fn new(stream: S) -> Self { + Self { + stream, + buf: Vec::new(), + _marker: PhantomData, + } + } +} + +#[derive(Debug)] +pub struct ChallengeEvm(C::Scalar) +where + C: CurveAffine, + C::Scalar: PrimeField; + +impl EncodedChallenge for ChallengeEvm +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + type Input = [u8; 0x20]; + + fn new(challenge_input: &[u8; 0x20]) -> Self { + ChallengeEvm(u256_to_fe(U256::from_be_bytes(*challenge_input))) + } + + fn get_scalar(&self) -> C::Scalar { + self.0 + } +} + +impl Transcript> for Keccak256Transcript +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + fn squeeze_challenge(&mut self) -> ChallengeEvm { + let buf_len = self.buf.len(); + let data = chain![ + mem::take(&mut self.buf), + if buf_len == 0x20 { Some(1) } else { None } + ] + .collect_vec(); + let hash: [u8; 0x20] = Keccak256::digest(data).into(); + self.buf = hash.to_vec(); + ChallengeEvm::new(&hash) + } + + fn common_point(&mut self, ec_point: C) -> io::Result<()> { + let coords: Coordinates = Option::from(ec_point.coordinates()).ok_or_else(|| { + io::Error::new( + io::ErrorKind::Other, + "Invalid elliptic curve point".to_string(), + ) + })?; + [coords.x(), coords.y()].map(|coordinate| { + self.buf + .extend(coordinate.to_repr().as_ref().iter().rev().cloned()); + }); + Ok(()) + } + + fn common_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> { + self.buf.extend(scalar.to_repr().as_ref().iter().rev()); + Ok(()) + } +} + +impl TranscriptRead> for Keccak256Transcript +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + fn read_point(&mut self) -> io::Result { + let mut reprs = [::Repr::default(); 2]; + for repr in &mut reprs { + self.stream.read_exact(repr.as_mut())?; + repr.as_mut().reverse(); + } + let [x, y] = reprs.map(|repr| 
Option::from(C::Base::from_repr(repr))); + let ec_point = x + .zip(y) + .and_then(|(x, y)| Option::from(C::from_xy(x, y))) + .ok_or_else(|| { + io::Error::new( + io::ErrorKind::Other, + "Invalid elliptic curve point".to_string(), + ) + })?; + self.common_point(ec_point)?; + Ok(ec_point) + } + + fn read_scalar(&mut self) -> io::Result { + let mut data = [0; 0x20]; + self.stream.read_exact(data.as_mut())?; + data.reverse(); + let scalar = C::Scalar::from_repr_vartime(data) + .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Invalid scalar".to_string()))?; + Transcript::>::common_scalar(self, scalar)?; + Ok(scalar) + } +} + +impl TranscriptReadBuffer> for Keccak256Transcript +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + fn init(reader: R) -> Self { + Keccak256Transcript::new(reader) + } +} + +impl TranscriptWrite> for Keccak256Transcript +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + fn write_point(&mut self, ec_point: C) -> io::Result<()> { + self.common_point(ec_point)?; + let coords = ec_point.coordinates().unwrap(); + for coord in [coords.x(), coords.y()] { + let mut repr = coord.to_repr(); + repr.as_mut().reverse(); + self.stream.write_all(repr.as_ref())?; + } + Ok(()) + } + + fn write_scalar(&mut self, scalar: C::Scalar) -> io::Result<()> { + Transcript::>::common_scalar(self, scalar)?; + let mut data = scalar.to_repr(); + data.as_mut().reverse(); + self.stream.write_all(data.as_ref()) + } +} + +impl TranscriptWriterBuffer> for Keccak256Transcript +where + C: CurveAffine, + C::Scalar: PrimeField, +{ + fn init(writer: W) -> Self { + Keccak256Transcript::new(writer) + } + + fn finalize(self) -> W { + self.stream + } +} + +fn u256_to_fe(value: U256) -> F +where + F: PrimeField, +{ + let value = value % modulus::(); + F::from_repr(value.to_le_bytes::<0x20>()).unwrap() +} + +fn modulus() -> U256 +where + F: PrimeField, +{ + U256::from_le_bytes((-F::ONE).to_repr()) + U256::from(1) +} diff --git a/templates/Halo2Verifier.sol 
// templates/Halo2Verifier.sol — Askama template; `{{ ... }}` / `{%- ... %}`
// expressions are filled in by the Rust `SolidityGenerator` at codegen time.
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

contract Halo2Verifier {
    // Calldata pointers (cptr) into `proof` / `instances`.
    uint256 internal constant PROOF_LEN_CPTR = {{ proof_cptr - 1 }};
    uint256 internal constant PROOF_CPTR = {{ proof_cptr }};
    uint256 internal constant NUM_INSTANCE_CPTR = {{ proof_cptr + (proof_len / 32) }};
    uint256 internal constant INSTANCE_CPTR = {{ proof_cptr + (proof_len / 32) + 1 }};

    uint256 internal constant FIRST_QUOTIENT_X_CPTR = {{ quotient_comm_cptr }};
    uint256 internal constant LAST_QUOTIENT_X_CPTR = {{ quotient_comm_cptr + 2 * (num_quotients - 1) }};

    // Memory pointers (mptr) for the verifying key layout.
    uint256 internal constant VK_MPTR = {{ vk_mptr }};
    uint256 internal constant VK_DIGEST_MPTR = {{ vk_mptr }};
    uint256 internal constant K_MPTR = {{ vk_mptr + 1 }};
    uint256 internal constant N_INV_MPTR = {{ vk_mptr + 2 }};
    uint256 internal constant OMEGA_MPTR = {{ vk_mptr + 3 }};
    uint256 internal constant OMEGA_INV_MPTR = {{ vk_mptr + 4 }};
    uint256 internal constant OMEGA_INV_TO_L_MPTR = {{ vk_mptr + 5 }};
    uint256 internal constant NUM_INSTANCES_MPTR = {{ vk_mptr + 6 }};
    uint256 internal constant HAS_ACCUMULATOR_MPTR = {{ vk_mptr + 7 }};
    uint256 internal constant ACC_OFFSET_MPTR = {{ vk_mptr + 8 }};
    uint256 internal constant NUM_ACC_LIMBS_MPTR = {{ vk_mptr + 9 }};
    uint256 internal constant NUM_ACC_LIMB_BITS_MPTR = {{ vk_mptr + 10 }};
    uint256 internal constant G1_X_MPTR = {{ vk_mptr + 11 }};
    uint256 internal constant G1_Y_MPTR = {{ vk_mptr + 12 }};
    uint256 internal constant G2_X_1_MPTR = {{ vk_mptr + 13 }};
    uint256 internal constant G2_X_2_MPTR = {{ vk_mptr + 14 }};
    uint256 internal constant G2_Y_1_MPTR = {{ vk_mptr + 15 }};
    uint256 internal constant G2_Y_2_MPTR = {{ vk_mptr + 16 }};
    uint256 internal constant NEG_S_G2_X_1_MPTR = {{ vk_mptr + 17 }};
    uint256 internal constant NEG_S_G2_X_2_MPTR = {{ vk_mptr + 18 }};
    uint256 internal constant NEG_S_G2_Y_1_MPTR = {{ vk_mptr + 19 }};
    uint256 internal constant NEG_S_G2_Y_2_MPTR = {{ vk_mptr + 20 }};

    uint256 internal constant CHALLENGE_MPTR = {{ challenge_mptr }};

    // Memory pointers for transcript challenges and intermediate values.
    uint256 internal constant THETA_MPTR = {{ theta_mptr }};
    uint256 internal constant BETA_MPTR = {{ theta_mptr + 1 }};
    uint256 internal constant GAMMA_MPTR = {{ theta_mptr + 2 }};
    uint256 internal constant Y_MPTR = {{ theta_mptr + 3 }};
    uint256 internal constant X_MPTR = {{ theta_mptr + 4 }};
    {%- match scheme %}
    {%- when Bdfg21 %}
    uint256 internal constant ZETA_MPTR = {{ theta_mptr + 5 }};
    uint256 internal constant NU_MPTR = {{ theta_mptr + 6 }};
    uint256 internal constant MU_MPTR = {{ theta_mptr + 7 }};
    {%- when Gwc19 %}
    // TODO
    {%- endmatch %}

    uint256 internal constant ACC_LHS_X_MPTR = {{ theta_mptr + 8 }};
    uint256 internal constant ACC_LHS_Y_MPTR = {{ theta_mptr + 9 }};
    uint256 internal constant ACC_RHS_X_MPTR = {{ theta_mptr + 10 }};
    uint256 internal constant ACC_RHS_Y_MPTR = {{ theta_mptr + 11 }};
    uint256 internal constant X_N_MPTR = {{ theta_mptr + 12 }};
    uint256 internal constant X_N_MINUS_1_INV_MPTR = {{ theta_mptr + 13 }};
    uint256 internal constant L_LAST_MPTR = {{ theta_mptr + 14 }};
    uint256 internal constant L_BLIND_MPTR = {{ theta_mptr + 15 }};
    uint256 internal constant L_0_MPTR = {{ theta_mptr + 16 }};
    uint256 internal constant INSTANCE_EVAL_MPTR = {{ theta_mptr + 17 }};
    uint256 internal constant QUOTIENT_EVAL_MPTR = {{ theta_mptr + 18 }};
    uint256 internal constant QUOTIENT_X_MPTR = {{ theta_mptr + 19 }};
    uint256 internal constant QUOTIENT_Y_MPTR = {{ theta_mptr + 20 }};
    uint256 internal constant R_EVAL_MPTR = {{ theta_mptr + 21 }};
    uint256 internal constant PAIRING_LHS_X_MPTR = {{ theta_mptr + 22 }};
    uint256 internal constant PAIRING_LHS_Y_MPTR = {{ theta_mptr + 23 }};
    uint256 internal constant PAIRING_RHS_X_MPTR = {{ theta_mptr + 24 }};
    uint256 internal constant PAIRING_RHS_Y_MPTR = {{ theta_mptr + 25 }};

    function verifyProof(
        {%- match vk %}
        {%- when Some with (vk) %}
        {%- when None %}
        // When the vk is deployed separately, its contract address is passed in.
        address vk,
        {%- endmatch %}
        bytes calldata proof,
        uint256[] calldata instances
    ) public returns (bool) {
        assembly {
            // Read EC point (x, y) at (proof_cptr, proof_cptr + 0x20),
            // and check if the point is on affine plane,
            // and store them in (hash_mptr, hash_mptr + 0x20).
            // Return updated (success, proof_cptr, hash_mptr).
            function read_ec_point(success, proof_cptr, hash_mptr, q) -> ret0, ret1, ret2 {
                let x := calldataload(proof_cptr)
                let y := calldataload(add(proof_cptr, 0x20))
                ret0 := and(success, lt(x, q))
                ret0 := and(ret0, lt(y, q))
                // On-curve check: y^2 == x^3 + 3 (BN254 short Weierstrass, b = 3).
                ret0 := and(ret0, eq(mulmod(y, y, q), addmod(mulmod(x, mulmod(x, x, q), q), 3, q)))
                mstore(hash_mptr, x)
                mstore(add(hash_mptr, 0x20), y)
                ret1 := add(proof_cptr, 0x40)
                ret2 := add(hash_mptr, 0x40)
            }

            // Squeeze challenge by keccak256(memory[0..hash_mptr]),
            // and store hash mod r as challenge in challenge_mptr,
            // and push back hash in 0x00 as the first input for next squeeze.
            // Return updated (challenge_mptr, hash_mptr).
            function squeeze_challenge(challenge_mptr, hash_mptr, r) -> ret0, ret1 {
                let hash := keccak256(0x00, hash_mptr)
                mstore(challenge_mptr, mod(hash, r))
                mstore(0x00, hash)
                ret0 := add(challenge_mptr, 0x20)
                ret1 := 0x20
            }

            // Squeeze challenge without absorbing new input from calldata,
            // by putting an extra 0x01 in memory[0x20] and squeeze by keccak256(memory[0..0x21]),
            // and store hash mod r as challenge in challenge_mptr,
            // and push back hash in 0x00 as the first input for next squeeze.
            // Return updated (challenge_mptr).
            function squeeze_challenge_cont(challenge_mptr, r) -> ret {
                mstore8(0x20, 0x01)
                let hash := keccak256(0x00, 0x21)
                mstore(challenge_mptr, mod(hash, r))
                mstore(0x00, hash)
                ret := add(challenge_mptr, 0x20)
            }

            // Batch invert values in memory[mptr_start..mptr_end] in place.
            // Return updated (success).
            function batch_invert(success, mptr_start, mptr_end, r) -> ret {
                // Forward pass: store running prefix products above mptr_end.
                let gp_mptr := mptr_end
                let gp := mload(mptr_start)
                let mptr := add(mptr_start, 0x20)
                for
                    {}
                    lt(mptr, sub(mptr_end, 0x20))
                    {}
                {
                    gp := mulmod(gp, mload(mptr), r)
                    mstore(gp_mptr, gp)
                    mptr := add(mptr, 0x20)
                    gp_mptr := add(gp_mptr, 0x20)
                }
                gp := mulmod(gp, mload(mptr), r)

                // Invert the grand product with the modexp precompile (0x05):
                // gp^(r-2) mod r.
                mstore(gp_mptr, 0x20)
                mstore(add(gp_mptr, 0x20), 0x20)
                mstore(add(gp_mptr, 0x40), 0x20)
                mstore(add(gp_mptr, 0x60), gp)
                mstore(add(gp_mptr, 0x80), sub(r, 2))
                mstore(add(gp_mptr, 0xa0), r)
                ret := and(success, staticcall(gas(), 0x05, gp_mptr, 0xc0, gp_mptr, 0x20))
                let all_inv := mload(gp_mptr)

                // Backward pass: peel off individual inverses from all_inv.
                let first_mptr := mptr_start
                let second_mptr := add(first_mptr, 0x20)
                gp_mptr := sub(gp_mptr, 0x20)
                for
                    {}
                    lt(second_mptr, mptr)
                    {}
                {
                    let inv := mulmod(all_inv, mload(gp_mptr), r)
                    all_inv := mulmod(all_inv, mload(mptr), r)
                    mstore(mptr, inv)
                    mptr := sub(mptr, 0x20)
                    gp_mptr := sub(gp_mptr, 0x20)
                }
                let inv_first := mulmod(all_inv, mload(second_mptr), r)
                let inv_second := mulmod(all_inv, mload(first_mptr), r)
                mstore(first_mptr, inv_first)
                mstore(second_mptr, inv_second)
            }

            // Add (x, y) into point at (0x00, 0x20) via the ecAdd precompile (0x06).
            // Return updated (success).
            function ec_add_acc(success, x, y) -> ret {
                mstore(0x40, x)
                mstore(0x60, y)
                ret := and(success, staticcall(gas(), 0x06, 0x00, 0x80, 0x00, 0x40))
            }

            // Scale point at (0x00, 0x20) by scalar via the ecMul precompile (0x07).
            // Return updated (success).
            function ec_mul_acc(success, scalar) -> ret {
                mstore(0x40, scalar)
                ret := and(success, staticcall(gas(), 0x07, 0x00, 0x60, 0x00, 0x40))
            }

            // Add (x, y) into point at (0x80, 0xa0).
            // Return updated (success).
            function ec_add_tmp(success, x, y) -> ret {
                mstore(0xc0, x)
                mstore(0xe0, y)
                ret := and(success, staticcall(gas(), 0x06, 0x80, 0x80, 0x80, 0x40))
            }

            // Scale point at (0x80, 0xa0) by scalar.
            // Return updated (success).
            function ec_mul_tmp(success, scalar) -> ret {
                mstore(0xc0, scalar)
                ret := and(success, staticcall(gas(), 0x07, 0x80, 0x60, 0x80, 0x40))
            }

            // Perform pairing check via the ecPairing precompile (0x08):
            // e(lhs, G2) * e(rhs, -s*G2) == 1.
            // Return updated (success).
            function ec_pairing(success, lhs_x, lhs_y, rhs_x, rhs_y) -> ret {
                mstore(0x00, lhs_x)
                mstore(0x20, lhs_y)
                mstore(0x40, mload(G2_X_1_MPTR))
                mstore(0x60, mload(G2_X_2_MPTR))
                mstore(0x80, mload(G2_Y_1_MPTR))
                mstore(0xa0, mload(G2_Y_2_MPTR))
                mstore(0xc0, rhs_x)
                mstore(0xe0, rhs_y)
                mstore(0x100, mload(NEG_S_G2_X_1_MPTR))
                mstore(0x120, mload(NEG_S_G2_X_2_MPTR))
                mstore(0x140, mload(NEG_S_G2_Y_1_MPTR))
                mstore(0x160, mload(NEG_S_G2_Y_2_MPTR))
                ret := and(success, staticcall(gas(), 0x08, 0x00, 0x180, 0x00, 0x20))
                ret := and(ret, mload(0x00))
            }

            // Modulus
            let q := 21888242871839275222246405745257275088696311157297823662689037894645226208583 // BN254 base field
            let r := 21888242871839275222246405745257275088548364400416034343698204186575808495617 // BN254 scalar field

            // Initialize success as true
            let success := true

            {
                {%- match vk %}
                {%- when Some with (vk) %}
                // Load vk into memory
                {%- for (name, chunk) in vk.constants %}
                mstore({{ vk_mptr + loop.index0 }}, {{ chunk|hex_padded(64) }}) // {{ name }}
                {%- endfor %}
                {%- for (x, y) in vk.fixed_comms %}
                {%- let offset = vk.constants.len() %}
                mstore({{ vk_mptr + offset + 2 * loop.index0 }}, {{ x|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].x
                mstore({{ vk_mptr + offset + 2 * loop.index0 + 1 }}, {{ y|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].y
                {%- endfor %}
                {%- for (x, y) in vk.permutation_comms %}
                {%- let offset = vk.constants.len() + 2 * vk.fixed_comms.len() %}
                mstore({{ vk_mptr + offset + 2 * loop.index0 }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x
                mstore({{ vk_mptr + offset + 2 * loop.index0 + 1 }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y
                {%- endfor %}
                {%- when None %}
                // Copy vk into memory
                extcodecopy(vk, VK_MPTR, 0x00, {{ vk_len|hex() }})
                {%- endmatch %}

                // Check valid length of proof
                success := and(success, eq({{ proof_len|hex() }}, calldataload(PROOF_LEN_CPTR)))

                // Check valid length of instances
                let num_instances := mload(NUM_INSTANCES_MPTR)
                success := and(success, eq(num_instances, calldataload(NUM_INSTANCE_CPTR)))

                // Absorb vk digest
                mstore(0x00, mload(VK_DIGEST_MPTR))

                // Read instances and witness commitments and generate challenges
                let hash_mptr := 0x20
                let instance_cptr := INSTANCE_CPTR
                for
                    { let instance_cptr_end := add(instance_cptr, mul(0x20, num_instances)) }
                    lt(instance_cptr, instance_cptr_end)
                    {}
                {
                    let instance := calldataload(instance_cptr)
                    // Instances must be canonical scalar-field elements.
                    success := and(success, lt(instance, r))
                    mstore(hash_mptr, instance)
                    instance_cptr := add(instance_cptr, 0x20)
                    hash_mptr := add(hash_mptr, 0x20)
                }

                let proof_cptr := PROOF_CPTR
                let challenge_mptr := CHALLENGE_MPTR
                {%- for num_advices in num_advices %}
                {%- let num_challenges = num_challenges[loop.index0] %}

                // Phase {{ loop.index }}
                for
                    { let proof_cptr_end := add(proof_cptr, {{ (2 * 32 * num_advices)|hex() }}) }
                    lt(proof_cptr, proof_cptr_end)
                    {}
                {
                    success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr, q)
                }

                challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr, r)
                {%- for _ in 0..num_challenges - 1 %}
                challenge_mptr := squeeze_challenge_cont(challenge_mptr, r)
                {%- endfor %}
                {%- endfor %}

                // Read evaluations
                for
                    { let proof_cptr_end := add(proof_cptr, {{ (32 * num_evals)|hex() }}) }
                    lt(proof_cptr, proof_cptr_end)
                    {}
                {
                    let eval := calldataload(proof_cptr)
                    success := and(success, lt(eval, r))
                    mstore(hash_mptr, eval)
                    proof_cptr := add(proof_cptr, 0x20)
                    hash_mptr := add(hash_mptr, 0x20)
                }

                // Read batch opening proof and generate challenges
                {%- match scheme %}
                {%- when Bdfg21 %}
                challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr, r) // zeta
                challenge_mptr := squeeze_challenge_cont(challenge_mptr, r)                  // nu

                success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr, q) // W

                challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr, r) // mu

                success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr, q) // W'
                {%- when Gwc19 %}
                // TODO
                {%- endmatch %}

                // Read accumulator from instances
                if mload(HAS_ACCUMULATOR_MPTR) {
                    let num_limbs := mload(NUM_ACC_LIMBS_MPTR)
                    let num_limb_bits := mload(NUM_ACC_LIMB_BITS_MPTR)

                    // The four coordinates are laid out limb-wise, each
                    // occupying num_limbs consecutive instance words.
                    let cptr := add(INSTANCE_CPTR, mul(mload(ACC_OFFSET_MPTR), 0x20))
                    let lhs_y_off := mul(num_limbs, 0x20)
                    let rhs_x_off := mul(lhs_y_off, 2)
                    let rhs_y_off := mul(lhs_y_off, 3)
                    let lhs_x := calldataload(cptr)
                    let lhs_y := calldataload(add(cptr, lhs_y_off))
                    let rhs_x := calldataload(add(cptr, rhs_x_off))
                    let rhs_y := calldataload(add(cptr, rhs_y_off))
                    for
                        {
                            let cptr_end := add(cptr, mul(0x20, num_limbs))
                            let shift := num_limb_bits
                        }
                        lt(cptr, cptr_end)
                        {}
                    {
                        cptr := add(cptr, 0x20)
                        lhs_x := add(lhs_x, shl(shift, calldataload(cptr)))
                        lhs_y := add(lhs_y, shl(shift, calldataload(add(cptr, lhs_y_off))))
                        rhs_x := add(rhs_x, shl(shift, calldataload(add(cptr, rhs_x_off))))
                        rhs_y := add(rhs_y, shl(shift, calldataload(add(cptr, rhs_y_off))))
                        shift := add(shift, num_limb_bits)
                    }

                    // Both accumulator points must lie on the curve.
                    success := and(success, eq(mulmod(lhs_y, lhs_y, q), addmod(mulmod(lhs_x, mulmod(lhs_x, lhs_x, q), q), 3, q)))
                    success := and(success, eq(mulmod(rhs_y, rhs_y, q), addmod(mulmod(rhs_x, mulmod(rhs_x, rhs_x, q), q), 3, q)))

                    mstore(ACC_LHS_X_MPTR, lhs_x)
                    mstore(ACC_LHS_Y_MPTR, lhs_y)
                    mstore(ACC_RHS_X_MPTR, rhs_x)
                    mstore(ACC_RHS_Y_MPTR, rhs_y)
                }

                pop(q)
            }

            // Revert earlier if anything from calldata is invalid
            if iszero(success) {
                revert(0, 0)
            }

            // Compute lagrange evaluations and instance evaluation
            {
                // x_n = x^(2^k) = x^n by repeated squaring.
                let k := mload(K_MPTR)
                let x := mload(X_MPTR)
                let x_n := x
                for
                    { let idx := 0 }
                    lt(idx, k)
                    { idx := add(idx, 1) }
                {
                    x_n := mulmod(x_n, x_n, r)
                }

                let omega := mload(OMEGA_MPTR)

                // Fill X_N_MPTR.. with the denominators (x - omega^i), then
                // batch-invert them together with (x^n - 1).
                let mptr := X_N_MPTR
                let mptr_end := add(mptr, mul(0x20, add(mload(NUM_INSTANCES_MPTR), {{ num_neg_lagranges }})))
                for
                    { let pow_of_omega := mload(OMEGA_INV_TO_L_MPTR) }
                    lt(mptr, mptr_end)
                    { mptr := add(mptr, 0x20) }
                {
                    mstore(mptr, addmod(x, sub(r, pow_of_omega), r))
                    pow_of_omega := mulmod(pow_of_omega, omega, r)
                }
                let x_n_minus_1 := addmod(x_n, sub(r, 1), r)
                mstore(mptr_end, x_n_minus_1)
                success := batch_invert(success, X_N_MPTR, add(mptr_end, 0x20), r)

                // l_i(x) = (x^n - 1) / (n * (x - omega^i)) * omega^i
                mptr := X_N_MPTR
                let l_i_common := mulmod(x_n_minus_1, mload(N_INV_MPTR), r)
                for
                    { let pow_of_omega := mload(OMEGA_INV_TO_L_MPTR) }
                    lt(mptr, mptr_end)
                    { mptr := add(mptr, 0x20) }
                {
                    mstore(mptr, mulmod(l_i_common, mulmod(mload(mptr), pow_of_omega, r), r))
                    pow_of_omega := mulmod(pow_of_omega, omega, r)
                }

                // Sum the lagrange values on the blinding rows.
                let l_blind := mload(add(X_N_MPTR, 0x20))
                let l_i_cptr := add(X_N_MPTR, 0x40)
                for
                    { let l_i_cptr_end := add(X_N_MPTR, {{ (num_neg_lagranges * 32)|hex() }}) }
                    lt(l_i_cptr, l_i_cptr_end)
                    { l_i_cptr := add(l_i_cptr, 0x20) }
                {
                    l_blind := addmod(l_blind, mload(l_i_cptr), r)
                }

                // instance_eval = sum_i l_i(x) * instance_i
                let instance_eval := mulmod(mload(l_i_cptr), calldataload(INSTANCE_CPTR), r)
                let instance_cptr := add(INSTANCE_CPTR, 0x20)
                l_i_cptr := add(l_i_cptr, 0x20)
                for
                    { let instance_cptr_end := add(INSTANCE_CPTR, mul(0x20, mload(NUM_INSTANCES_MPTR))) }
                    lt(instance_cptr, instance_cptr_end)
                    {
                        instance_cptr := add(instance_cptr, 0x20)
                        l_i_cptr := add(l_i_cptr, 0x20)
                    }
                {
                    instance_eval := addmod(instance_eval, mulmod(mload(l_i_cptr), calldataload(instance_cptr), r), r)
                }

                let x_n_minus_1_inv := mload(mptr_end)
                let l_last := mload(X_N_MPTR)
                let l_0 := mload(add(X_N_MPTR, {{ (num_neg_lagranges * 32)|hex() }}))

                mstore(X_N_MPTR, x_n)
                mstore(X_N_MINUS_1_INV_MPTR, x_n_minus_1_inv)
                mstore(L_LAST_MPTR, l_last)
                mstore(L_BLIND_MPTR, l_blind)
                mstore(L_0_MPTR, l_0)
                mstore(INSTANCE_EVAL_MPTR, instance_eval)
            }

            // Compute quotient evaluation
            {
                // Declared without assignment: Yul initializes it to 0.
                let quotient_eval_numer
                let delta := 4131629893567559867359510883348571134090853742863529169391034518566172092834
                let y := mload(Y_MPTR)

                {%- for code_block in quotient_eval_numer_computations %}
                {
                    {%- for line in code_block %}
                    {{ line }}
                    {%- endfor %}
                }
                {%- endfor %}

                pop(y)
                pop(delta)

                let quotient_eval := mulmod(quotient_eval_numer, mload(X_N_MINUS_1_INV_MPTR), r)
                mstore(QUOTIENT_EVAL_MPTR, quotient_eval)
            }

            // Compute quotient commitment
            {
                // Horner's rule over the quotient chunks, scaled by x^n,
                // walking commitments from last to first.
                mstore(0x00, calldataload(LAST_QUOTIENT_X_CPTR))
                mstore(0x20, calldataload(add(LAST_QUOTIENT_X_CPTR, 0x20)))
                let x_n := mload(X_N_MPTR)
                for
                    {
                        let cptr := sub(LAST_QUOTIENT_X_CPTR, 0x40)
                        let cptr_end := sub(FIRST_QUOTIENT_X_CPTR, 0x40)
                    }
                    lt(cptr_end, cptr)
                    {}
                {
                    success := ec_mul_acc(success, x_n)
                    success := ec_add_acc(success, calldataload(cptr), calldataload(add(cptr, 0x20)))
                    cptr := sub(cptr, 0x40)
                }
                mstore(QUOTIENT_X_MPTR, mload(0x00))
                mstore(QUOTIENT_Y_MPTR, mload(0x20))
            }

            // Compute pairing lhs and rhs
            {
                {%- for code_block in pcs_computations %}
                {
                    {%- for line in code_block %}
                    {{ line }}
                    {%- endfor %}
                }
                {%- endfor %}
            }

            // Random linear combine with accumulator
            if mload(HAS_ACCUMULATOR_MPTR) {
                // Fiat-Shamir the four points to derive the combination challenge.
                mstore(0x00, mload(ACC_LHS_X_MPTR))
                mstore(0x20, mload(ACC_LHS_Y_MPTR))
                mstore(0x40, mload(ACC_RHS_X_MPTR))
                mstore(0x60, mload(ACC_RHS_Y_MPTR))
                mstore(0x80, mload(PAIRING_LHS_X_MPTR))
                mstore(0xa0, mload(PAIRING_LHS_Y_MPTR))
                mstore(0xc0, mload(PAIRING_RHS_X_MPTR))
                mstore(0xe0, mload(PAIRING_RHS_Y_MPTR))
                let challenge := mod(keccak256(0x00, 0x100), r)

                // [pairing_lhs] += challenge * [acc_lhs]
                success := ec_mul_acc(success, challenge)
                success := ec_add_acc(success, mload(PAIRING_LHS_X_MPTR), mload(PAIRING_LHS_Y_MPTR))
                mstore(PAIRING_LHS_X_MPTR, mload(0x00))
                mstore(PAIRING_LHS_Y_MPTR, mload(0x20))

                // [pairing_rhs] += challenge * [acc_rhs]
                mstore(0x00, mload(ACC_RHS_X_MPTR))
                mstore(0x20, mload(ACC_RHS_Y_MPTR))
                success := ec_mul_acc(success, challenge)
                success := ec_add_acc(success, mload(PAIRING_RHS_X_MPTR), mload(PAIRING_RHS_Y_MPTR))
                mstore(PAIRING_RHS_X_MPTR, mload(0x00))
                mstore(PAIRING_RHS_Y_MPTR, mload(0x20))
            }

            // Perform pairing
            success := ec_pairing(
                success,
                mload(PAIRING_LHS_X_MPTR),
                mload(PAIRING_LHS_Y_MPTR),
                mload(PAIRING_RHS_X_MPTR),
                mload(PAIRING_RHS_Y_MPTR)
            )

            // Revert if anything fails
            if iszero(success) {
                revert(0x00, 0x00)
            }

            // Return 1 as result if everything succeeds
            mstore(0x00, 1)
            return(0x00, 0x20)
        }
    }
}

// --- templates/Halo2VerifyingKey.sol ---
// Askama template for the standalone verifying-key contract: its constructor
// writes all vk words to memory and returns them as the deployed runtime code,
// which the verifier reads back with `extcodecopy`.
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

contract Halo2VerifyingKey {
    constructor() {
        assembly {
            {%- for (name, chunk) in constants %}
            mstore({{ (32 * loop.index0)|hex_padded(4) }}, {{ chunk|hex_padded(64) }}) // {{ name }}
            {%- endfor %}
            {%- for (x, y) in fixed_comms %}
            {%- let offset = constants.len() %}
            mstore({{ (32 * (offset + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].x
            mstore({{ (32 * (offset + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].y
            {%- endfor %}
            {%- for (x, y) in permutation_comms %}
            {%- let offset = constants.len() + 2 * fixed_comms.len() %}
            mstore({{ (32 * (offset + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x
            mstore({{ (32 * (offset + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y
            {%- endfor %}

            return(0, {{ (32 * (constants.len() + 2 * fixed_comms.len() + 2 * permutation_comms.len()))|hex() }})
        }
    }
}