diff --git a/.github/workflows/blueprint.yml b/.github/workflows/blueprint.yml index b49d4e8ab..35f392d5e 100644 --- a/.github/workflows/blueprint.yml +++ b/.github/workflows/blueprint.yml @@ -2,11 +2,10 @@ name: Compile blueprint on: push: - # branches: - # - main # Trigger on pushes to ANY branch now + branches: [main] workflow_dispatch: # Allow manual triggering of the workflow from the GitHub Actions interface - # pull_request: - # branches: [main] + pull_request: + branches: [main] # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: @@ -38,7 +37,7 @@ jobs: - name: check that all files are imported run: git diff --exit-code - + build_project: runs-on: ubuntu-latest name: Build project @@ -57,7 +56,6 @@ jobs: - name: Build project run: ~/.elan/bin/lake build ArkLib - - name: Build blueprint and copy to `home_page/blueprint` uses: xu-cheng/texlive-action@v2 with: diff --git a/.gitignore b/.gitignore index 6229f832d..d536449f9 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ /blueprint/src/web.pdf .nvim.lua +# Web files for local viewing +/blueprint/web/ diff --git a/AUTHORS b/AUTHORS index ca7cdb377..52a59d221 100644 --- a/AUTHORS +++ b/AUTHORS @@ -5,4 +5,6 @@ Bolton Bailey Frantisek Silvasi Ilia Vlasov Julian Sutherland -Chung Thai Nguyen \ No newline at end of file +Chung Thai Nguyen +Poulami Das +Mirco Richter diff --git a/ArkLib.lean b/ArkLib.lean index f45b27c5a..17379f6be 100644 --- a/ArkLib.lean +++ b/ArkLib.lean @@ -5,15 +5,34 @@ import ArkLib.CommitmentScheme.MerkleTree import ArkLib.CommitmentScheme.SimpleRO import ArkLib.CommitmentScheme.Tensor import ArkLib.CommitmentScheme.Trivial +import ArkLib.Data.Classes.DCast +import ArkLib.Data.Classes.HasSize +import ArkLib.Data.Classes.Initialize +import ArkLib.Data.Classes.Serde +import ArkLib.Data.Classes.Zeroize import ArkLib.Data.CodingTheory.Basic -import ArkLib.Data.CodingTheory.JohnsonBound +import ArkLib.Data.CodingTheory.BerlekampWelch.BerlekampWelch 
+import ArkLib.Data.CodingTheory.BerlekampWelch.Condition +import ArkLib.Data.CodingTheory.BerlekampWelch.ElocPoly +import ArkLib.Data.CodingTheory.BerlekampWelch.Existence +import ArkLib.Data.CodingTheory.BerlekampWelch.Sorries +import ArkLib.Data.CodingTheory.BerlekampWelch.ToMathlib +import ArkLib.Data.CodingTheory.DivergenceOfSets +import ArkLib.Data.CodingTheory.InterleavedCode +import ArkLib.Data.CodingTheory.JohnsonBound.Basic +import ArkLib.Data.CodingTheory.JohnsonBound.Choose2 +import ArkLib.Data.CodingTheory.JohnsonBound.Expectations +import ArkLib.Data.CodingTheory.JohnsonBound.Lemmas +import ArkLib.Data.CodingTheory.ListDecodability import ArkLib.Data.CodingTheory.ProximityGap import ArkLib.Data.CodingTheory.ReedMuller import ArkLib.Data.CodingTheory.ReedSolomon -import ArkLib.Data.CodingTheory import ArkLib.Data.EllipticCurve.BN254 -import ArkLib.Data.FieldTheory.BinaryTowerField.Basic -import ArkLib.Data.FieldTheory.BinaryTowerField.Prelude +import ArkLib.Data.FieldTheory.BinaryField.AdditiveNTT.NovelPolynomialBasis +import ArkLib.Data.FieldTheory.BinaryField.AdditiveNTT.Prelude +import ArkLib.Data.FieldTheory.BinaryField.Tower.Basic +import ArkLib.Data.FieldTheory.BinaryField.Tower.Impl +import ArkLib.Data.FieldTheory.BinaryField.Tower.Prelude import ArkLib.Data.FieldTheory.NonBinaryField.BLS12_377 import ArkLib.Data.FieldTheory.NonBinaryField.BLS12_381 import ArkLib.Data.FieldTheory.NonBinaryField.BN254 @@ -23,22 +42,32 @@ import ArkLib.Data.FieldTheory.NonBinaryField.Goldilocks import ArkLib.Data.FieldTheory.NonBinaryField.Mersenne import ArkLib.Data.FieldTheory.NonBinaryField.Secp256k1 import ArkLib.Data.Fin.Basic +import ArkLib.Data.Fin.Lift +import ArkLib.Data.Fin.Pad +import ArkLib.Data.Hash.DomainSep +import ArkLib.Data.Hash.DuplexSponge import ArkLib.Data.Math.Basic -import ArkLib.Data.Math.DepCast import ArkLib.Data.Math.HList +import ArkLib.Data.Matrix.Basic +import ArkLib.Data.Matrix.Sparse import ArkLib.Data.MlPoly.Basic import 
ArkLib.Data.MlPoly.Equiv import ArkLib.Data.MvPolynomial.Degrees import ArkLib.Data.MvPolynomial.Interpolation +import ArkLib.Data.MvPolynomial.LinearMvExtension import ArkLib.Data.MvPolynomial.Multilinear import ArkLib.Data.MvPolynomial.Notation import ArkLib.Data.MvPolynomial.Sumcheck import ArkLib.Data.Polynomial.EvenAndOdd +import ArkLib.Data.Polynomial.Interface import ArkLib.Data.Probability.Instances import ArkLib.Data.Probability.Notation +import ArkLib.Data.Tree.Binary +import ArkLib.Data.Tree.General import ArkLib.Data.UniPoly.Basic import ArkLib.Data.UniPoly.BasicOld import ArkLib.Data.UniPoly.PolynomialReflection +import ArkLib.OracleReduction.BCS.Basic import ArkLib.OracleReduction.Basic import ArkLib.OracleReduction.Cast import ArkLib.OracleReduction.Composition.Parallel.Basic @@ -47,15 +76,24 @@ import ArkLib.OracleReduction.Composition.Sequential.General import ArkLib.OracleReduction.Composition.Sequential.ProtocolSpec import ArkLib.OracleReduction.Equiv import ArkLib.OracleReduction.Execution -import ArkLib.OracleReduction.LiftContext.Basic +import ArkLib.OracleReduction.FiatShamir.Basic +import ArkLib.OracleReduction.FiatShamir.DuplexSponge.State +import ArkLib.OracleReduction.LiftContext.Lens +import ArkLib.OracleReduction.LiftContext.OracleReduction +import ArkLib.OracleReduction.LiftContext.Reduction import ArkLib.OracleReduction.OracleInterface import ArkLib.OracleReduction.Prelude +import ArkLib.OracleReduction.ProtocolSpec import ArkLib.OracleReduction.Security.Basic -import ArkLib.OracleReduction.Transform.BCS -import ArkLib.OracleReduction.Transform.FiatShamir +import ArkLib.OracleReduction.Security.Implications +import ArkLib.OracleReduction.Security.Rewinding +import ArkLib.OracleReduction.Security.RoundByRound +import ArkLib.OracleReduction.Security.StateRestoration import ArkLib.OracleReduction.VectorIOR -import ArkLib.ProofSystem.Component.CheckPred +import ArkLib.ProofSystem.Component.CheckClaim +import 
ArkLib.ProofSystem.Component.DoNothing import ArkLib.ProofSystem.Component.RandomQuery +import ArkLib.ProofSystem.Component.ReduceClaim import ArkLib.ProofSystem.Component.SendClaim import ArkLib.ProofSystem.Component.SendWitness import ArkLib.ProofSystem.ConstraintSystem.IndexedLookup @@ -65,12 +103,32 @@ import ArkLib.ProofSystem.DSL import ArkLib.ProofSystem.Fri.RoundConsistency import ArkLib.ProofSystem.Plonk.Basic import ArkLib.ProofSystem.Spartan.Basic -import ArkLib.ProofSystem.Sumcheck.Basic -import ArkLib.ProofSystem.Sumcheck.SingleRound +import ArkLib.ProofSystem.Stir +import ArkLib.ProofSystem.Stir.Combine +import ArkLib.ProofSystem.Stir.Folding +import ArkLib.ProofSystem.Stir.MainThm +import ArkLib.ProofSystem.Stir.OutOfDomSmpl +import ArkLib.ProofSystem.Stir.ProximityBound +import ArkLib.ProofSystem.Stir.ProximityGap +import ArkLib.ProofSystem.Stir.Quotienting +import ArkLib.ProofSystem.Sumcheck.Impl.Basic +import ArkLib.ProofSystem.Sumcheck.Spec.General +import ArkLib.ProofSystem.Sumcheck.Spec.SingleRound +import ArkLib.ProofSystem.Whir +import ArkLib.ProofSystem.Whir.BlockRelDistance +import ArkLib.ProofSystem.Whir.Folding +import ArkLib.ProofSystem.Whir.GenMutualCorrAgreement +import ArkLib.ProofSystem.Whir.MainThm +import ArkLib.ProofSystem.Whir.OutofDomainSmpl +import ArkLib.ProofSystem.Whir.ProximityGap +import ArkLib.ProofSystem.Whir.ProximityGen import ArkLib.ToMathlib.BigOperators.Fin import ArkLib.ToMathlib.Finset.Basic import ArkLib.ToMathlib.Finsupp.Fin import ArkLib.ToMathlib.MvPolynomial.Equiv import ArkLib.ToMathlib.NumberTheory.PrattCertificate import ArkLib.ToMathlib.UInt.Equiv +import ArkLib.ToVCVio.DistEq +import ArkLib.ToVCVio.Lemmas import ArkLib.ToVCVio.Oracle +import ArkLib.ToVCVio.SimOracle diff --git a/ArkLib/CommitmentScheme/Basic.lean b/ArkLib/CommitmentScheme/Basic.lean index 6f91d8fe5..b15ad8070 100644 --- a/ArkLib/CommitmentScheme/Basic.lean +++ b/ArkLib/CommitmentScheme/Basic.lean @@ -29,24 +29,24 @@ namespace Commitment 
open OracleSpec OracleComp SubSpec -variable {n : ℕ} {ι : Type} +variable {ι : Type} (oSpec : OracleSpec ι) (Data Randomness Commitment : Type) -structure Commit (oSpec : OracleSpec ι) (Data Randomness Commitment : Type) where +structure Commit where commit : Data → Randomness → OracleComp oSpec Commitment -structure Opening (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) (Data : Type) - [O : OracleInterface Data] (Randomness Commitment : Type) where - opening : Proof pSpec oSpec (Commitment × O.Query × O.Response) (Data × Randomness) +variable [O : OracleInterface Data] {n : ℕ} (pSpec : ProtocolSpec n) + +structure Opening where + opening : Proof oSpec (Commitment × O.Query × O.Response) (Data × Randomness) pSpec -- abbrev Statement (Data Commitment : Type) [O : OracleInterface Data] := -- Commitment × O.Query × O.Response -- abbrev Witness (Data Randomness : Type) := Data × Randomness -structure Scheme (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) (Data : Type) [O : OracleInterface Data] - (Randomness Commitment : Type) extends - Commit oSpec Data Randomness Commitment, - Opening pSpec oSpec Data Randomness Commitment +structure Scheme extends + Commit oSpec Data Randomness Commitment, + Opening oSpec Data Randomness Commitment pSpec section Security @@ -54,14 +54,16 @@ noncomputable section open scoped NNReal -variable {pSpec : ProtocolSpec n} [∀ i, VCVCompatible (pSpec.Challenge i)] [DecidableEq ι] - {oSpec : OracleSpec ι} {Data : Type} [O : OracleInterface Data] {Randomness : Type} [Fintype Randomness] - {Commitment : Type} [oSpec.FiniteRange] +variable [∀ i, VCVCompatible (pSpec.Challenge i)] [DecidableEq ι] + {oSpec : OracleSpec ι} {Data : Type} [O : OracleInterface Data] {Randomness : Type} + [Fintype Randomness] + {Commitment : Type} [oSpec.FiniteRange] {n : ℕ} {pSpec : ProtocolSpec n} + [∀ i, VCVCompatible (pSpec.Challenge i)] /-- A commitment scheme satisfies **correctness** with error `correctnessError` if for all `data : Data`, `randomness : 
Randomness`, and `query : O.Query`, the probability of accepting upon executing the commitment and opening procedures honestly is at least `1 - correctnessError`. -/ -def correctness (scheme : Scheme pSpec oSpec Data Randomness Commitment) +def correctness (scheme : Scheme oSpec Data Randomness Commitment pSpec) (correctnessError : ℝ≥0) : Prop := ∀ data : Data, ∀ randomness : Randomness, @@ -72,14 +74,15 @@ def correctness (scheme : Scheme pSpec oSpec Data Randomness Commitment) return result] ≥ 1 - correctnessError /-- A commitment scheme satisfies **perfect correctness** if it satisfies correctness with no error. - -/ -def perfectCorrectness (scheme : Scheme pSpec oSpec Data Randomness Commitment) : Prop := +-/ +def perfectCorrectness (scheme : Scheme oSpec Data Randomness Commitment pSpec) : Prop := correctness scheme 0 /-- An adversary in the (evaluation) binding game returns a commitment `cm`, a query `q`, two purported responses `r₁, r₂` to the query, and an auxiliary private state (to be passed to the malicious prover in the opening procedure). -/ -def BindingAdversary (oSpec : OracleSpec ι) (Data Commitment AuxState : Type) [O : OracleInterface Data] := +def BindingAdversary (oSpec : OracleSpec ι) (Data Commitment AuxState : Type) + [O : OracleInterface Data] := OracleComp oSpec (Commitment × O.Query × O.Response × O.Response × AuxState) /-- A commitment scheme satisfies **(evaluation) binding** with error `bindingError` if for all @@ -94,11 +97,11 @@ def BindingAdversary (oSpec : OracleSpec ι) (Data Commitment AuxState : Type) [ Informally, evaluation binding says that it's computationally infeasible to open a commitment to two different responses for the same query. 
-/ -def binding (scheme : Scheme pSpec oSpec Data Randomness Commitment) +def binding (scheme : Scheme oSpec Data Randomness Commitment pSpec) (bindingError : ℝ≥0): Prop := ∀ AuxState : Type, ∀ adversary : BindingAdversary oSpec Data Commitment AuxState, - ∀ prover : Prover pSpec oSpec (Commitment × O.Query × O.Response) AuxState Bool Unit, + ∀ prover : Prover oSpec (Commitment × O.Query × O.Response) AuxState Bool Unit pSpec, False -- [ fun ⟨x, x', b₁, b₂⟩ => x ≠ x' ∧ b₁ ∧ b₂ | do -- let result ← liftM adversary @@ -133,12 +136,12 @@ def ExtractabilityAdversary (oSpec : OracleSpec ι) (Data Commitment AuxState : Informally, extractability says that if an adversary can convince the verifier to accept an opening, then the extractor must be able to recover some underlying data that is consistent with the evaluation query. -/ -def extractability (scheme : Scheme pSpec oSpec Data Randomness Commitment) +def extractability (scheme : Scheme oSpec Data Randomness Commitment pSpec) (extractabilityError : ℝ≥0) : Prop := ∃ extractor : StraightlineExtractor oSpec Data Commitment, ∀ AuxState : Type, ∀ adversary : ExtractabilityAdversary oSpec Data Commitment AuxState, - ∀ prover : Prover pSpec oSpec (Commitment × O.Query × O.Response) AuxState Bool Unit, + ∀ prover : Prover oSpec (Commitment × O.Query × O.Response) AuxState Bool Unit pSpec, False -- [ fun ⟨b, d, q, r⟩ => b ∧ O.oracle d q = r | do -- let result ← liftM (simulate loggingOracle ∅ adversary) @@ -156,7 +159,7 @@ def extractability (scheme : Scheme pSpec oSpec Data Randomness Commitment) /-- A commitment scheme satisfies **hiding** with error `hidingError` if .... Note: have to put it as `hiding'` because `hiding` is already used somewhere else. 
-/ -def hiding' (scheme : Scheme pSpec oSpec Data Randomness Commitment) : Prop := sorry +def hiding' (scheme : Scheme oSpec Data Randomness Commitment pSpec) : Prop := sorry #check seededOracle diff --git a/ArkLib/CommitmentScheme/MerkleTree.lean b/ArkLib/CommitmentScheme/MerkleTree.lean index 5588f7718..3014baa5b 100644 --- a/ArkLib/CommitmentScheme/MerkleTree.lean +++ b/ArkLib/CommitmentScheme/MerkleTree.lean @@ -7,6 +7,8 @@ Authors: Quang Dao import VCVio import ArkLib.Data.Math.Basic import ArkLib.CommitmentScheme.Basic +import ArkLib.Data.Tree.Binary +import ArkLib.Data.Tree.General import Mathlib.Data.Vector.Snoc /-! @@ -68,11 +70,13 @@ def Cache.upper (n : ℕ) (cache : Cache α (n + 1)) : def Cache.leaves (n : ℕ) (cache : Cache α (n + 1)) : List.Vector α (2 ^ (n + 1)) := cache (Fin.last _) +omit [DecidableEq α] [Inhabited α] [Fintype α] in @[simp] lemma Cache.upper_cons (n : ℕ) (leaves : List.Vector α (2 ^ (n + 1))) (cache : Cache α n) : Cache.upper α n (Cache.cons α n leaves cache) = cache := by simp [Cache.upper, Cache.cons] +omit [DecidableEq α] [Inhabited α] [Fintype α] in @[simp] lemma Cache.leaves_cons (n : ℕ) (leaves : List.Vector α (2 ^ (n + 1))) (cache : Cache α n) : Cache.leaves α n (Cache.cons α n leaves cache) = leaves := by @@ -130,9 +134,9 @@ theorem getRoot_trivial (a : α) : getRoot α <$> (buildMerkleTree α 0 ⟨[a], @[simp] theorem getRoot_single (a b : α) : getRoot α <$> buildMerkleTree α 1 ⟨[a, b], rfl⟩ = (query (spec := spec α) () (a, b)) := by - simp [buildMerkleTree, buildLayer, List.Vector.ofFn, List.Vector.head, List.Vector.get] + simp [buildMerkleTree, buildLayer, List.Vector.ofFn, List.Vector.get] unfold Cache.cons getRoot - simp [map_flatMap, Fin.snoc] + simp [Fin.snoc] section @@ -144,7 +148,7 @@ def generateProof {n : ℕ} (i : Fin (2 ^ n)) (cache : Cache α n) : List.Vector α n := match n with | 0 => List.Vector.nil - | n + 1 => List.Vector.snoc (generateProof (i/2) (cache.upper)) + | n + 1 => List.Vector.snoc (generateProof ⟨i.val / 
2, by omega⟩ (cache.upper)) ((cache.leaves).get (findNeighbors i (Fin.last _))) /-- @@ -155,21 +159,22 @@ according to its index. -/ def getPutativeRoot {n : ℕ} (i : Fin (2 ^ n)) (leaf : α) (proof : List.Vector α n) : OracleComp (spec α) α := do - if h : n = 0 then + match h : n with + | 0 => do -- When we have an empty proof, the root is just the leaf return leaf - else + | n + 1 => do -- Get the sign bit of `i` let signBit := i.val % 2 -- Show that `i / 2` is in `Fin (2 ^ (n - 1))` - let i' : Fin (2 ^ (n - 1)) := i.val / 2 + let i' : Fin (2 ^ n) := ⟨i.val / 2, by omega⟩ if signBit = 0 then -- `i` is a left child - let newLeaf ← query (spec := spec α) () ⟨leaf, proof.get ⟨n - 1, by omega⟩⟩ + let newLeaf ← query (spec := spec α) () ⟨leaf, proof.get (Fin.last n)⟩ getPutativeRoot i' newLeaf (proof.drop 1) else -- `i` is a right child - let newLeaf ← query (spec := spec α) () ⟨proof.get ⟨n - 1, by omega⟩, leaf⟩ + let newLeaf ← query (spec := spec α) () ⟨proof.get (Fin.last n), leaf⟩ getPutativeRoot i' newLeaf (proof.drop 1) /-- Verify a Merkle proof `proof` that a given `leaf` at index `i` is in the Merkle tree with given @@ -187,7 +192,7 @@ theorem buildLayer_neverFails (α : Type) [inst : DecidableEq α] [inst_1 : Sele (leaves : List.Vector α (2 ^ (n + 1))) : ((simulateQ randomOracle (buildLayer α n leaves)).run preexisting_cache).neverFails := by simp_rw [buildLayer] - simp [neverFails_query_bind] + simp only [range_def, Prod.mk.eta, eq_mp_eq_cast, cast_eq, bind_pure] -- I now require a "neverFails_mmap_iff" lemma, but I don't see one in VCVio. -- Feels like evidence for avoiding Vector-based merkle trees in favor of inductive-based ones. sorry @@ -204,7 +209,7 @@ theorem buildMerkleTree_neverFails (α : Type) [DecidableEq α] [SelectableType -- and therefore can't fail. 
induction n generalizing preexisting_cache with | zero => - simp [buildMerkleTree, getRoot, generateProof, verifyProof, getPutativeRoot] + simp [buildMerkleTree] | succ n ih => simp [buildMerkleTree, neverFails_bind_iff] constructor @@ -250,8 +255,10 @@ theorem completeness [SelectableType α] {n : ℕ} -- simp [Fin.instGetElemFinVal] -- | succ n ih => -- -- simp_all [Fin.getElem_fin, Vector.getElem_eq_get, Fin.eta, getRoot, Fin.val_zero, --- -- Nat.pow_zero, Fin.zero_eta, Vector.get_zero, bind_pure_comp, id_map', probFailure_eq_zero_iff, --- -- neverFails_bind_iff, buildMerkleTree, generateProof, LawfulMonad.bind_assoc, bind_map_left, +-- -- Nat.pow_zero, Fin.zero_eta, Vector.get_zero, bind_pure_comp, +-- id_map', probFailure_eq_zero_iff, +-- -- neverFails_bind_iff, buildMerkleTree, generateProof, +-- LawfulMonad.bind_assoc, bind_map_left, -- -- Cache.upper_cons, Cache.leaves_cons] -- -- refine ⟨?_, ?_⟩ -- -- · @@ -312,78 +319,26 @@ end MerkleTree -- Alternative definition of Merkle tree using inductive type -/-- A binary tree with (possibly null) values stored at both leaf and internal nodes. -/ -inductive BinaryTree (α : Type*) where - | leaf : α → BinaryTree α - | node : α → BinaryTree α → BinaryTree α → BinaryTree α - | nil : BinaryTree α - deriving Inhabited, Repr - --- Example: --- 1 --- / \ --- 2 3 --- / \ \ --- 4 5 6 --- / --- 7 --- A tree with values at both leaf and internal nodes -def BinaryTree.example : BinaryTree (Fin 10) := - .node 1 - (.node 2 - (.leaf 4) - (.node 5 (.leaf 7) .nil)) - (.node 3 - .nil - (.leaf 6)) - -/-- A binary tree where only leaf nodes can have values. - -Used as input to Merkle tree construction. -/ -inductive LeafTree (α : Type*) where - | leaf : α → LeafTree α - | node : LeafTree α → LeafTree α → LeafTree α - | nil : LeafTree α -deriving Inhabited, Repr - variable {α : Type} -/-- Get the root value of a Merkle tree, if it exists. 
-/ -def getRoot : BinaryTree α → Option α - | BinaryTree.nil => none - | BinaryTree.leaf a => some a - | BinaryTree.node a _ _ => some a - -/-- Find the path from root to a leaf with the given value. -/ -def findPath [DecidableEq α] (a : α) : BinaryTree α → Option (List Bool) - | BinaryTree.nil => none - | BinaryTree.leaf b => if a = b then some [] else none - | BinaryTree.node _ left right => - match findPath a left with - | some path => some (false :: path) - | none => - match findPath a right with - | some path => some (true :: path) - | none => none - /-- Helper function to get the proof for a value at a given path. -/ def getProofHelper [DecidableEq α] : List Bool → BinaryTree α → List α | _, BinaryTree.nil => [] | _, BinaryTree.leaf _ => [] | [], BinaryTree.node _ _ _ => [] | false :: rest, BinaryTree.node _ l r => - match getRoot r with + match BinaryTree.getRoot r with | none => getProofHelper rest l | some v => v :: getProofHelper rest l | true :: rest, BinaryTree.node _ l r => - match getRoot l with + match BinaryTree.getRoot l with | none => getProofHelper rest r | some v => v :: getProofHelper rest r /-- Generate a Merkle proof for a leaf with value 'a'. The proof consists of the sibling hashes needed to recompute the root. 
-/ def generateProof [DecidableEq α] (a : α) (tree : BinaryTree α) : Option (List α) := - match findPath a tree with + match BinaryTree.findPath a tree with | none => none | some path => some (getProofHelper path tree) diff --git a/ArkLib/CommitmentScheme/SimpleRO.lean b/ArkLib/CommitmentScheme/SimpleRO.lean index ba968bd5e..c6f091588 100644 --- a/ArkLib/CommitmentScheme/SimpleRO.lean +++ b/ArkLib/CommitmentScheme/SimpleRO.lean @@ -55,9 +55,7 @@ local instance : OracleInterface α where Response := α oracle := fun x _ => x -def emptyPSpec : ProtocolSpec 0 := fun i => Fin.elim0 i - -def commitmentScheme : Commitment.Scheme emptyPSpec (oSpec α β γ) α β γ where +def commitmentScheme : Commitment.Scheme (oSpec α β γ) α β γ ![] where commit := fun v r => commit v opening := .mk (sorry) (.mk (sorry)) diff --git a/ArkLib/Data/Math/DepCast.lean b/ArkLib/Data/Classes/DCast.lean similarity index 73% rename from ArkLib/Data/Math/DepCast.lean rename to ArkLib/Data/Classes/DCast.lean index 521a92455..a26aea891 100644 --- a/ArkLib/Data/Math/DepCast.lean +++ b/ArkLib/Data/Classes/DCast.lean @@ -38,23 +38,23 @@ theorem congrArg₄ {α β γ δ ε : Sort*} {f : α → β → γ → δ → ε end Prelude -/-- `DepCast` is a type class for custom cast operations on an indexed type family `β a` +/-- `DCast` is a type class for custom cast operations on an indexed type family `β a` We require the cast operation `dcast`, along with the property that `dcast` reduces to `id` under reflexivity. 
-/ -class DepCast (α : Sort*) (β : α → Sort*) where +class DCast (α : Sort*) (β : α → Sort*) where dcast : ∀ {a a' : α}, a = a' → β a → β a' dcast_id : ∀ {a : α}, dcast (Eq.refl a) = id -export DepCast (dcast dcast_id) +export DCast (dcast dcast_id) attribute [simp] dcast_id -section DepCast +section DCast -variable {α : Sort*} {β : α → Sort*} [DepCast α β] {a a' a'' : α} {b : β a} {b' : β a'} +variable {α : Sort*} {β : α → Sort*} [DCast α β] {a a' a'' : α} {b : β a} {b' : β a'} -/-- The default instance for `DepCast` -/ -instance (priority := low) : DepCast α β where +/-- The default instance for `DCast` -/ +instance (priority := low) : DCast α β where dcast h := cast (congrArg β h) dcast_id := by intro a; funext b; simp only [cast_eq, id_eq] @@ -92,28 +92,28 @@ theorem dcast_fun₂ {a₁ a₂ a₁' a₂' : α} {h₁ : a₁ = a₁'} {h₂ : theorem dcast_eq_root_cast (h : a = a') : dcast h b = _root_.cast (congrArg β h) b := by cases h; simp -end DepCast +end DCast -/-- `DepCast₂` is a type class for custom cast operations on a doubly-indexed type family `γ a b`, - given an underlying dependent cast `DepCast α β` +/-- `DCast₂` is a type class for custom cast operations on a doubly-indexed type family `γ a b`, + given an underlying dependent cast `DCast α β` We require the cast operation `dcast₂`, along with the property that `dcast₂` reduces to `id` under reflexivity. 
-/ -class DepCast₂ (α : Sort*) (β : α → Sort*) (γ : (a : α) → β a → Sort*) [DepCast α β] where +class DCast₂ (α : Sort*) (β : α → Sort*) (γ : (a : α) → β a → Sort*) [DCast α β] where dcast₂ : ∀ {a a' : α} {b : β a} {b' : β a'}, (ha : a = a') → (dcast ha b = b') → γ a b → γ a' b' dcast₂_id : ∀ {a : α} {b : β a}, dcast₂ (Eq.refl a) dcast_eq = (id : γ a b → γ a b) -export DepCast₂ (dcast₂ dcast₂_id) +export DCast₂ (dcast₂ dcast₂_id) attribute [simp] dcast₂_id -section DepCast₂ +section DCast₂ -variable {α : Sort*} {β : α → Sort*} {γ : (a : α) → β a → Sort*} [DepCast α β] [DepCast₂ α β γ] +variable {α : Sort*} {β : α → Sort*} {γ : (a : α) → β a → Sort*} [DCast α β] [DCast₂ α β γ] {a a' a'' : α} {b : β a} {b' : β a'} {b'' : β a''} {c : γ a b} {c' : γ a' b'} {c'' : γ a'' b''} -/-- Default instance for `DepCast₂` -/ -instance (priority := low) : DepCast₂ α β γ where +/-- Default instance for `DCast₂` -/ +instance (priority := low) : DCast₂ α β γ where dcast₂ ha hb c := by refine cast ?_ c cases ha; simp at hb; cases hb; rfl @@ -147,20 +147,20 @@ theorem dcast₂_eq_dcast₂_iff (ha : a = a'') (ha' : a' = a'') theorem dcast₂_dcast : dcast₂ rfl rfl c = dcast dcast_eq.symm c := by rw! 
[dcast_eq]; simp -instance instDepCast₁₂ : DepCast (β a) (γ a) where +instance instDCast₁₂ : DCast (β a) (γ a) where dcast hb c := dcast₂ (Eq.refl a) (by simp [hb]) c dcast_id := by intros; ext c; exact dcast₂_eq -instance instDepCastPSigma : DepCast ((a : α) ×' β a) (fun a => γ a.1 a.2) where +instance instDCastPSigma : DCast ((a : α) ×' β a) (fun a => γ a.1 a.2) where dcast hab c := dcast₂ (by cases hab; simp) (by cases hab; simp) c dcast_id := by intros; ext c; simp -instance instDepCastSigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} - [DepCast α β] [DepCast₂ α β γ] : DepCast ((a : α) × β a) (fun a => γ a.1 a.2) where +instance instDCastSigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} + [DCast α β] [DCast₂ α β γ] : DCast ((a : α) × β a) (fun a => γ a.1 a.2) where dcast hab c := dcast₂ (by cases hab; simp) (by cases hab; simp) c dcast_id := by intros; ext c; simp -instance instDepCastForall : DepCast α (fun a => ∀ b : β a, γ a b) where +instance instDCastForall : DCast α (fun a => ∀ b : β a, γ a b) where dcast ha f := fun b => dcast₂ ha ((dcast_trans ha.symm ha).trans dcast_eq) (f (dcast ha.symm b)) dcast_id := by intros a; funext f; funext b @@ -168,34 +168,34 @@ instance instDepCastForall : DepCast α (fun a => ∀ b : β a, γ a b) where rw! [dcast_eq] simp -end DepCast₂ +end DCast₂ -/-- `DepCast₃` is a type class for custom cast operations on a triply-indexed type family `δ a b c`, - given underlying dependent casts `DepCast α β` and `DepCast₂ α β γ` +/-- `DCast₃` is a type class for custom cast operations on a triply-indexed type family `δ a b c`, + given underlying dependent casts `DCast α β` and `DCast₂ α β γ` We require the cast operation `dcast₃`, along with the property that `dcast₃` reduces to `id` under reflexivity. 
-/ -class DepCast₃ (α : Sort*) (β : α → Sort*) (γ : (a : α) → β a → Sort*) - (δ : (a : α) → (b : β a) → γ a b → Sort*) [DepCast α β] [DepCast₂ α β γ] where +class DCast₃ (α : Sort*) (β : α → Sort*) (γ : (a : α) → β a → Sort*) + (δ : (a : α) → (b : β a) → γ a b → Sort*) [DCast α β] [DCast₂ α β γ] where dcast₃ : ∀ {a a' : α} {b : β a} {b' : β a'} {c : γ a b} {c' : γ a' b'}, (ha : a = a') → (hb : dcast ha b = b') → (hc : dcast₂ ha hb c = c') → δ a b c → δ a' b' c' dcast₃_id : ∀ {a : α} {b : β a} {c : γ a b}, dcast₃ (Eq.refl a) dcast_eq dcast₂_eq = (id : δ a b c → δ a b c) -export DepCast₃ (dcast₃ dcast₃_id) +export DCast₃ (dcast₃ dcast₃_id) attribute [simp] dcast₃_id -section DepCast₃ +section DCast₃ variable {α : Sort*} {β : α → Sort*} {γ : (a : α) → β a → Sort*} {δ : (a : α) → (b : β a) → γ a b → Sort*} - [DepCast α β] [DepCast₂ α β γ] [DepCast₃ α β γ δ] + [DCast α β] [DCast₂ α β γ] [DCast₃ α β γ δ] {a a' a'' : α} {b : β a} {b' : β a'} {b'' : β a''} {c : γ a b} {c' : γ a' b'} {c'' : γ a'' b''} {d : δ a b c} {d' : δ a' b' c'} {d'' : δ a'' b'' c''} -/-- Default instance for `DepCast₃` -/ -instance (priority := low) : DepCast₃ α β γ δ where +/-- Default instance for `DCast₃` -/ +instance (priority := low) : DCast₃ α β γ δ where dcast₃ ha hb hc d := by refine cast ?_ d cases ha; simp at hb; cases hb; simp at hc; cases hc; rfl @@ -225,70 +225,70 @@ theorem dcast₃_eq_dcast₃_iff (ha : a = a') (hb : dcast ha b = b') (hc : dcas theorem dcast₃_dcast₂ : dcast₃ rfl rfl rfl d = dcast₂ dcast_eq.symm dcast₂_dcast.symm d := by rw! [dcast_eq]; rw! 
[dcast₂_eq]; simp -instance instDepCast₂₃ {a : α} : DepCast₂ (β a) (γ a) (δ a) where +instance instDCast₂₃ {a : α} : DCast₂ (β a) (γ a) (δ a) where dcast₂ ha hb c := dcast₃ (Eq.refl a) (by cases ha; simp) (by cases ha; cases hb; simp) c dcast₂_id := by intros; funext; simp -instance instDepCast₁₃ {a : α} {b : β a} : DepCast (γ a b) (δ a b) where +instance instDCast₁₃ {a : α} {b : β a} : DCast (γ a b) (δ a b) where dcast hc d := dcast₃ (Eq.refl a) dcast_eq (by cases hc; simp) d dcast_id := by intros; ext; simp -instance instDepCast₂PSigma : DepCast₂ ((a : α) ×' β a) (fun a => γ a.1 a.2) (fun a => δ a.1 a.2) +instance instDCast₂PSigma : DCast₂ ((a : α) ×' β a) (fun a => γ a.1 a.2) (fun a => δ a.1 a.2) where dcast₂ ha hb c := dcast₃ (by cases ha; simp) (by cases ha; simp) (by cases ha; cases hb; simp) c dcast₂_id := by intros; funext; simp -instance instDepCast₂Sigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} +instance instDCast₂Sigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} {δ : (a : α) → (b : β a) → γ a b → Type*} - [DepCast α β] [DepCast₂ α β γ] [DepCast₃ α β γ δ] : - DepCast₂ ((a : α) × β a) (fun a => γ a.1 a.2) (fun a => δ a.1 a.2) where + [DCast α β] [DCast₂ α β γ] [DCast₃ α β γ δ] : + DCast₂ ((a : α) × β a) (fun a => γ a.1 a.2) (fun a => δ a.1 a.2) where dcast₂ ha hb c := dcast₃ (by cases ha; simp) (by cases ha; simp) (by cases ha; cases hb; simp) c dcast₂_id := by intros; funext; simp -instance instDepCast₂Forall : - DepCast₂ α (fun a => ∀ b : β a, γ a b) (fun a f => ∀ b : β a, δ a b (f b)) where +instance instDCast₂Forall : + DCast₂ α (fun a => ∀ b : β a, γ a b) (fun a f => ∀ b : β a, δ a b (f b)) where dcast₂ ha hb c := fun b => dcast₃ ha (by simp) (by cases ha; cases hb; rw! [dcast_eq]; simp) (c (dcast ha.symm b)) dcast₂_id := by intros; funext; simp; rw! 
[dcast_eq]; simp -instance instDepCastPSigmaPSigma : - DepCast ((a : α) ×' (b : β a) ×' γ a b) (fun a => δ a.1 a.2.1 a.2.2) where +instance instDCastPSigmaPSigma : + DCast ((a : α) ×' (b : β a) ×' γ a b) (fun a => δ a.1 a.2.1 a.2.2) where dcast hc d := dcast₃ (by cases hc; simp) (by cases hc; simp) (by cases hc; simp) d dcast_id := by intros; ext; simp -instance instDepCastSigmaSigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} +instance instDCastSigmaSigma {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} {δ : (a : α) → (b : β a) → γ a b → Type*} - [DepCast α β] [DepCast₂ α β γ] [DepCast₃ α β γ δ] : - DepCast ((a : α) × (b : β a) × γ a b) (fun a => δ a.1 a.2.1 a.2.2) where + [DCast α β] [DCast₂ α β γ] [DCast₃ α β γ δ] : + DCast ((a : α) × (b : β a) × γ a b) (fun a => δ a.1 a.2.1 a.2.2) where dcast hc d := dcast₃ (by cases hc; simp) (by cases hc; simp) (by cases hc; simp) d dcast_id := by intros; ext; simp -instance instDepCastForallForall : - DepCast α (fun a => ∀ b : β a, ∀ c : γ a b, δ a b c) where +instance instDCastForallForall : + DCast α (fun a => ∀ b : β a, ∀ c : γ a b, δ a b c) where dcast ha d := fun b c => dcast₃ ha (by cases ha; simp) (by cases ha; simp) (d (dcast ha.symm b) (dcast₂ ha.symm rfl c)) dcast_id := by intros; funext; simp; rw! [dcast_eq]; rw! [dcast₂_eq]; simp -instance instDepCastPSigmaForall : - DepCast ((a : α) ×' (β a)) (fun ab => ∀ c : γ ab.1 ab.2, δ ab.1 ab.2 c) where +instance instDCastPSigmaForall : + DCast ((a : α) ×' (β a)) (fun ab => ∀ c : γ ab.1 ab.2, δ ab.1 ab.2 c) where dcast hab d := fun c => dcast₃ (by cases hab; simp) (by cases hab; simp) (by cases hab; simp) (d (dcast₂ (by cases hab; simp) (by cases hab; simp) c)) dcast_id := by intros; funext; simp; rw! 
[dcast₂_eq]; simp -instance instDepCastSigmaForallForall {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} +instance instDCastSigmaForallForall {α : Type*} {β : α → Type*} {γ : (a : α) → β a → Type*} {δ : (a : α) → (b : β a) → γ a b → Type*} - [DepCast α β] [DepCast₂ α β γ] [DepCast₃ α β γ δ] : - DepCast ((a : α) × (β a)) (fun ab => ∀ c : γ ab.1 ab.2, δ ab.1 ab.2 c) where + [DCast α β] [DCast₂ α β γ] [DCast₃ α β γ δ] : + DCast ((a : α) × (β a)) (fun ab => ∀ c : γ ab.1 ab.2, δ ab.1 ab.2 c) where dcast hab d := fun c => dcast₃ (by cases hab; simp) (by cases hab; simp) (by cases hab; simp) (d (dcast₂ (by cases hab; simp) (by cases hab; simp) c)) dcast_id := by intros; funext; simp; rw! [dcast₂_eq]; simp -end DepCast₃ +end DCast₃ namespace Fin -/-- `Fin.cast` as a `DepCast` (which should override the default instance). -/ -instance instDepCast : DepCast Nat Fin where +/-- `Fin.cast` as a `DCast` (which should override the default instance). -/ +instance instDCast : DCast Nat Fin where dcast h := Fin.cast h dcast_id := by simp only [Fin.cast_refl, implies_true] diff --git a/ArkLib/Data/Classes/HasSize.lean b/ArkLib/Data/Classes/HasSize.lean new file mode 100644 index 000000000..23bc827c9 --- /dev/null +++ b/ArkLib/Data/Classes/HasSize.lean @@ -0,0 +1,17 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Logic.Embedding.Basic + +/-! + # `HasSize` class +-/ + +/-- Type class for types that has an injective mapping to a vector of a given length `size` of + another type (often `UInt8`). -/ +class HasSize (α : Type*) (β : Type*) where + size : Nat + toFun : α ↪ Vector β size diff --git a/ArkLib/Data/Classes/Initialize.lean b/ArkLib/Data/Classes/Initialize.lean new file mode 100644 index 000000000..7b5ca7612 --- /dev/null +++ b/ArkLib/Data/Classes/Initialize.lean @@ -0,0 +1,23 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. 
All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Init + +/-! + # `Initialize` class +-/ + +universe u v + +/-- Type class for types that can be initialized from a value of type `β`. + +This abstraction allows different types to be initialized from the same input type, +providing a uniform interface for construction. In the context of cryptographic sponges, +this is typically used to initialize states from a seed or initialization vector. +-/ +class Initialize (α : Type u) (β : Type v) where + /-- Initialize a value of type `α` from a value of type `β`. -/ + new : β → α diff --git a/ArkLib/Data/Classes/Serde.lean b/ArkLib/Data/Classes/Serde.lean new file mode 100644 index 000000000..9b06a74d1 --- /dev/null +++ b/ArkLib/Data/Classes/Serde.lean @@ -0,0 +1,61 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Init +import Mathlib.Logic.Embedding.Basic +import Mathlib.Probability.Distributions.Uniform + +/-! + # Serialization and Deserialization + + This file contains simple APIs for serialization and deserialization of types in terms of other + types. +-/ + +universe u v + +/-- Type class for types that can be serialized to another type (most often `ByteArray` or + `String`). -/ +class Serialize (α : Type u) (β : Type v) where + serialize : α → β + +/-- Type class for injective serialization. -/ +class Serialize.IsInjective (α : Type u) (β : Type v) [inst : Serialize α β] : Prop where + serialize_inj : Function.Injective inst.serialize + +/-- Type class for types that can be deserialized from another type (most often `ByteArray` or + `String`), which _never_ fails. 
-/ +class Deserialize (α : Type u) (β : Type v) where + deserialize : β → α + +-- Local instance for now, will need to develop statistical distance a lot more +instance {α : Type*} [Fintype α] : Dist (PMF α) where + dist := fun a b => ∑ x, abs ((a x).toReal - (b x).toReal) + +open NNReal in +/-- Type class for deserialization on two non-empty finite types `α`, `β`, which pushes forward the + uniform distribution of `β` to the uniform distribution of `α`, up to some error -/ +class Deserialize.CloseToUniform (α : Type u) (β : Type u) + [Fintype α] [Fintype β] [Nonempty α] [Nonempty β] [Deserialize α β] where + ε : ℝ≥0 + ε_close : dist (PMF.uniformOfFintype α) (deserialize <$> PMF.uniformOfFintype β) ≤ ε + + +/-- Type class for types that can be deserialized from another type (most often `ByteArray` or + `String`), returning an `Option` if the deserialization fails. -/ +class DeserializeOption (α : Type u) (β : Type v) where + deserialize : β → Option α + +/-- Type class for types that can be serialized and deserialized (with potential failure) to/from + another type (most often `ByteArray` or `String`). -/ +class Serde (α : Type u) (β : Type v) extends Serialize α β, DeserializeOption α β + +-- Note: for codecs into an alphabet `σ`, we basically want the following: +-- variable {α σ : Type*} {n : ℕ} [inst : Serialize α (Vector σ n)] [inst.IsInjective] + +-- Note: for codecs out of an alphabet `σ`, we basically want the following: +-- variable {α σ : Type u} [Fintype α] [Nonempty α] [Fintype σ] [Nonempty σ] {n : ℕ} [NeZero n] +-- [inst : Deserialize α (Vector σ n)] [inst.CloseToUniform] diff --git a/ArkLib/Data/Classes/Zeroize.lean b/ArkLib/Data/Classes/Zeroize.lean new file mode 100644 index 000000000..27fc39496 --- /dev/null +++ b/ArkLib/Data/Classes/Zeroize.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Quang Dao +-/ + +import Mathlib.Algebra.Notation.Pi + +/-! + # `Zeroize` class +-/ + +/-- Type class for types that can be zeroized. -/ +class Zeroize (α : Type*) where + zeroize : α → α + +/-- Derive `Zeroize` from `Zero`. -/ +instance {α : Type*} [Zero α] : Zeroize α where + zeroize := Zero.zero diff --git a/ArkLib/Data/CodingTheory.lean b/ArkLib/Data/CodingTheory.lean index cb4f11200..ee850ffb8 100644 --- a/ArkLib/Data/CodingTheory.lean +++ b/ArkLib/Data/CodingTheory.lean @@ -1,11 +1,11 @@ import ArkLib.Data.CodingTheory.Basic import ArkLib.Data.CodingTheory.BerlekampWelch -import ArkLib.Data.CodingTheory.BivariatePoly import ArkLib.Data.CodingTheory.DivergenceOfSets import ArkLib.Data.CodingTheory.InterleavedCode +import ArkLib.Data.CodingTheory.JohnsonBound.Basic import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.CodingTheory.PolishchukSpielman import ArkLib.Data.CodingTheory.Prelims import ArkLib.Data.CodingTheory.ProximityGap -import ArkLib.Data.CodingTheory.PolishchukSpielman import ArkLib.Data.CodingTheory.ReedMuller import ArkLib.Data.CodingTheory.ReedSolomon diff --git a/ArkLib/Data/CodingTheory/Basic.lean b/ArkLib/Data/CodingTheory/Basic.lean index aaf1fbb4a..39e29bdb1 100644 --- a/ArkLib/Data/CodingTheory/Basic.lean +++ b/ArkLib/Data/CodingTheory/Basic.lean @@ -6,10 +6,11 @@ Authors: Quang Dao, Katerina Hristova, František Silváši, Julian Sutherland, import ArkLib.Data.CodingTheory.Prelims import Mathlib.Algebra.Polynomial.Roots -import Mathlib.Analysis.Normed.Field.Lemmas +import Mathlib.Analysis.InnerProductSpace.PiL2 import Mathlib.Data.ENat.Lattice import Mathlib.InformationTheory.Hamming import Mathlib.Topology.MetricSpace.Infsep +import Mathlib.Tactic.Qify import ArkLib.Data.Fin.Basic @@ -25,10 +26,10 @@ import ArkLib.Data.Fin.Basic ## Main Definitions - - `codeDist C`: The Hamming distance of a code `C`, defined as the infimum (in `ℕ∞`) of the + - `dist C`: The Hamming distance of a code `C`, defined as the infimum 
(in `ℕ∞`) of the Hamming distances between any two distinct elements of `C`. This is noncomputable. - - `codeDist' C`: A computable version of `codeDist C`, assuming `C` is a `Fintype`. + - `dist' C`: A computable version of `dist C`, assuming `C` is a `Fintype`. We define the block length, rate, and distance of `C`. We prove simple properties of linear codes such as the singleton bound. @@ -37,7 +38,7 @@ import ArkLib.Data.Fin.Basic variable {n : Type*} [Fintype n] {R : Type*} [DecidableEq R] -section Distance +namespace Code -- Notation for Hamming distance notation "Δ₀(" u ", " v ")" => hammingDist u v @@ -50,7 +51,7 @@ cd We formalize this as the infimum `sInf` over all `d : ℕ` such that there exist `u v : n → R` in the code with `u ≠ v` and `hammingDist u v ≤ d`. If none exists, then we define the distance to be `0`. -/ -noncomputable def codeDist (C : Set (n → R)) : ℕ := +noncomputable def dist (C : Set (n → R)) : ℕ := sInf {d | ∃ u ∈ C, ∃ v ∈ C, u ≠ v ∧ Δ₀( u, v ) ≤ d} -- TODO: rewrite this file using existing `(e)infsep` definitions @@ -65,7 +66,7 @@ noncomputable def eCodeDistNew (C : Set (n → R)) : ENNReal := C.einfsep noncomputable def codeDistNew (C : Set (n → R)) : ℝ := C.infsep -notation "‖" C "‖₀" => codeDist C +notation "‖" C "‖₀" => dist C /-- The distance from a vector `u` to a code `C` is the minimum Hamming distance between `u` and any element of `C`. 
-/ @@ -74,24 +75,27 @@ noncomputable def distFromCode (u : n → R) (C : Set (n → R)) : ℕ∞ := notation "Δ₀(" u ", " C ")" => distFromCode u C +noncomputable def minDist (C : Set (n → R)) : ℕ := + sInf {d | ∃ u ∈ C, ∃ v ∈ C, u ≠ v ∧ hammingDist u v = d} + @[simp] -theorem codeDist_empty : ‖ (∅ : Set (n → R) ) ‖₀ = 0 := by simp [codeDist] +theorem dist_empty : ‖ (∅ : Set (n → R) ) ‖₀ = 0 := by simp [dist] @[simp] -theorem codeDist_subsingleton {C : Set (n → R)} [Subsingleton C] : ‖C‖₀ = 0 := by - simp only [codeDist] +theorem dist_subsingleton {C : Set (n → R)} [Subsingleton C] : ‖C‖₀ = 0 := by + simp only [Code.dist] have {d : ℕ} : (∃ u ∈ C, ∃ v ∈ C, u ≠ v ∧ hammingDist u v ≤ d) = False := by have h := @Subsingleton.allEq C _ simp_all; intro a ha b hb hab have hEq : a = b := h a ha b hb simp_all have : {d | ∃ u ∈ C, ∃ v ∈ C, u ≠ v ∧ hammingDist u v ≤ d} = (∅ : Set ℕ) := by - apply Set.eq_empty_iff_forall_not_mem.mpr + apply Set.eq_empty_iff_forall_notMem.mpr simp [this] simp [this] @[simp] -theorem codeDist_le_card (C : Set (n → R)) : codeDist C ≤ Fintype.card n := by +theorem dist_le_card (C : Set (n → R)) : dist C ≤ Fintype.card n := by by_cases h : Subsingleton C · simp · simp at h @@ -101,11 +105,11 @@ theorem codeDist_le_card (C : Set (n → R)) : codeDist C ≤ Fintype.card n := simp refine ⟨u, And.intro hu ⟨v, And.intro hv ⟨huv, hammingDist_le_card_fintype⟩⟩⟩ -/-- If `u` and `v` are two codewords of `C` with distance less than `codeDist C`, +/-- If `u` and `v` are two codewords of `C` with distance less than `dist C`, then they are the same. 
-/ -theorem eq_of_lt_codeDist {C : Set (n → R)} {u v : n → R} (hu : u ∈ C) (hv : v ∈ C) +theorem eq_of_lt_dist {C : Set (n → R)} {u v : n → R} (hu : u ∈ C) (hv : v ∈ C) (huv : Δ₀(u, v) < ‖C‖₀) : u = v := by - simp only [codeDist] at huv + simp only [dist] at huv by_contra hNe push_neg at hNe revert huv @@ -122,7 +126,7 @@ theorem distFromCode_eq_top_iff_empty (u : n → R) (C : Set (n → R)) : Δ₀( apply Iff.intro · simp only [distFromCode] intro h - apply Set.eq_empty_iff_forall_not_mem.mpr + apply Set.eq_empty_iff_forall_notMem.mpr intro v hv apply sInf_eq_top.mp at h revert h @@ -147,9 +151,9 @@ theorem distFromCode_eq_zero_iff_mem (C : Set (n → R)) (u : n → R) : Δ₀(u simp · intro h; exact distFromCode_of_mem C h -theorem distFromCode_eq_of_lt_half_codeDist (C : Set (n → R)) (u : n → R) {v w : n → R} +theorem distFromCode_eq_of_lt_half_dist (C : Set (n → R)) (u : n → R) {v w : n → R} (hv : v ∈ C) (hw : w ∈ C) (huv : Δ₀(u, v) < ‖C‖₀ / 2) (hvw : Δ₀(u, w) < ‖C‖₀ / 2) : v = w := by - apply eq_of_lt_codeDist hv hw + apply eq_of_lt_dist hv hw calc Δ₀(v, w) ≤ Δ₀(v, u) + Δ₀(u, w) := by exact hammingDist_triangle v u w _ = Δ₀(u, v) + Δ₀(u, w) := by simp only [hammingDist_comm] @@ -161,32 +165,32 @@ section Computable /-- Computable version of the Hamming distance of a code `C`, assuming `C` is a `Fintype`. The return type is `ℕ∞` since we use `Finset.min`. 
-/ -def codeDist' (C : Set (n → R)) [Fintype C] : ℕ∞ := +def dist' (C : Set (n → R)) [Fintype C] : ℕ∞ := Finset.min <| ((@Finset.univ (C × C) _).filter (fun p => p.1 ≠ p.2)).image (fun ⟨u, v⟩ => hammingDist u.1 v.1) -notation "‖" C "‖₀'" => codeDist' C +notation "‖" C "‖₀'" => dist' C variable {C : Set (n → R)} [Fintype C] @[simp] -theorem codeDist'_empty : ‖(∅ : Set (n → R))‖₀' = ⊤ := by - simp [codeDist'] +theorem dist'_empty : ‖(∅ : Set (n → R))‖₀' = ⊤ := by + simp [dist'] @[simp] theorem codeDist'_subsingleton [Subsingleton C] : ‖C‖₀' = ⊤ := by - simp [codeDist'] + simp [dist'] apply Finset.min_eq_top.mpr simp [Finset.filter_eq_empty_iff] have h := @Subsingleton.elim C _ simp_all exact h -theorem codeDist'_eq_codeDist : ‖C‖₀'.toNat = ‖C‖₀ := by +theorem dist'_eq_dist : ‖C‖₀'.toNat = ‖C‖₀ := by by_cases h : Subsingleton C · simp - · simp [codeDist, codeDist'] - have : codeDist' C ≠ ⊤ := by sorry + · simp [dist, dist'] + have : dist' C ≠ ⊤ := by sorry sorry -- apply (ENat.toNat_eq_iff this).mp -- apply Finset.min_eq_top.mp @@ -204,14 +208,12 @@ variable {α : Type*} {F : Type*} [DecidableEq F] {ι : Type*} [Fintype ι] -/-- - The set of possible distances `δf` from a vector `w` to a code `C`. +/-- The set of possible distances `δf` from a vector `w` to a code `C`. -/ -def possibleDistsToC (w : ι → F) (C : Set (ι → F)) (δf : (ι → F) → (ι → F) → α) : Set α := +def possibleDistsToCode (w : ι → F) (C : Set (ι → F)) (δf : (ι → F) → (ι → F) → α) : Set α := {d : α | ∃ c ∈ C, c ≠ w ∧ δf w c = d} -/-- - The set of possible distances `δf` between distinct codewords in a code `C`. +/-- The set of possible distances `δf` between distinct codewords in a code `C`. - TODO: This allows us to express distance in non-ℝ, which is quite convenient. Extending to `(E)Dist` forces this into `ℝ`; give some thought. 
@@ -219,15 +221,14 @@ def possibleDistsToC (w : ι → F) (C : Set (ι → F)) (δf : (ι → F) → ( def possibleDists (C : Set (ι → F)) (δf : (ι → F) → (ι → F) → α) : Set α := {d : α | ∃ p ∈ Set.offDiag C, δf p.1 p.2 = d} -/-- - A generalisation of `distFromCode` for an arbitrary distance function `δf`. +/-- A generalisation of `distFromCode` for an arbitrary distance function `δf`. -/ noncomputable def distToCode [LinearOrder α] [Zero α] (w : ι → F) (C : Set (ι → F)) (δf : (ι → F) → (ι → F) → α) - (h : (possibleDistsToC w C δf).Finite) : WithTop α := + (h : (possibleDistsToCode w C δf).Finite) : WithTop α := haveI := @Fintype.ofFinite _ h - (possibleDistsToC w C δf).toFinset.min + (possibleDistsToCode w C δf).toFinset.min end @@ -235,15 +236,15 @@ lemma distToCode_of_nonempty {α : Type*} [LinearOrder α] [Zero α] {ι F : Type*} {w : ι → F} {C : Set (ι → F)} {δf : (ι → F) → (ι → F) → α} - (h₁ : (possibleDistsToC w C δf).Finite) - (h₂ : (possibleDistsToC w C δf).Nonempty) : + (h₁ : (possibleDistsToCode w C δf).Finite) + (h₂ : (possibleDistsToCode w C δf).Nonempty) : haveI := @Fintype.ofFinite _ h₁ - distToCode w C δf h₁ = .some ((possibleDistsToC w C δf).toFinset.min' (by simpa)) := by + distToCode w C δf h₁ = .some ((possibleDistsToCode w C δf).toFinset.min' (by simpa)) := by simp [distToCode, Finset.min'_eq_inf', Finset.min_eq_inf_withTop] rfl /-- Computable version of the distance from a vector `u` to a code `C`, assuming `C` is a `Fintype`. 
- -/ +-/ def distFromCode' (C : Set (n → R)) [Fintype C] (u : n → R) : ℕ∞ := Finset.min <| (@Finset.univ C _).image (fun v => hammingDist u v.1) @@ -251,67 +252,200 @@ notation "Δ₀'(" u ", " C ")" => distFromCode' C u end Computable -end Distance +noncomputable section -section Linear +variable {ι : Type*} [Fintype ι] + {F : Type*} + {u v w c : ι → F} + {C : Set (ι → F)} -variable {k : Type*} [Fintype k] [CommSemiring R] +section -/-- Define a linear code from its generator matrix -/ -def codeByGenMatrix (G : Matrix k n R) : Submodule R (n → R) := - LinearMap.range G.vecMulLinear - -- Submodule.span R (Set.range G) +variable [Nonempty ι] [DecidableEq F] -/-- Define a linear code from its (parity) check matrix -/ -def codeByCheckMatrix (H : Matrix k n R) : Submodule R (n → R) := - LinearMap.ker H.mulVecLin +def relHammingDist (u v : ι → F) : ℚ≥0 := + hammingDist u v / Fintype.card ι -/-- The Hamming distance of a linear code can also be defined as the minimum Hamming norm of a - non-zero vector in the code -/ -noncomputable def linearCodeDist (C : Submodule R (n → R)) : ℕ := - sInf {d | ∃ u ∈ C, u ≠ 0 ∧ hammingNorm u ≤ d} +/-- + `δᵣ(u,v)` denotes the relative Hamming distance between vectors `u` and `v`. +-/ +notation "δᵣ(" u ", " v ")" => relHammingDist u v --- Require `[CommRing R]` -theorem codeDist_eq_linearCodeDist (C : Submodule R (n → R)) : - codeDist C.carrier = linearCodeDist C := by - simp [codeDist, linearCodeDist] - congr; unfold setOf; funext d - apply Eq.propIntro <;> intro h - · obtain ⟨u, hu, v, hv, huv, hDist⟩ := h - -- let w := u - v - -- have hw : w ∈ C := by simp [Submodule.add_mem] - -- refine ⟨w, And.intro hw ⟨v, And.intro hv ⟨huv, ?_⟩⟩⟩ - sorry - · obtain ⟨u, hu, hNorm, hDist⟩ := h - -- refine ⟨u, And.intro hu ⟨v, And.intro hv ⟨huv, ?_⟩⟩⟩ - sorry +/-- The relative Hamming distance between two vectors is at most `1`. 
+-/ +@[simp] +lemma relHammingDist_le_one : δᵣ(u, v) ≤ 1 := by + unfold relHammingDist + qify + rw [div_le_iff₀ (by simp)] + simp [hammingDist_le_card_fintype] -section Computable +/-- The relative Hamming distance between two vectors is non-negative. +-/ +@[simp] +lemma zero_le_relHammingDist : 0 ≤ δᵣ(u, v) := by + unfold relHammingDist + qify + rw [le_div_iff₀ (by simp)] + simp -variable [DecidableEq n] [Fintype R] +end -/-- A computable version of the Hamming distance of a linear code `C`. -/ -def linearCodeDist' (C : Submodule R (n → R)) [DecidablePred (· ∈ C)] : ℕ∞ := - Finset.min <| ((Finset.univ (α := C)).filter (fun v => v ≠ 0)).image (fun v => hammingNorm v.1) +/-- The range of the relative Hamming distance function. +-/ +def relHammingDistRange (ι : Type*) [Fintype ι] : Set ℚ≥0 := + {d : ℚ≥0 | ∃ d' : ℕ, d' ≤ Fintype.card ι ∧ d = d' / Fintype.card ι} -end Computable +/-- The range of the relative Hamming distance is well-defined. +-/ +@[simp] +lemma relHammingDist_mem_relHammingDistRange [DecidableEq F] : δᵣ(u, v) ∈ relHammingDistRange ι := + ⟨hammingDist _ _, Finset.card_filter_le _ _, rfl⟩ -/-- The interleaving of a linear code `C` over index set `ι` is the submodule spanned by -`ι × n`-matrices whose rows are elements of `C`. -/ -def interleaveCode (C : Submodule R (n → R)) (ι : Type*) : Submodule R ((ι × n) → R) := - Submodule.span R {v | ∀ i, ∃ c ∈ C, c = fun j => v (i, j)} +/-- The range of the relative Hamming distance function is finite. 
+-/ +@[simp] +lemma finite_relHammingDistRange [Nonempty ι] : (relHammingDistRange ι).Finite := by + simp only [relHammingDistRange, ← Set.finite_coe_iff, Set.coe_setOf] + exact + finite_iff_exists_equiv_fin.2 + ⟨Fintype.card ι + 1, + ⟨⟨ + fun ⟨s, _⟩ ↦ ⟨(s * Fintype.card ι).num, by aesop (add safe (by omega))⟩, + fun n ↦ ⟨n / Fintype.card ι, by use n; simp [Nat.le_of_lt_add_one n.2]⟩, + fun ⟨_, _, _, h₂⟩ ↦ by field_simp [h₂], + fun _ ↦ by simp + ⟩⟩ + ⟩ -notation:20 C "^⋈" ι => interleaveCode C ι +/-- The set of pairs of distinct elements from a finite set is finite. +-/ +@[simp] +lemma finite_offDiag [Finite F] : C.offDiag.Finite := Set.Finite.offDiag (Set.toFinite _) --- instance : Fintype (interleaveCode C ι) := sorry +section -/-- Interleave two functions `u v : α → β`. -/ -def Function.interleave₂ {α β : Type*} (u v : α → β) : (Fin 2) × α → β := - Function.uncurry (fun a => if a = 0 then u else v) +variable [DecidableEq F] -notation:20 u "⋈" v => Function.interleave₂ u v +/-- The set of possible distances between distinct codewords in a code. +-/ +def possibleRelHammingDists (C : Set (ι → F)) : Set ℚ≥0 := + possibleDists C relHammingDist + +/-- The set of possible distances between distinct codewords in a code is a subset of the range of + the relative Hamming distance function. +-/ +@[simp] +lemma possibleRelHammingDists_subset_relHammingDistRange : + possibleRelHammingDists C ⊆ relHammingDistRange ι := fun _ ↦ by + aesop (add simp [possibleRelHammingDists, possibleDists]) + +variable [Nonempty ι] + +/-- The set of possible distances between distinct codewords in a code is a finite set. +-/ +@[simp] +lemma finite_possibleRelHammingDists : (possibleRelHammingDists C).Finite := + Set.Finite.subset finite_relHammingDistRange possibleRelHammingDists_subset_relHammingDistRange + +open Classical in +/-- The minimum relative Hamming distance of a code. 
+-/ +def minRelHammingDistCode (C : Set (ι → F)) : ℚ≥0 := + haveI : Fintype (possibleRelHammingDists C) := @Fintype.ofFinite _ finite_possibleRelHammingDists + if h : (possibleRelHammingDists C).Nonempty + then (possibleRelHammingDists C).toFinset.min' (Set.toFinset_nonempty.2 h) + else 0 + +end + +/-- + `δᵣ C` denotes the minimum relative Hamming distance of a code `C`. +-/ +notation "δᵣ" C => minRelHammingDistCode C + +/-- The range set of possible relative Hamming distances from a vector to a code is a subset + of the range of the relative Hamming distance function. +-/ +@[simp] +lemma possibleRelHammingDistsToC_subset_relHammingDistRange [DecidableEq F] : + possibleDistsToCode w C relHammingDist ⊆ relHammingDistRange ι := fun _ ↦ by + aesop (add simp Code.possibleDistsToCode) + +/-- The set of possible relative Hamming distances from a vector to a code is a finite set. +-/ +@[simp] +lemma finite_possibleRelHammingDistsToCode [Nonempty ι] [DecidableEq F] : + (possibleDistsToCode w C relHammingDist).Finite := + Set.Finite.subset finite_relHammingDistRange possibleRelHammingDistsToC_subset_relHammingDistRange + +instance [Nonempty ι] [DecidableEq F] : Fintype (possibleDistsToCode w C relHammingDist) + := @Fintype.ofFinite _ finite_possibleRelHammingDistsToCode + +open Classical in +/-- The relative Hamming distance from a vector to a code. +-/ +def relHammingDistToCode [Nonempty ι] [DecidableEq F] (w : ι → F) (C : Set (ι → F)) : ℚ≥0 := + if h : (possibleDistsToCode w C relHammingDist).Nonempty + then distToCode w C relHammingDist finite_possibleRelHammingDistsToCode |>.get (p h) + else 0 + where p (h : (possibleDistsToCode w C relHammingDist).Nonempty) := by + by_contra c + simp [distToCode] at c ⊢ + rw [WithTop.none_eq_top, Finset.min_eq_top, Set.toFinset_eq_empty] at c + simp_all + +/-- + `δᵣ(w,C)` denotes the relative Hamming distance between a vector `w` and a code `C`. 
+-/ +notation "δᵣ(" w ", " C ")" => relHammingDistToCode w C + +@[simp] +lemma zero_mem_relHammingDistRange : 0 ∈ relHammingDistRange ι := by use 0; simp + +/-- The relative Hamming distances between a vector and a codeword is in the + range of the relative Hamming distance function. +-/ +@[simp] +lemma relHammingDistToCode_mem_relHammingDistRange [Nonempty ι] [DecidableEq F] : + δᵣ(c, C) ∈ relHammingDistRange ι := by + unfold relHammingDistToCode + split_ifs with h + · exact Set.mem_of_subset_of_mem + (s₁ := (possibleDistsToCode c C relHammingDist).toFinset) + (by simp) + (by simp_rw [distToCode_of_nonempty (h₁ := by simp) (h₂ := h)] + simp [←WithTop.some_eq_coe] + have := Finset.min'_mem + (s := (possibleDistsToCode c C relHammingDist).toFinset) + (H := by simpa) + simpa) + · simp + +end -end Linear +noncomputable section + +variable {F : Type*}[DecidableEq F] + {ι : Type*} [Fintype ι] + + +open Finset + +def wt [Zero F] + (v : ι → F) : ℕ := #{i | v i ≠ 0} + +lemma wt_eq_hammingNorm [Zero F] {v : ι → F} : + wt v = hammingNorm v := rfl + +lemma wt_eq_zero_iff [Zero F] {v : ι → F} : + wt v = 0 ↔ Fintype.card ι = 0 ∨ ∀ i, v i = 0 := by + by_cases IsEmpty ι <;> + aesop (add simp [wt, Finset.filter_eq_empty_iff]) + +end + +end Code variable [Finite R] @@ -332,7 +466,7 @@ omit [Finite R] in theorem projection_injective by_contra hne have hdiff : hammingDist u v ≥ ‖C‖₀ := by - simp [codeDist] + simp [Code.dist] refine Nat.sInf_le ?_ refine Set.mem_setOf.mpr ?_ use u @@ -375,7 +509,7 @@ omit [Finite R] in theorem projection_injective simp at * rw[hS] have stronger : ‖C‖₀ ≤ card n := by - apply codeDist_le_card + apply Code.dist_le_card omega have hsizes: card D ≤ @card diff (ofFinite diff) := by @@ -441,324 +575,178 @@ theorem singleton_bound (C : Set (n → R)) : let huniv := @set_fintype_card_le_univ (n → R) ?_ C (ofFinite C) exact huniv -variable [DivisionRing R] - -/-- **Singleton bound** for linear codes -/ -theorem singleton_bound_linear (C : Submodule R (n → R)) : - 
Module.finrank R C ≤ card n - (codeDist C.carrier) + 1 := by sorry - -- have : (ofFinite C).card = (ofFinite R).card ^ (Module.finrank R C) := by - -namespace RelativeHamming - -variable {ι : Type*} [Fintype ι] - {F : Type*} - {u v w c : ι → F} - {C : Set (ι → F)} - -noncomputable section - -section - -variable [Nonempty ι] [DecidableEq F] - -def dist (u v : ι → F) : ℚ := - (hammingDist u v : ℚ) / (Fintype.card ι : ℚ) - -/-- - `δᵣ(u,v)` denotes the relative Hamming distance between vectors `u` and `v`. --/ -notation "δᵣ(" u ", " v ")" => dist u v - -/-- - The relative Hamming distance between two vectors is at most `1`. --/ -@[simp] -lemma relHammingDist_le_one : δᵣ(u, v) ≤ 1 := by - unfold dist - rw [div_le_iff₀ (by simp)] - simp [hammingDist_le_card_fintype] - -/-- - The relative Hamming distance between two vectors is non-negative. --/ -@[simp] -lemma zero_le_relHammingDist : 0 ≤ δᵣ(u, v) := by - unfold dist - rw [le_div_iff₀ (by simp)] - simp - -end - -/-- - The range of the relative Hamming distance function. --/ -def relHammingDistRange (ι : Type*) [Fintype ι] : Set ℚ := - {d : ℚ | ∃ d' : ℕ, d' ≤ Fintype.card ι ∧ d = d' / Fintype.card ι} - -/-- - The range of the relative Hamming distance is well-defined. --/ -@[simp] -lemma relHammingDist_mem_relHammingDistRange [DecidableEq F] : δᵣ(u, v) ∈ relHammingDistRange ι := - ⟨hammingDist _ _, Finset.card_filter_le _ _, rfl⟩ - -/-- - The range of the relative Hamming distance function is finite. 
--/ -@[simp] -lemma finite_relHammingDistRange [Nonempty ι] : (relHammingDistRange ι).Finite := by - simp only [relHammingDistRange, ← Set.finite_coe_iff, Set.coe_setOf] - exact - finite_iff_exists_equiv_fin.2 - ⟨Fintype.card ι + 1, - ⟨⟨ - fun ⟨s, _⟩ ↦ ⟨(s * Fintype.card ι).num.toNat, by aesop (add safe (by omega))⟩, - fun n ↦ ⟨n / Fintype.card ι, by use n; simp [Nat.le_of_lt_add_one n.2]⟩, - fun ⟨_, _, _, h₂⟩ ↦ by field_simp [h₂], - fun _ ↦ by simp - ⟩⟩ - ⟩ - -/-- - The set of pairs of distinct elements from a finite set is finite. --/ -@[simp] -lemma finite_offDiag [Finite F] : C.offDiag.Finite := Set.Finite.offDiag (Set.toFinite _) - -section - -variable [DecidableEq F] - -/-- - The set of possible distances between distinct codewords in a code. --/ -def possibleDists (C : Set (ι → F)) : Set ℚ := - _root_.possibleDists C dist - -/-- - The set of possible distances between distinct codewords in a code is a subset of the range of the - relative Hamming distance function. --/ -@[simp] -lemma possibleDists_subset_relHammingDistRange : - possibleDists C ⊆ relHammingDistRange ι := fun _ ↦ by - aesop (add simp [possibleDists, _root_.possibleDists]) - -variable [Nonempty ι] - -/-- - The set of possible distances between distinct codewords in a code is a finite set. --/ -@[simp] -lemma finite_possibleDists : (possibleDists C).Finite := - Set.Finite.subset finite_relHammingDistRange possibleDists_subset_relHammingDistRange - -open Classical in -/-- - The minimum relative Hamming distance of a code. --/ -def minRelHammingDistCode (C : Set (ι → F)) : ℚ := - haveI : Fintype (possibleDists C) := @Fintype.ofFinite _ finite_possibleDists - if h : (possibleDists C).Nonempty - then (possibleDists C).toFinset.min' (Set.toFinset_nonempty.2 h) - else 0 - -end - -/-- - `δᵣ C` denotes the minimum relative Hamming distance of a code `C`. 
--/ -notation "δᵣ" C => minRelHammingDistCode C - -/-- - The range set of possible relative Hamming distances from a vector to a code is a subset - of the range of the relative Hamming distance function. --/ -@[simp] -lemma possibleDistsToC_subset_relHammingDistRange [DecidableEq F] : - possibleDistsToC w C dist ⊆ relHammingDistRange ι := fun _ ↦ by - aesop (add simp possibleDistsToC) - -/-- - The set of possible relative Hamming distances from a vector to a code is a finite set. --/ -@[simp] -lemma finite_possibleDistsToC [Nonempty ι] [DecidableEq F] : - (possibleDistsToC w C dist).Finite := - Set.Finite.subset finite_relHammingDistRange possibleDistsToC_subset_relHammingDistRange - -instance [Nonempty ι] [DecidableEq F] : Fintype (possibleDistsToC w C dist) - := @Fintype.ofFinite _ finite_possibleDistsToC - -open Classical in -/-- - The relative Hamming distance from a vector to a code. --/ -def relHammingDistToCode [Nonempty ι] [DecidableEq F] (w : ι → F) (C : Set (ι → F)) : ℚ := - if h : (possibleDistsToC w C dist).Nonempty - then distToCode w C dist finite_possibleDistsToC |>.get (p h) - else 0 - where p (h : (possibleDistsToC w C dist).Nonempty) := by - by_contra c - simp [distToCode] at c ⊢ - rw [WithTop.none_eq_top, Finset.min_eq_top, Set.toFinset_eq_empty] at c - simp_all - -/-- - `δᵣ(w,C)` denotes the relative Hamming distance between a vector `w` and a code `C`. --/ -notation "δᵣ(" w ", " C ")" => relHammingDistToCode w C - -@[simp] -lemma zero_mem_relHammingDistRange : 0 ∈ relHammingDistRange ι := by use 0; simp - -/-- - The relative Hamming distances between a vector and a codeword is in the - range of the relative Hamming distance function. 
--/ -@[simp] -lemma relHammingDistToCode_mem_relHammingDistRange [Nonempty ι] [DecidableEq F] : - δᵣ(c, C) ∈ relHammingDistRange ι := by - unfold relHammingDistToCode - split_ifs with h - · exact Set.mem_of_subset_of_mem - (s₁ := (possibleDistsToC c C dist).toFinset) - (by simp) - (by simp_rw [distToCode_of_nonempty (h₁ := by simp) (h₂ := h)] - simp [←WithTop.some_eq_coe] - have := Finset.min'_mem - (α := ℚ) - (s := (possibleDistsToC c C dist).toFinset) - (H := by simpa) - simpa) - · simp - -end - -end RelativeHamming - abbrev LinearCode.{u, v} (ι : Type u) [Fintype ι] (F : Type v) [Semiring F] : Type (max u v) := Submodule F (ι → F) namespace LinearCode -open Finset +section variable {F : Type*} {ι : Type*} [Fintype ι] - -noncomputable def wt [DecidableEq F] [Zero F] - (v : ι → F) : ℕ := #{i | v i ≠ 0} - -lemma wt_eq_zero_iff [DecidableEq F] [Zero F] {v : ι → F} : - wt v = 0 ↔ Fintype.card ι = 0 ∨ ∀ i, v i = 0 := by - by_cases IsEmpty ι <;> - aesop (add simp [wt, Finset.filter_eq_empty_iff]) - -section - -variable {ι : Type*} [Fintype ι] {κ : Type*} [Fintype κ] -noncomputable def minDist [Semiring F] [DecidableEq F] (LC : LinearCode ι F) : ℕ := - sInf { d | ∃ u ∈ LC, ∃ v ∈ LC, u ≠ v ∧ hammingDist u v = d } - /-- Linear code defined by left multiplication by its generator matrix. -/ -def fromRowGenMat [Semiring F] (G : Matrix κ ι F) : LinearCode ι F := +noncomputable def fromRowGenMat [Semiring F] (G : Matrix κ ι F) : LinearCode ι F := LinearMap.range G.vecMulLinear -/-- - Linear code defined by right multiplication by a generator matrix. +/-- Linear code defined by right multiplication by a generator matrix. 
-/ -def fromColGenMat [CommRing F] (G : Matrix ι κ F) : LinearCode ι F := +noncomputable def fromColGenMat [CommRing F] (G : Matrix ι κ F) : LinearCode ι F := LinearMap.range G.mulVecLin +/-- Define a linear code from its (parity) check matrix -/ +noncomputable def byCheckMatrix [CommRing F] (H : Matrix ι κ F) : LinearCode κ F := + LinearMap.ker H.mulVecLin + +/-- The Hamming distance of a linear code can also be defined as the minimum Hamming norm of a + non-zero vector in the code -/ +noncomputable def disFromHammingNorm [Semiring F] [DecidableEq F] (LC : LinearCode ι F) : ℕ := + sInf {d | ∃ u ∈ LC, u ≠ 0 ∧ hammingNorm u ≤ d} + +-- Require `[CommRing R]` +theorem dist_eq_dist_from_HammingNorm [Semiring F] [DecidableEq F] (LC : LinearCode ι F) : + Code.dist LC.carrier = disFromHammingNorm LC := by + simp [Code.dist, disFromHammingNorm] + congr; unfold setOf; funext d + apply Eq.propIntro <;> intro h + · obtain ⟨u, hu, v, hv, huv, hDist⟩ := h + -- let w := u - v + -- have hw : w ∈ C := by simp [Submodule.add_mem] + -- refine ⟨w, And.intro hw ⟨v, And.intro hv ⟨huv, ?_⟩⟩⟩ + sorry + · obtain ⟨u, hu, hNorm, hDist⟩ := h + -- refine ⟨u, And.intro hu ⟨v, And.intro hv ⟨huv, ?_⟩⟩⟩ + sorry + +/-- +The dimension of a linear code. +-/ noncomputable def dim [Semiring F] (LC : LinearCode ι F) : ℕ := Module.finrank F LC -/-- - The dimension of a linear code equals the rank of its associated generator matrix. +/-- The dimension of a linear code equals the rank of its associated generator matrix. -/ lemma rank_eq_dim_fromColGenMat [CommRing F] {G : Matrix κ ι F} : G.rank = dim (fromColGenMat G) := rfl +/-- +The length of a linear code. +-/ def length [Semiring F] (_ : LinearCode ι F) : ℕ := Fintype.card ι -noncomputable def rate [Semiring F] (LC : LinearCode ι F) : ℚ := - (dim LC : ℚ) / (length LC : ℚ) +/-- +The rate of a linear code. +-/ +noncomputable def rate [Semiring F] (LC : LinearCode ι F) : ℚ≥0 := + (dim LC : ℚ≥0) / length LC /-- `ρ LC` is the rate of the linear code `LC`. 
-/ notation "ρ" LC => rate LC +end + section -variable [DecidableEq F] +variable {F : Type*}[DecidableEq F] + {ι : Type*} [Fintype ι] -/-- - The minimum taken over the weight of codewords in a linear code. +/-- The minimum taken over the weight of codewords in a linear code. -/ noncomputable def minWtCodewords [Semiring F] (LC : LinearCode ι F) : ℕ := - sInf {w | ∃ c ∈ LC, c ≠ 0 ∧ wt c = w} + sInf {w | ∃ c ∈ LC, c ≠ 0 ∧ Code.wt c = w} /-- The Hamming distance between codewords equals to the weight of their difference. -/ -lemma hammingDist_eq_wt_sub [CommRing F] {u v : ι → F} : hammingDist u v = wt (u - v) := by - aesop (add simp [hammingDist, wt, sub_eq_zero]) - +lemma hammingDist_eq_wt_sub [CommRing F] {u v : ι → F} : hammingDist u v = Code.wt (u - v) := by + aesop (add simp [hammingDist, Code.wt, sub_eq_zero]) + /-- - The min distance of a linear code equals to the minimum of the weights of non-zero codewords. + The min distance of a linear code equals the minimum of the weights of non-zero codewords. 
-/ -lemma minDist_eq_minWtCodewords [CommRing F] {LC : LinearCode ι F} : - minDist LC = minWtCodewords LC := by - unfold minDist minWtCodewords +lemma dist_eq_minWtCodewords [CommRing F] {LC : LinearCode ι F} : + Code.minDist (LC : Set (ι → F)) = minWtCodewords LC := by + unfold Code.minDist minWtCodewords refine congrArg _ (Set.ext fun _ ↦ ⟨fun ⟨u, _, v, _⟩ ↦ ⟨u - v, ?p₁⟩, fun _ ↦ ⟨0, ?p₂⟩⟩) <;> aesop (add simp [hammingDist_eq_wt_sub, sub_eq_zero]) + open Finset in -lemma minDist_UB [CommRing F] {LC : LinearCode ι F} : minDist LC ≤ length LC := by - rw [minDist_eq_minWtCodewords, minWtCodewords] + +lemma dist_UB [CommRing F] {LC : LinearCode ι F} : + Code.minDist (LC : Set (ι → F)) ≤ length LC := by + rw [dist_eq_minWtCodewords, minWtCodewords] exact sInf.sInf_UB_of_le_UB fun s ⟨_, _, _, s_def⟩ ↦ - s_def ▸ le_trans (card_le_card (subset_univ _)) (le_refl _) + s_def ▸ le_trans (card_le_card (subset_univ _)) (le_refl _) theorem singletonBound [Semiring F] (LC : LinearCode ι F) : - dim LC ≤ length LC - minDist LC + 1 := by sorry + dim LC ≤ length LC - Code.minDist (LC : Set (ι → F)) + 1 := by sorry -end + +/-- The interleaving of a linear code `LC` over index set `ι` is the submodule spanned by +`ι × n`-matrices whose rows are elements of `LC`. -/ +def interleaveCode [Semiring F] [DecidableEq F] (C : Submodule F (n → F)) (ι : Type*) + : Submodule F ((ι × n) → F) := + Submodule.span F {v | ∀ i, ∃ c ∈ C, c = fun j => v (i, j)} + +notation:20 C "^⋈" ι => interleaveCode C ι + +-- instance : Fintype (interleaveCode C ι) := sorry + +/-- Interleave two functions `u v : α → β`. 
-/ +def Function.interleave₂ {α β : Type*} (u v : α → β) : (Fin 2) × α → β := + Function.uncurry (fun a => if a = 0 then u else v) + +notation:20 u "⋈" v => Function.interleave₂ u v + +/-- **Singleton bound** for linear codes -/ +theorem singleton_bound_linear [Semiring F] (LC : LinearCode ι F) : + Module.finrank F LC ≤ card n - (Code.dist LC.carrier) + 1 := by sorry + -- have : (ofFinite C).card = (ofFinite R).card ^ (Module.finrank R C) := by end +section Computable + +/-- A computable version of the Hamming distance of a linear code `LC`. -/ +def linearCodeDist' {F} {ι} [Fintype ι] [Semiring F] [DecidableEq F] [Fintype F] [DecidableEq ι] + (LC : LinearCode ι F) [DecidablePred (· ∈ LC)] : ℕ∞ := + Finset.min <| ((Finset.univ (α := LC)).filter (fun v => v ≠ 0)).image (fun v => hammingNorm v.1) + +end Computable + end LinearCode lemma poly_eq_zero_of_dist_lt {n k : ℕ} {F : Type*} [DecidableEq F] [CommRing F] [IsDomain F] {p : Polynomial F} {ωs : Fin n → F} (h_deg : p.natDegree < k) (hn : k ≤ n) - (h_inj: Function.Injective ωs) + (h_inj: Function.Injective ωs) (h_dist : Δ₀(p.eval ∘ ωs, 0) < n - k + 1) : p = 0 := by by_cases hk : k = 0 · simp [hk] at h_deg · have h_n_k_1 : n - k + 1 = n - (k - 1) := by omega - rw [h_n_k_1] at h_dist + rw [h_n_k_1] at h_dist simp [hammingDist] at * rw [←Finset.compl_filter, Finset.card_compl] at h_dist - simp at h_dist + simp at h_dist have hk : 1 ≤ k := by omega - rw [←Finset.card_image_of_injective _ h_inj + rw [←Finset.card_image_of_injective _ h_inj ] at h_dist - have h_dist_p : k ≤ - (@Finset.image (Fin n) F _ ωs {i | Polynomial.eval (ωs i) p = 0} : Finset F).card + have h_dist_p : k ≤ + (@Finset.image (Fin n) F _ ωs {i | Polynomial.eval (ωs i) p = 0} : Finset F).card := by omega by_cases heq_0 : p = 0 <;> try simp [heq_0] have h_dist := Nat.le_trans h_dist_p (by { apply Polynomial.card_le_degree_of_subset_roots (p := p) - intro x hx + intro x hx aesop }) omega diff --git a/ArkLib/Data/CodingTheory/BerlekampWelch/ElocPoly.lean 
b/ArkLib/Data/CodingTheory/BerlekampWelch/ElocPoly.lean index 44e34565e..d7b9bb041 100644 --- a/ArkLib/Data/CodingTheory/BerlekampWelch/ElocPoly.lean +++ b/ArkLib/Data/CodingTheory/BerlekampWelch/ElocPoly.lean @@ -10,10 +10,10 @@ import Mathlib.Algebra.Polynomial.Degree.Definitions import Mathlib.Algebra.Polynomial.FieldDivision import Mathlib.Data.Finset.Insert import Mathlib.Data.Fintype.Card -import Mathlib.Data.Matrix.Mul +import Mathlib.Data.Matrix.Mul import ArkLib.Data.CodingTheory.Basic -import ArkLib.Data.Fin.Basic +import ArkLib.Data.Fin.Lift namespace BerlekampWelch @@ -21,12 +21,12 @@ variable {F : Type} [Field F] {m n : ℕ} {p : Polynomial F} variable [DecidableEq F] -section ElocPoly +section ElocPoly open Polynomial protected noncomputable def ElocPoly (n : ℕ) (ωs f : ℕ → F) (p : Polynomial F) : Polynomial F := - List.prod <| (List.range n).map fun i => + List.prod <| (List.range n).map fun i => if f i = p.eval (ωs i) then 1 else X - C (ωs i) @@ -47,9 +47,9 @@ protected lemma elocPoly_one : @[simp] protected lemma elocPoly_two : - ElocPoly 2 ωs f p = - if f 1 = eval (ωs 1) p - then if f 0 = eval (ωs 0) p then 1 + ElocPoly 2 ωs f p = + if f 1 = eval (ωs 1) p + then if f 0 = eval (ωs 0) p then 1 else X - C (ωs 0) else if f 0 = eval (ωs 0) p then X - C (ωs 1) else (X - C (ωs 0)) * (X - C (ωs 1)) := by @@ -58,7 +58,7 @@ protected lemma elocPoly_two : @[simp] protected lemma elocPoly_succ : ElocPoly (n + 1) ωs f p = - ElocPoly n ωs f p * + ElocPoly n ωs f p * if f n = p.eval (ωs n) then 1 else X - C (ωs n) := by @@ -68,7 +68,7 @@ protected lemma elocPoly_succ : open BerlekampWelch (elocPoly_succ) in protected lemma roots_of_eloc_poly {x : F} - (h : (ElocPoly n ωs f p).eval x = 0) : + (h : (ElocPoly n ωs f p).eval x = 0) : ∃ i, i < n ∧ f i ≠ p.eval (ωs i) := by induction' n with n ih generalizing x · aesop @@ -95,13 +95,13 @@ protected lemma elocPoly_ne_zero : ElocPoly n ωs f p ≠ 0 := by @[simp] protected lemma elocPoly_leading_coeff_one : (ElocPoly n ωs f 
p).leadingCoeff = 1 := by - induction' n with n _ + induction' n with n _ · simp · aesop section -open Fin +open Fin protected lemma elocPoly_congr {ωs' f' : ℕ → F} (h₁ : ∀ {m}, m < n → ωs m = ωs' m) (h₂ : ∀ {m}, m < n → f m = f' m) : @@ -122,7 +122,7 @@ noncomputable def ElocPolyF (ωs f : Fin n → F) (p : Polynomial F) : Polynomia @[simp] protected lemma elocPolyF_eq_elocPoly : - ElocPolyF (n := n) (liftF' ωs) (liftF' f) = ElocPoly n ωs f := + ElocPolyF (n := n) (liftF' ωs) (liftF' f) = ElocPoly n ωs f := elocPoly_congr liftF_liftF'_of_lt liftF_liftF'_of_lt @[simp] @@ -142,8 +142,8 @@ protected lemma errors_are_roots_of_elocPolyF {i : Fin n} {ωs f : Fin n → F} (h : f i ≠ p.eval (ωs i)) : (ElocPolyF ωs f p).eval (ωs i) = 0 := by rw [←liftF_eval (f := ωs)] aesop (config := {warnOnNonterminal := false}) - rw [BerlekampWelch.errors_are_roots_of_elocPoly - (i.isLt) + rw [BerlekampWelch.errors_are_roots_of_elocPoly + (i.isLt) (by aesop (add simp [liftF_eval]))] @[simp] @@ -174,7 +174,7 @@ lemma elocPolyF_deg {ωs f : Fin n → F} : (ElocPolyF ωs f p).natDegree = Δ hammingDist.eq_def, Finset.card_filter, Finset.sum_fin_eq_sum_range, Finset.sum_range_succ ]) <;> (apply Finset.sum_congr rfl; aesop (add safe (by omega))) -end +end end diff --git a/ArkLib/Data/CodingTheory/DivergenceOfSets.lean b/ArkLib/Data/CodingTheory/DivergenceOfSets.lean index 69fdc8f9e..d902938e2 100644 --- a/ArkLib/Data/CodingTheory/DivergenceOfSets.lean +++ b/ArkLib/Data/CodingTheory/DivergenceOfSets.lean @@ -14,33 +14,31 @@ namespace DivergenceOfSets noncomputable section -open Classical RelativeHamming +open Classical Code variable {ι : Type*} [Fintype ι] [Nonempty ι] - {F : Type*} + {F : Type*} [DecidableEq F] {U V C : Set (ι → F)} -/-- - The set of possible relative Hamming distances between two sets. +/-- The set of possible relative Hamming distances between two sets. 
-/ -def possibleDeltas (U V : Set (ι → F)) : Set ℚ := - {d : ℚ | ∃ u ∈ U, δᵣ(u,V) = d} +def possibleDeltas (U V : Set (ι → F)) : Set ℚ≥0 := + {d : ℚ≥0 | ∃ u ∈ U, δᵣ(u,V) = d} -/-- - The set of possible relative Hamming distances between two sets is well-defined. +/-- The set of possible relative Hamming distances between two sets is well-defined. -/ @[simp] lemma possibleDeltas_subset_relHammingDistRange : possibleDeltas U C ⊆ relHammingDistRange ι := fun _ _ ↦ by aesop (add simp possibleDeltas) -/-- - The set of possible relative Hamming distances between two sets is finite. +/-- The set of possible relative Hamming distances between two sets is finite. -/ @[simp] lemma finite_possibleDeltas : (possibleDeltas U V).Finite := Set.Finite.subset finite_relHammingDistRange possibleDeltas_subset_relHammingDistRange +open Classical in def divergence (U V : Set (ι → F)) : ℚ := haveI : Fintype (possibleDeltas U V) := @Fintype.ofFinite _ finite_possibleDeltas if h : (possibleDeltas U V).Nonempty diff --git a/ArkLib/Data/CodingTheory/InterleavedCode.lean b/ArkLib/Data/CodingTheory/InterleavedCode.lean index ec4f105dc..b189b7564 100644 --- a/ArkLib/Data/CodingTheory/InterleavedCode.lean +++ b/ArkLib/Data/CodingTheory/InterleavedCode.lean @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Katerina Hristova, František Silváši -/ +import ArkLib.Data.CodingTheory.Basic import ArkLib.Data.CodingTheory.ReedSolomon import Mathlib.Order.CompletePartialOrder import Mathlib.Probability.Distributions.Uniform @@ -97,18 +98,34 @@ def distToCode [DecidableEq F] (U : Matrix κ ι F) (IC : MatrixSubmodule κ ι -/ notation "Δ(" U "," IC ")" => distToCode U IC +/-- +Relative distance between codewords of an interleaved code. 
+ -/ +def relDistCodewords [DecidableEq F] (U V : Matrix κ ι F) : ℝ := + (Matrix.neqCols U V).card / Fintype.card ι + +/--list of codewords of IC r-close to U, + with respect to relative distance of interleaved codes.-/ +def relHammingBallInterleavedCode [DecidableEq F] (U : Matrix κ ι F) + (IC : MatrixSubmodule κ ι F) (r : ℝ) := + {V | V ∈ IC ∧ relDistCodewords U V < r} + +/--`Λᵢ(U, IC, r)` denotes the list of codewords of IC r-close to U-/ +notation "Λᵢ(" U "," IC "," r ")" => relHammingBallInterleavedCode U IC r + /-- The minimal distance of an interleaved code is the same as the minimal distance of its underlying linear code. -/ lemma minDist_eq_minDist [DecidableEq F] {IC : LawfulInterleavedCode κ ι F} : - LinearCode.minDist (F := F) IC.1.LC = minDist IC.1.MF := by sorry + Code.minDist (IC.1.LC : Set (ι → F)) = minDist IC.1.MF := by sorry end InterleavedCode noncomputable section open InterleavedCode +open Code variable {F : Type*} [Field F] [Finite F] [DecidableEq F] {κ : Type*} [Fintype κ] {ι : Type*} [Fintype ι] @@ -121,7 +138,7 @@ local instance : Fintype F := Fintype.ofFinite F lemma distInterleavedCodeToCodeLB {IC : LawfulInterleavedCode κ ι F} {U : Matrix κ ι F} {e : ℕ} (hF: Fintype.card F ≥ e) - (he : (e : ℚ) ≤ (codeDist (IC.1.LC : Set (ι → F)) / 3)) (hU : e < Δ(U,IC.1.MF)) : + (he : (e : ℚ) ≤ (minDist (IC.1.LC : Set (ι → F)) / 3)) (hU : e < Δ(U,IC.1.MF)) : ∃ v ∈ Matrix.rowSpan U , e < distFromCode v IC.1.LC := sorry namespace ProximityToRS diff --git a/ArkLib/Data/CodingTheory/JohnsonBound.lean b/ArkLib/Data/CodingTheory/JohnsonBound.lean deleted file mode 100644 index 4c2ce5ebc..000000000 --- a/ArkLib/Data/CodingTheory/JohnsonBound.lean +++ /dev/null @@ -1,202 +0,0 @@ -/- Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. 
-Authors: Ilia Vlasov --/ -import Mathlib.Algebra.Field.Rat -import Mathlib.Analysis.Convex.Function -import Mathlib.Data.Real.Sqrt -import Mathlib.Data.Set.Pairwise.Basic -import Mathlib.Algebra.BigOperators.Field -import Mathlib.Analysis.Convex.Jensen -import Mathlib.Algebra.Module.LinearMap.Defs - -import ArkLib.Data.CodingTheory.Basic -import ArkLib.Data.CodingTheory.JohnsonBound.Choose2 -import ArkLib.Data.CodingTheory.JohnsonBound.Expectations -import ArkLib.Data.CodingTheory.JohnsonBound.Lemmas - -namespace JohnsonBound - -/-! -This module is based on the Johnson Bound section from [listdecoding]. -In what follows we reference theorems from [listdecoding] by default. - -## References - -* [Venkatesan Guruswami, *Algorithmic Results in List Decoding*][listdecoding] --/ - -variable {n : ℕ} -variable {F : Type} [Fintype F] [DecidableEq F] - -/-- -The denominator of the bound from theorem 3.1. --/ -def JohnsonDenominator (B : Finset (Fin n → F)) (v : Fin n → F) : ℚ := - let e := e B v - let d := d B - let q : ℚ := Fintype.card F - let frac := q / (q - 1) - (1- frac * e/n) ^ 2 - (1 - frac * d/n) - -lemma johnson_denominator_def {B : Finset (Fin n → F)} {v : Fin n → F} : - JohnsonDenominator B v = ((1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (e B v / n)) ^ 2 - - (1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (d B/n))) := by - simp [JohnsonDenominator] - field_simp - -/-- -The bound from theorem 3.1 makes sense only if the denominator is positive. -This condition ensures that holds. --/ -def JohnsonConditionStrong (B : Finset (Fin n → F)) (v : Fin n → F) : Prop := - let e := e B v - let d := d B - let q : ℚ := Fintype.card F - let frac := q / (q - 1) - (1 - frac * d/n) < (1- frac * e/n) ^ 2 - -/-- -The function used for q-ary Johnson Bound. 
--/ -noncomputable def J (q δ : ℚ) : ℝ := - let frac := q / (q - 1) - (1 / frac) * (1 - Real.sqrt (1 - frac * δ)) - -lemma sqrt_le_J {q x : ℚ} : - 1 - ((1-x) : ℝ).sqrt ≤ J q x := by sorry - -/-- -The q-ary Johnson bound. --/ -def JohnsonConditionWeak (B : Finset (Fin n → F)) (e : ℕ) : Prop := - let d := sInf { d | ∃ u ∈ B, ∃ v ∈ B, u ≠ v ∧ hammingDist u v = d } - let q : ℚ := Fintype.card F - (e : ℚ) / n < J q (d / n) - -lemma johnson_condition_weak_implies_strong {B : Finset (Fin n → F)} {v : Fin n → F} {e : ℕ} - (h : JohnsonConditionWeak B e) - : - JohnsonConditionStrong (B ∩ ({ x | Δ₀(x, v) ≤ e } : Finset _)) v := by - sorry - -private lemma johnson_condition_strong_implies_n_pos {B : Finset (Fin n → F)} {v : Fin n → F} - (h_johnson : JohnsonConditionStrong B v) - : - 0 < n := by - cases n <;> try simp [JohnsonConditionStrong] at * - -private lemma johnson_condition_strong_implies_2_le_F_card {B : Finset (Fin n → F)} {v : Fin n → F} - (h_johnson : JohnsonConditionStrong B v) - : - 2 ≤ Fintype.card F := by - generalize h : Fintype.card F = card - rcases card with _ | card - · simp [JohnsonConditionStrong] at h_johnson - rw [h] at h_johnson - simp at h_johnson - · rcases card with _ | card - · simp [JohnsonConditionStrong] at h_johnson - rw [h] at h_johnson - simp at h_johnson - · omega - -private lemma johnson_condition_strong_implies_2_le_B_card {B : Finset (Fin n → F)} {v : Fin n → F} - (h_johnson : JohnsonConditionStrong B v) - : - 2 ≤ B.card := by - generalize h : B.card = card - rcases card with _ | card - · simp [JohnsonConditionStrong] at * - simp [e, d] at * - subst h - simp at h_johnson - · rcases card with _ | card - · simp [JohnsonConditionStrong] at * - simp [e, d] at * - rw [h] at h_johnson - simp [choose_2] at h_johnson - have h_set := Finset.card_eq_one.1 h - rcases h_set with ⟨a, h_set⟩ - rw [h_set] at h_johnson - simp at h_johnson - generalize hq : Fintype.card F = q - rcases q with _ | q - · simp [hq] at h_johnson - · rcases q with _ | q - · 
simp [hq] at h_johnson - · have h : (Fintype.card F : ℚ) /((Fintype.card F : ℚ) - 1) = - (1 : ℚ) + (1:ℚ)/((Fintype.card F : ℚ) - 1) := by - have h : (Fintype.card F : ℚ) = ((Fintype.card F : ℚ) - 1) + 1 := by ring - conv => - lhs - congr - rw [h] - rfl - rfl - rw [Field.div_eq_mul_inv, Field.div_eq_mul_inv] - rw [add_mul] - rw [Field.mul_inv_cancel _ (by { - rw [hq] - simp - aesop - })] - rw [h] at h_johnson - have h' := JohnsonBound.a_lemma_im_not_proud_of (v := v) (a := a) (by omega) - have h : (1 :ℚ) < (1 : ℚ) := by - apply lt_of_lt_of_le h_johnson - assumption - simp at h - · omega - -/-- -`JohnsonConditionStrong` is equvalent to `JohnsonDenominator` being positive. --/ -lemma johnson_condition_strong_iff_johnson_denom_pos {B : Finset (Fin n → F)} {v : Fin n → F} : - JohnsonConditionStrong B v ↔ 0 < JohnsonDenominator B v := by - simp [JohnsonDenominator, JohnsonConditionStrong] - -/-- -Theorem 3.1. ---/ -theorem johnson_bound [Field F] {B : Finset (Fin n → F)} {v : Fin n → F} - (h_condition : JohnsonConditionStrong B v) - : - let d := d B - let q : ℚ := Fintype.card F - let frac := q / (q - 1) - B.card ≤ (frac * d/n) / JohnsonDenominator B v - := by - simp - have h_condition' := h_condition - rw [johnson_condition_strong_iff_johnson_denom_pos] at h_condition - rw [Field.div_eq_mul_inv] - apply le_of_mul_le_mul_right (a0 := h_condition) - rw [mul_assoc, mul_comm ((_)⁻¹) _, Field.mul_inv_cancel _ (by linarith)] - simp - rw [johnson_denominator_def] - apply JohnsonBound.johnson_bound_lemma - (johnson_condition_strong_implies_n_pos h_condition') - (johnson_condition_strong_implies_2_le_B_card h_condition') - (johnson_condition_strong_implies_2_le_F_card h_condition') - -/-- -Alphabet-free Johnson bound from [codingtheory]. 
-## References - -* [Venkatesan Guruswami, Atri Rudra, Madhu Sudan, *Essential Coding Theory*][codingtheory] --/ -theorem johnson_bound_alphabet_free [Field F] [DecidableEq F] - {B : Finset (Fin n → F)} - {v : Fin n → F} - {e : ℕ} - : - let d := sInf { d | ∃ u ∈ B, ∃ v ∈ B, u ≠ v ∧ hammingDist u v = d } - let q : ℚ := Fintype.card F - let frac := q / (q - 1) - e ≤ n - ((n * (n - d)) : ℝ).sqrt - → - (B ∩ ({ x | Δ₀(x, v) ≤ e } : Finset _)).card ≤ q * d * n := by - sorry - -end JohnsonBound diff --git a/ArkLib/Data/CodingTheory/JohnsonBound/Basic.lean b/ArkLib/Data/CodingTheory/JohnsonBound/Basic.lean new file mode 100644 index 000000000..9e68ac6bc --- /dev/null +++ b/ArkLib/Data/CodingTheory/JohnsonBound/Basic.lean @@ -0,0 +1,153 @@ +/- Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Ilia Vlasov, František Silváši +-/ +import ArkLib.Data.CodingTheory.JohnsonBound.Lemmas + +namespace JohnsonBound + +/-! +This module is based on the Johnson Bound section from [listdecoding]. +In what follows we reference theorems from [listdecoding] by default. + +## References + +* [Venkatesan Guruswami, *Algorithmic Results in List Decoding*][listdecoding] +-/ + +variable {n : ℕ} + {F : Type} [Fintype F] [DecidableEq F] + {B : Finset (Fin n → F)} {v : Fin n → F} + +/-- +The denominator of the bound from theorem 3.1. +-/ +def JohnsonDenominator (B : Finset (Fin n → F)) (v : Fin n → F) : ℚ := + let e := e B v + let d := d B + let q : ℚ := Fintype.card F + let frac := q / (q - 1) + (1- frac * e/n) ^ 2 - (1 - frac * d/n) + +lemma johnson_denominator_def : + JohnsonDenominator B v = ((1 - ((Fintype.card F) / (Fintype.card F - 1)) * (e B v / n)) ^ 2 + - (1 - ((Fintype.card F) / (Fintype.card F - 1)) * (d B/n))) := by + simp [JohnsonDenominator] + field_simp + +/-- +The bound from theorem 3.1 makes sense only if the denominator is positive. +This condition ensures that holds. 
+-/ +def JohnsonConditionStrong (B : Finset (Fin n → F)) (v : Fin n → F) : Prop := + let e := e B v + let d := d B + let q : ℚ := Fintype.card F + let frac := q / (q - 1) + (1 - frac * d/n) < (1- frac * e/n) ^ 2 + +/-- +The function used for q-ary Johnson Bound. +-/ +noncomputable def J (q δ : ℚ) : ℝ := + let frac := q / (q - 1) + (1 / frac) * (1 - Real.sqrt (1 - frac * δ)) + +lemma sqrt_le_J {q x : ℚ} : + 1 - ((1-x) : ℝ).sqrt ≤ J q x := by sorry + +/-- +The q-ary Johnson bound. +-/ +def JohnsonConditionWeak (B : Finset (Fin n → F)) (e : ℕ) : Prop := + let d := sInf { d | ∃ u ∈ B, ∃ v ∈ B, u ≠ v ∧ hammingDist u v = d } + let q : ℚ := Fintype.card F + (e : ℚ) / n < J q (d / n) + +lemma johnson_condition_weak_implies_strong {B : Finset (Fin n → F)} {v : Fin n → F} {e : ℕ} + (h : JohnsonConditionWeak B e) + : + JohnsonConditionStrong (B ∩ ({ x | Δ₀(x, v) ≤ e } : Finset _)) v := by + sorry + +private lemma johnson_condition_strong_implies_n_pos + (h_johnson : JohnsonConditionStrong B v) + : + 0 < n := by + cases n <;> try simp [JohnsonConditionStrong] at * + +private lemma johnson_condition_strong_implies_2_le_F_card + (h_johnson : JohnsonConditionStrong B v) + : + 2 ≤ Fintype.card F := by + revert h_johnson + dsimp [JohnsonConditionStrong] + rcases Fintype.card F with _ | _ | _ <;> aesop + +private lemma johnson_condition_strong_implies_2_le_B_card + (h_johnson : JohnsonConditionStrong B v) + : + 2 ≤ B.card := by + dsimp [JohnsonConditionStrong] at h_johnson + rcases eq : B.card with _ | card | _ <;> [simp_all [e, d]; skip; omega] + obtain ⟨a, ha⟩ := Finset.card_eq_one.1 eq + replace h_johnson : 1 < |1 - (Fintype.card F) / ((Fintype.card F) - 1) * Δ₀(v, a) / (n : ℚ)| := by + simp_all [e, d, choose_2] + generalize eq₁ : Fintype.card F = q + rcases q with _ | _ | q <;> [simp_all; simp_all; skip] + have h : (Fintype.card F : ℚ) / (Fintype.card F - 1) = 1 + 1 / (Fintype.card F - 1) := by + have : (Fintype.card F : ℚ) - 1 ≠ 0 := by simp [sub_eq_zero]; omega + field_simp + 
have h' := JohnsonBound.abs_one_sub_div_le_one (v := v) (a := a) (by omega) + exact absurd (lt_of_lt_of_le (h ▸ h_johnson) h') (lt_irrefl _) + +/-- +`JohnsonConditionStrong` is equvalent to `JohnsonDenominator` being positive. +-/ +lemma johnson_condition_strong_iff_johnson_denom_pos {B : Finset (Fin n → F)} {v : Fin n → F} : + JohnsonConditionStrong B v ↔ 0 < JohnsonDenominator B v := by + simp [JohnsonDenominator, JohnsonConditionStrong] + +/-- +Theorem 3.1. +--/ +theorem johnson_bound [Field F] + (h_condition : JohnsonConditionStrong B v) + : + let d := d B + let q : ℚ := Fintype.card F + let frac := q / (q - 1) + B.card ≤ (frac * d/n) / JohnsonDenominator B v + := by + suffices B.card * JohnsonDenominator B v ≤ + Fintype.card F / (Fintype.card F - 1) * d B / n by + rw [johnson_condition_strong_iff_johnson_denom_pos] at h_condition + rw [←mul_le_mul_right h_condition] + convert this using 1 + field_simp; rw [mul_div_mul_right]; linarith + rw [johnson_denominator_def] + exact JohnsonBound.johnson_bound_lemma + (johnson_condition_strong_implies_n_pos h_condition) + (johnson_condition_strong_implies_2_le_B_card h_condition) + (johnson_condition_strong_implies_2_le_F_card h_condition) + +/-- +Alphabet-free Johnson bound from [codingtheory]. 
+## References + +* [Venkatesan Guruswami, Atri Rudra, Madhu Sudan, *Essential Coding Theory*][codingtheory] +-/ +theorem johnson_bound_alphabet_free [Field F] [DecidableEq F] + {B : Finset (Fin n → F)} + {v : Fin n → F} + {e : ℕ} + : + let d := sInf { d | ∃ u ∈ B, ∃ v ∈ B, u ≠ v ∧ hammingDist u v = d } + let q : ℚ := Fintype.card F + let frac := q / (q - 1) + e ≤ n - ((n * (n - d)) : ℝ).sqrt + → + (B ∩ ({ x | Δ₀(x, v) ≤ e } : Finset _)).card ≤ q * d * n := by + sorry + +end JohnsonBound diff --git a/ArkLib/Data/CodingTheory/JohnsonBound/Choose2.lean b/ArkLib/Data/CodingTheory/JohnsonBound/Choose2.lean index a401ae1bb..b0f7d14cb 100644 --- a/ArkLib/Data/CodingTheory/JohnsonBound/Choose2.lean +++ b/ArkLib/Data/CodingTheory/JohnsonBound/Choose2.lean @@ -1,6 +1,6 @@ /- Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. -Authors: Ilia Vlasov +Authors: Ilia Vlasov, František Silváši -/ import Mathlib.Algebra.Field.Rat import Mathlib.Analysis.Convex.Function diff --git a/ArkLib/Data/CodingTheory/JohnsonBound/Expectations.lean b/ArkLib/Data/CodingTheory/JohnsonBound/Expectations.lean index c1db5a711..639e188f0 100644 --- a/ArkLib/Data/CodingTheory/JohnsonBound/Expectations.lean +++ b/ArkLib/Data/CodingTheory/JohnsonBound/Expectations.lean @@ -1,6 +1,6 @@ /- Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. 
-Authors: Ilia Vlasov +Authors: Ilia Vlasov, František Silváši -/ import Mathlib.Algebra.Field.Rat import Mathlib.Analysis.Convex.Function @@ -56,7 +56,7 @@ lemma lin_shift_d [Field F] [Fintype F] field_simp apply Finset.sum_bij (fun x _ => (x.1 - v, x.2 -v)) <;> try aesop -lemma e_ball_le_radius [Field F] {B : Finset (Fin n → F)} (v : Fin n → F) (r : ℚ) +lemma e_ball_le_radius [Field F] [Fintype F] {B : Finset (Fin n → F)} (v : Fin n → F) (r : ℚ) : e (B ∩ ({ x | Δ₀(x, v) ≤ r} : Finset _)) v ≤ r := by sorry diff --git a/ArkLib/Data/CodingTheory/JohnsonBound/Lemmas.lean b/ArkLib/Data/CodingTheory/JohnsonBound/Lemmas.lean index 818640702..48b01dea3 100644 --- a/ArkLib/Data/CodingTheory/JohnsonBound/Lemmas.lean +++ b/ArkLib/Data/CodingTheory/JohnsonBound/Lemmas.lean @@ -1,17 +1,7 @@ /- Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. -Authors: Ilia Vlasov +Authors: Ilia Vlasov, František Silváši -/ -import Mathlib.Algebra.Field.Rat -import Mathlib.Analysis.Convex.Function -import Mathlib.Data.Real.Sqrt -import Mathlib.Data.Set.Pairwise.Basic -import Mathlib.Algebra.BigOperators.Field -import Mathlib.Analysis.Convex.Jensen -import Mathlib.Algebra.Module.LinearMap.Defs - -import ArkLib.Data.CodingTheory.Basic -import ArkLib.Data.CodingTheory.JohnsonBound.Choose2 import ArkLib.Data.CodingTheory.JohnsonBound.Expectations namespace JohnsonBound @@ -47,262 +37,84 @@ private lemma K_eq_sum {α : F} : K B i α = ∑ (x : B), if x.1 i = α then 1 e simp_rw [Finset.card_filter, Finset.sum_attach_eq_sum_dite] apply Finset.sum_congr <;> aesop -private lemma sum_choose_K' [Zero F] - (h_card : 2 ≤ (Fintype.card F)) - : - (Fintype.card (α := F) - 1) * choose_2 ((B.card - K B i 0) / (Fintype.card (α := F) - 1)) ≤ - ∑ (α : F) with α ≠ 0, choose_2 (K B i α) := by - - rw [←sum_K_eq_card (i := i)] - simp - have h_univ : Finset.univ = insert 0 ({x : F | x ≠ 0} : Finset F) := by - ext x - simp - tauto - rw 
[h_univ, Finset.sum_insert (by simp)] - field_simp - - have h : ((∑ x ∈ {x | ¬x = 0}, ↑(K B i x)) : ℚ) / (↑(Fintype.card F) - 1) - = ∑ x ∈ {x : F | ¬x = 0}, ((1 : ℚ)/((Fintype.card F) - 1)) * ↑(K B i x) := by - rw [Finset.sum_div] - congr - field_simp - rw [h] - let w : F → ℚ := fun _ => (1 : ℚ) / (↑(Fintype.card F) - 1) - let p : F → ℚ := fun x => K B i x - have h : ∑ x ∈ {x : F | ¬x = 0}, ((1 : ℚ)/((Fintype.card F) - 1)) * ↑(K B i x) - = ∑ x ∈ {x : F | ¬x = 0}, w x • p x := by simp [w, p] - rw [h] - rw [mul_comm] - apply le_trans - rewrite [mul_le_mul_right (by field_simp; linarith)] - apply ConvexOn.map_sum_le choose_2_convex (by { - simp [w] - intro i _ - linarith - }) - (by { - simp [w] - have h : (Finset.univ (α := F)).card = Fintype.card F := by rfl - conv => - congr - congr - rfl - rw [←h, h_univ] - rfl - rfl - simp - rw [Field.mul_inv_cancel] - simp - rw [←ne_eq] - rw [←Finset.nonempty_iff_ne_empty] - simp [Finset.Nonempty] - have h_two := (Finset.one_lt_card (s := Finset.univ (α := F))).1 (by omega) - rcases h_two with ⟨a, ha, b, hb, hab⟩ - by_cases h_ne_a : a ≠ 0 <;> try tauto - simp at h_ne_a - rw [h_ne_a] at hab - tauto - }) - (by simp) - rw [mul_comm] - simp [w, p] - rw [Finset.mul_sum] - conv => - lhs - congr - rfl - ext α - rw [←mul_assoc] - rw [Field.mul_inv_cancel _ (by { - intro contr - have contr : (↑(Fintype.card F) : ℚ) = 1 := by - rw [←zero_add 1, ←contr] - field_simp - simp at contr - omega - })] - rw [one_mul] - rfl - have h : ({x ∈ insert 0 ({x | ¬x = 0} : Finset F) | ¬x = 0} : Finset F) - = ({ x : F | ¬ x = 0 } : Finset F) := - by - ext x - simp - tauto - rw [h] +@[simp] +private lemma K_le_card {α : F} : K B i α ≤ B.card := by + simp [K, Fi] + exact Finset.card_le_card fun _ ha ↦ by simp at ha; exact ha.1 -private lemma sum_choose_K'' [Zero F] +open Finset in +private lemma sum_choose_K' [Zero F] (h_card : 2 ≤ (Fintype.card F)) : (Fintype.card (α := F) - 1) * choose_2 ((B.card - K B i 0) / (Fintype.card (α := F) - 1)) ≤ ∑ (α : F) with α 
≠ 0, choose_2 (K B i α) := by rw [←sum_K_eq_card (i := i), Nat.cast_sum] set X₁ : ℚ := Fintype.card F - 1 + have X₁h : X₁ ≠ 0 := by simp [X₁, sub_eq_zero]; omega set X₂ := K B i - suffices X₁ * choose_2 ((∑ x with x ≠ 0, ↑(X₂ x)) / X₁) ≤ + suffices X₁ * choose_2 (∑ x with x ≠ 0, (fun _ ↦ X₁⁻¹) x • (Nat.cast (R := ℚ) ∘ X₂) x) ≤ ∑ α with α ≠ 0, choose_2 ↑(X₂ α) by + simp at this convert this - rw [Finset.sum_eq_sum_diff_singleton_add (i := 0) (by simp)] - ring_nf; apply Finset.sum_congr (Finset.ext _) <;> simp - - - - have h : ((∑ x ∈ {x : F | ¬x = 0}, ↑(K B i x)) : ℚ) / (↑(Fintype.card F) - 1) - = ∑ x ∈ {x : F | ¬x = 0}, ((1 : ℚ)/((Fintype.card F) - 1)) * ↑(K B i x) := by - rw [Finset.sum_div] - congr - field_simp - rw [h] - - let w : F → ℚ := fun _ => (1 : ℚ) / (↑(Fintype.card F) - 1) - let p : F → ℚ := fun x => K B i x - have h : ∑ x ∈ {x : F | ¬x = 0}, ((1 : ℚ)/((Fintype.card F) - 1)) * ↑(K B i x) - = ∑ x ∈ {x : F | ¬x = 0}, w x • p x := by simp [w, p] - rw [h] - rw [mul_comm] - apply le_trans - rewrite [mul_le_mul_right (by field_simp; linarith)] - apply ConvexOn.map_sum_le choose_2_convex (by { - simp [w] - intro i _ - linarith - }) - (by { - simp [w] - have h : (Finset.univ (α := F)).card = Fintype.card F := by rfl - conv => - congr - congr - rfl - rw [←h, h_univ] - rfl - rfl - simp - rw [Field.mul_inv_cancel] - simp - rw [←ne_eq] - rw [←Finset.nonempty_iff_ne_empty] - simp [Finset.Nonempty] - have h_two := (Finset.one_lt_card (s := Finset.univ (α := F))).1 (by omega) - rcases h_two with ⟨a, ha, b, hb, hab⟩ - by_cases h_ne_a : a ≠ 0 <;> try tauto - simp at h_ne_a - rw [h_ne_a] at hab - tauto - }) - (by simp) - rw [mul_comm] - simp [w, p] - rw [Finset.mul_sum] - conv => - lhs - congr - rfl - ext α - rw [←mul_assoc] - rw [Field.mul_inv_cancel _ (by { - intro contr - have contr : (↑(Fintype.card F) : ℚ) = 1 := by - rw [←zero_add 1, ←contr] - field_simp - simp at contr - omega - })] - rw [one_mul] - rfl - have h : ({x ∈ insert 0 ({x | ¬x = 0} : Finset F) | ¬x = 0} 
: Finset F) - = ({ x : F | ¬ x = 0 } : Finset F) := - by - ext x - simp - tauto - rw [h] + rw [sum_eq_sum_diff_singleton_add (i := 0) (by simp)] + ring_nf + rw [sum_mul] + apply sum_congr (ext _) <;> field_simp + apply le_trans + · rewrite [mul_le_mul_left (by aesop)] + apply ConvexOn.map_sum_le choose_2_convex + (by aesop (add safe (by omega))) + (by simp [X₁]; rw [Field.mul_inv_cancel _ X₁h]) + (by simp) + · rw [mul_sum]; field_simp private def sum_choose_K_i (B : Finset (Fin n → F)) (i : Fin n) : ℚ := ∑ (α : F), choose_2 (K B i α) -private lemma le_sum_choose_K_i [Zero F] {B : Finset (Fin n → F)} {i : Fin n} - (h_card : 2 ≤ (Fintype.card F)) - : - choose_2 (K B i 0) + ((Finset.univ (α := F)).card - 1 : ℚ) - * choose_2 ((B.card - K B i 0)/((Finset.univ (α := F)).card-1)) - ≤ sum_choose_K_i B i := by - simp [sum_choose_K_i] - have h : insert 0 ({ x : F | ¬x=0} : Finset F) = Finset.univ := by - ext x - simp - tauto - conv => - rhs - rw [←h] - rfl - simp [Finset.sum_insert] - exact sum_choose_K' h_card +private lemma le_sum_choose_K [Zero F] + (h_card : 2 ≤ (Fintype.card F)) : + choose_2 (K B i 0) + (Fintype.card (α := F) - 1) * + choose_2 ((B.card - K B i 0) / (Fintype.card (α := F) - 1)) ≤ sum_choose_K_i B i := by + unfold sum_choose_K_i + rw [show Finset.univ = {0} ∪ {x : F | x ≠ 0}.toFinset by ext; simp; tauto] + simp [Finset.sum_union, sum_choose_K' h_card] private def k [Zero F] (B : Finset (Fin n → F)) : ℚ := - (1 : ℚ)/n * ∑ i, K B i 0 + (1 : ℚ) / n * ∑ i, K B i 0 +omit [Fintype F] in private lemma hamming_weight_eq_sum [Zero F] {x : Fin n → F} : ‖x‖₀ = ∑ i, if x i = 0 then 0 else 1 := by simp [hammingNorm, Finset.sum_ite] -private lemma sum_hamming_weight_sum [Zero F] {B : Finset (Fin n → F)} +private lemma sum_hamming_weight_sum [Zero F] : - ∑ x ∈ B, ( ↑‖x‖₀ : ℚ) = n * B.card - ∑ i, K B i 0 := by - conv => - lhs - congr - rfl - ext x - rw [hamming_weight_eq_sum] - simp - rfl - rw [Finset.sum_comm] - conv => - rhs - simp - congr - rfl - congr - rfl - ext x - 
rw [K_eq_sum] - rfl - have h : (↑n : ℚ) = ∑ i : Fin n, 1 := by simp - rw [h, Finset.sum_mul] - rw [←Finset.sum_sub_distrib] - congr - ext i - rw [one_mul _] - simp - rw [Finset.sum_ite] - simp - have h_card : B.card = B.attach.card := by simp - rw [h_card] - have h_card - : B.attach.card = {x ∈ B.attach | x.1 i = 0}.card + ({x ∈ B.attach | x.1 i = 0}ᶜ).card := by - simp - rw [h_card] - rw [Nat.cast_add] - field_simp - simp_rw [Finset.attach_eq_univ, Finset.compl_filter, Finset.card_filter] - rw [Finset.sum_bij' (i := fun a b ↦ (⟨a, b⟩ : {x : Fin n → F // x ∈ B})) (j := fun a b ↦ a)] <;> + ∑ x ∈ B, (‖x‖₀ : ℚ) = n * B.card - ∑ i, K B i 0 := by + simp only [hamming_weight_eq_sum, Nat.cast_sum, Nat.cast_ite, CharP.cast_eq_zero, Nat.cast_one, + K_eq_sum, Finset.sum_boole, Nat.cast_id] + simp_rw [Finset.card_filter] + rw [Finset.sum_comm, eq_sub_iff_add_eq] + simp_rw [Nat.cast_sum, Nat.cast_ite] + conv in Finset.sum _ _ => arg 2; ext; arg 2; ext; rw [←ite_not] + simp_rw [Finset.univ_eq_attach, Finset.sum_attach_eq_sum_dite] + simp only [Nat.cast_one, CharP.cast_eq_zero, dite_eq_ite, Finset.sum_ite_mem, Finset.univ_inter] + rw [←Finset.sum_add_distrib] + simp_rw [←Finset.sum_filter, add_comm, Finset.sum_filter_add_sum_filter_not] simp -private lemma k_and_e [Zero F] {B : Finset (Fin n → F)} +private lemma k_and_e [Zero F] (h_n : n ≠ 0) (h_B : B.card ≠ 0) : - k B = B.card * (n - e B 0)/n := by + k B = B.card * (n - e B 0) / n := by simp [e, k, sum_hamming_weight_sum] field_simp -private lemma k_and_e' [Zero F] {B : Finset (Fin n → F)} +private lemma k_and_e' [Zero F] (h_n : n ≠ 0) (h_B : B.card ≠ 0) : - k B / B.card = (n - e B 0)/n := by + k B / B.card = (n - e B 0) / n := by rw [k_and_e h_n h_B] field_simp ring @@ -312,139 +124,80 @@ private lemma k_choose_2 [Zero F] {B : Finset (Fin n → F)} (h_B : B.card ≠ 0) : n * choose_2 (k B) ≤ ∑ i, choose_2 (K B i 0) := by - simp [k] - rw [Finset.mul_sum] - let w : Fin n → ℚ := fun _ => (1:ℚ)/n - let p : Fin n → ℚ := fun i => ↑(K B 
i 0) - have h : ∑ i, (↑n : ℚ)⁻¹ * ↑(K B i 0) = ∑ i, w i • p i := by simp [w, p] - rw [h] - rw [mul_comm] - apply le_trans + suffices choose_2 (∑ i, (fun _ ↦ (1:ℚ) / n) i • (fun i ↦ K B i 0) i) * n ≤ + ∑ i, choose_2 (K B i 0) by + rw [mul_comm]; convert this + simp [k, Finset.mul_sum] + transitivity apply (mul_le_mul_right (by simp; omega)).2 - apply ConvexOn.map_sum_le (choose_2_convex) (by simp [w]) - (by { - simp [w] - rw [Field.mul_inv_cancel] - aesop - }) - (by simp) - simp [w, p] + (ConvexOn.map_sum_le + choose_2_convex + (by simp) + (by field_simp) + (by simp)) rw [Finset.sum_mul] - conv => - lhs - congr - rfl - ext i - rw [mul_comm] - rw [←mul_assoc] - rw [Field.mul_inv_cancel _ (by aesop)] - simp - rfl + field_simp private def aux_frac (B : Finset (Fin n → F)) (x : ℚ) : ℚ := - ((↑B.card : ℚ) - x)/(Fintype.card F - 1) + (B.card - x) / (Fintype.card F - 1) -private lemma sum_1_over_n_aux_frac_k_i [Zero F] {B : Finset (Fin n → F)} - (h_n : 0 < n) - (h_card : 2 ≤ Fintype.card F) - : ∑ i, (1 : ℚ)/n * aux_frac B (K B i 0) = aux_frac B (k B) := by +private lemma sum_1_over_n_aux_frac_k_i [Zero F] + (h_n : 0 < n) : ∑ i, 1/n * aux_frac B (K B i 0) = aux_frac B (k B) := by simp [aux_frac] - rw [←Finset.mul_sum, ←Finset.sum_div] - have h : (↑(Fintype.card F) : ℚ) - 1 > 0 := by - simp - omega - rw [Field.div_eq_mul_inv] - rw [Field.div_eq_mul_inv] - rw [←mul_assoc] - have h_ne : (↑(Fintype.card F) - (1 : ℚ))⁻¹ ≠ 0 := by field_simp - suffices h : ((↑n : ℚ)⁻¹ * ∑ i, ((↑B.card : ℚ) - ↑(K B i 0))) - = (↑B.card - k B) by rw [h] - simp - ring_nf - rw [Field.mul_inv_cancel _ (by { - simp - omega - })] - simp [k] + suffices (n : ℚ)⁻¹ * (↑n * B.card - ∑ x, JohnsonBound.K B x 0) = B.card - k B by + rw [←Finset.mul_sum, ←Finset.sum_div, ←this] + field_simp + field_simp [k]; ac_rfl -private lemma aux_sum [Zero F] {B : Finset (Fin n → F)} +private lemma aux_sum [Zero F] (h_n : 0 < n) - (h_card : 2 ≤ Fintype.card F) - : ↑n * choose_2 (aux_frac B (k B)) ≤ ∑ i, choose_2 (aux_frac B (K 
B i 0)) := by - rw [←sum_1_over_n_aux_frac_k_i h_n h_card] - let w : Fin n → ℚ := fun _ => (1 : ℚ)/n - let p : Fin n → ℚ := fun i => aux_frac B ↑(K B i 0) - have h : (∑ i, 1 / ↑n * aux_frac B ↑(K B i 0)) = ∑ i, w i • p i := by - simp [w,p] - rw [h] - rw [mul_comm] - apply le_trans + : n * choose_2 (aux_frac B (k B)) ≤ ∑ i, choose_2 (aux_frac B (K B i 0)) := by + suffices choose_2 (∑ i, (fun _ ↦ (1:ℚ)/n) i • (fun x ↦ aux_frac B (K B x 0)) i) * ↑n ≤ + ∑ i, choose_2 (JohnsonBound.aux_frac B (JohnsonBound.K B i 0)) by + rw [←sum_1_over_n_aux_frac_k_i h_n, mul_comm] + convert this + transitivity apply (mul_le_mul_right (by simp; omega)).2 - apply ConvexOn.map_sum_le choose_2_convex (by simp [w]) - (by { - simp [w] - rw [Field.mul_inv_cancel] - simp - omega - }) - (by simp) - simp [w, p] + (ConvexOn.map_sum_le + choose_2_convex + (by simp) + (by field_simp ) + (by simp)) rw [Finset.sum_mul] - conv => - lhs - congr - rfl - ext i - rw [mul_comm] - rw [←mul_assoc] - rw [Field.mul_inv_cancel _ (by { - simp - omega - })] - simp - rfl - + field_simp -private lemma le_sum_sum_choose_K_i [Zero F] {B : Finset (Fin n → F)} +private lemma le_sum_sum_choose_K [Zero F] (h_n : 0 < n) (h_B : B.card ≠ 0) - (h_card : 2 ≤ (Fintype.card F)) + (h_card : 2 ≤ Fintype.card F) : - n * (choose_2 (k B) + ((Finset.univ (α := F)).card - 1 : ℚ) - * choose_2 ((B.card - k B)/((Finset.univ (α := F)).card-1))) + n * (choose_2 (k B) + (Fintype.card (α := F) - 1) + * choose_2 ((B.card - k B) / ((Fintype.card (α := F) - 1)))) ≤ ∑ i, sum_choose_K_i B i := by - rw [mul_add] - apply le_trans - apply add_le_add_right - exact k_choose_2 (n := n) (by omega) h_B - apply le_trans - apply add_le_add_left (by { - have h := aux_sum (B := B) h_n h_card - simp [aux_frac] at h - rewrite [←mul_assoc] - rewrite [mul_comm (↑n : ℚ)] - rewrite [mul_assoc] - apply le_trans - apply (mul_le_mul_left (by simp; omega)).2 - exact h + rw [mul_add] + transitivity + apply add_le_add_right (k_choose_2 (n := n) (by omega) h_B) + 
transitivity + apply add_le_add_left (by + rewrite [←mul_assoc, mul_comm (n : ℚ), mul_assoc] + transitivity + apply (mul_le_mul_left (by simp; omega)).2 (aux_sum h_n) rfl - }) - rw [Finset.mul_sum] - rw [←Finset.sum_add_distrib] - apply Finset.sum_le_sum - intro i _ - exact le_sum_choose_K_i h_card + ) + rw [Finset.mul_sum, ←Finset.sum_add_distrib] + exact Finset.sum_le_sum fun _ _ ↦ le_sum_choose_K h_card private def F2i (B : Finset (Fin n → F)) (i : Fin n) (α : F) : Finset ((Fin n → F) × (Fin n → F)) := - { x | x ∈ Finset.product B B ∧ x.1 i = α ∧ x.2 i = α ∧ x.1 ≠ x.2 } + { x | x ∈ B ×ˢ B ∧ x.1 i = α ∧ x.2 i = α ∧ x.1 ≠ x.2 } private def Bi (B : Finset (Fin n → F)) (i : Fin n) : Finset ((Fin n → F) × (Fin n → F)) := - { x | x ∈ Finset.product B B ∧ x.1 i = x.2 i ∧ x.1 ≠ x.2 } + { x | x ∈ B ×ˢ B ∧ x.1 i = x.2 i ∧ x.1 ≠ x.2 } -private lemma Bi_biUnion_F2i {B : Finset (Fin n → F)} {i : Fin n} : +private lemma Bi_biUnion_F2i : Bi B i = Finset.univ.biUnion (F2i B i) := by aesop (add simp [Bi, F2i]) -private lemma F2i_disjoint {B : Finset (Fin n → F)} {i : Fin n} : +@[simp] +private lemma F2i_disjoint : Set.PairwiseDisjoint Set.univ (F2i B i) := by simp [Set.PairwiseDisjoint @@ -454,329 +207,143 @@ private lemma F2i_disjoint {B : Finset (Fin n → F)} {i : Fin n} : , Finset.Nonempty , Finset.subset_iff ] - intro α₁ α₂ h_ne x h1 h2 x₁ x₂ contr - specialize (h1 x₁ x₂ contr) - specialize (h2 x₁ x₂ contr) + intro _ _ _ _ h1 h2 x₁ x₂ contr + specialize h1 x₁ x₂ contr + specialize h2 x₁ x₂ contr aesop -private lemma F2i_card {B : Finset (Fin n → F)} {i : Fin n} {α : F} : +private lemma F2i_card {α : F} : (F2i B i α).card = 2 * choose_2 (K B i α) := by simp [F2i] - have h : ({x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α ∧ ¬x.1 = x.2} : Finset ((Fin n → F) × (Fin n → F))) - = ({x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α } : Finset _) - \ ({x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α ∧ x.1 = x.2} : Finset _) := by - aesop - rw [h] - rw [Finset.card_sdiff (by { - 
intro x hx - simp at hx - simp - aesop - })] - have h : ({x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α} : Finset ((Fin n → F) × (Fin n → F))) - = (Finset.product (Fi B i α) (Fi B i α)) := by - simp [Fi] - ext x - simp - tauto - rw [h] - simp - have h : ({x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α ∧ x.1 = x.2} : Finset ((Fin n → F) × (Fin n → F))) - = ({ x | (x.1 = x.2) ∧ x.1 ∈ Fi B i α } : Finset ((Fin n → F) × (Fin n → F))) := by - ext x - simp [Fi] - aesop - rw [h] - have h : ({ x | (x.1 = x.2) ∧ x.1 ∈ Fi B i α } : Finset ((Fin n → F) × (Fin n → F))).card - = (Fi B i α).card := Finset.card_bij - (i := fun a _ => a.1) - (by simp) - (by simp) - (by simp [Fi]) - rw [h] - simp [choose_2, K] - ring_nf - rw [Nat.cast_sub (by { - by_cases h0 : (Fi B i α).card = 0 - · simp [h0] - · conv => - lhs - rw [←one_mul (Fi B i α).card] - rfl - apply le_trans - apply Nat.mul_le_mul_right (m := (Fi B i α).card) (Fi B i α).card - omega - ring_nf - rfl - })] - rw [pow_two] - simp - ring - -private lemma sum_of_not_equals {B : Finset (Fin n → F)} {i : Fin n} : - ∑ x ∈ (Finset.product B B) with x.1 ≠ x.2, (if x.1 i ≠ x.2 i then 1 else 0) + letI Tα := (Fin n → F) × (Fin n → F) + let S₁ : Finset Tα := {x | (x.1 ∈ B ∧ x.2 ∈ B) ∧ x.1 i = α ∧ x.2 i = α} + let S₂ : Finset _ := {x | x ∈ S₁ ∧ x.1 ≠ x.2} + set A := Fi B i α with eqA + suffices S₂.card = 2 * choose_2 (K B i α) by simp [S₂, S₁, ←this]; congr; ext; tauto + rw [ + show S₂ = S₁ \ ({x | x ∈ S₁ ∧ x.1 = x.2} : Finset _) by aesop, + Finset.card_sdiff fun _ _ ↦ by aesop, + show S₁ = A ×ˢ A by ext; rw [Finset.mem_product]; simp [S₁, Fi, A]; tauto, + Finset.filter_and + ] + simp; rw [Finset.card_prod_self_eq (s := A), Finset.card_product] + simp [choose_2, K, eqA.symm] + have : A.card ≤ A.card * A.card := Nat.le_mul_self _ + field_simp [this]; ring + +open Finset in +private lemma sum_of_not_equals : + ∑ x ∈ B ×ˢ B with x.1 ≠ x.2, (if x.1 i ≠ x.2 i then 1 else 0) = - 2 * choose_2 B.card - 2 * ∑ α, choose_2 (K B i α) - := by - have 
h : (∑ x ∈ {x ∈ B.product B | x.1 ≠ x.2}, if x.1 i ≠ x.2 i then (1 : ℚ) else 0) - = (∑ x ∈ ({x ∈ B.product B | x.1 ≠ x.2}), ((1 : ℚ) - if x.1 i = x.2 i then 1 else 0)) := by - congr - ext x - simp - aesop - rw [h] - rw [Finset.sum_sub_distrib] - simp - have h : {x ∈ B ×ˢ B | ¬x.1 = x.2} = (B ×ˢ B) \ {x ∈ B ×ˢ B | x.1 = x.2} := by - ext x - simp - tauto - conv => - lhs - congr - rw [h] - simp - rw [Finset.card_sdiff (by aesop)] - simp - congr - congr - rfl - rw [Finset.card_bij (t := B) - (i := fun a _ => a.1) - (by aesop) - (by { - simp - intro a b ha hb heq - subst heq - intro α₁ b hα₁ hb heq - subst heq - tauto - }) - (by simp) + 2 * choose_2 #B - 2 * ∑ α, choose_2 (K B i α) + := by + generalize eq₁ : {x ∈ B ×ˢ B | x.1 ≠ x.2} = s₁ + suffices #s₁ - #(Bi B i) = 2 * choose_2 #B - 2 * ∑ α, choose_2 (JohnsonBound.K B i α) by + rw [ + show (∑ x ∈ s₁, if x.1 i ≠ x.2 i then 1 else 0) + = (∑ x ∈ s₁, ((1 : ℚ) - if x.1 i = x.2 i then 1 else 0)) by congr; aesop ] - rfl - suffices h : (↑{x ∈ {x ∈ B ×ˢ B | ¬x.1 = x.2} | x.1 i = x.2 i}.card : ℚ) - = 2 * ∑ α, choose_2 ↑(K B i α) by { - rw [h] - simp [choose_2] - field_simp - rw [Nat.cast_sub (by { - by_cases h0 : B.card = 0 - · simp [h0] - · conv => - lhs - rw [←one_mul B.card] - rfl - apply le_trans - apply Nat.mul_le_mul_right (m := B.card) B.card - omega - ring_nf - rfl - })] - simp - ring_nf - } - have h : ({x ∈ {x ∈ B ×ˢ B | ¬x.1 = x.2} | x.1 i = x.2 i} : Finset _) - = Bi B i := by - ext x - simp [Bi] - tauto - rw [h] - rw [Bi_biUnion_F2i] - rw [Finset.card_biUnion (by simp [F2i_disjoint])] - simp - conv => - lhs - congr - rfl - ext α - rw [F2i_card] - rfl - rw [Finset.mul_sum] - + simp; convert this + ext; simp [←eq₁, Bi]; tauto + rw [ + show #s₁ = 2 * choose_2 #B by + rw [ + show s₁ = (B ×ˢ B) \ {x ∈ B ×ˢ B | x.1 = x.2} by ext; simp [eq₁.symm]; tauto, + card_sdiff (by simp) + ] + simp [choose_2] + zify [Nat.le_mul_self #B] + ring + ] + rw [Bi_biUnion_F2i, Finset.card_biUnion (by simp [F2i_disjoint])] + simp; simp_rw 
[F2i_card, mul_sum] + +omit [Fintype F] in private lemma hamming_dist_eq_sum {x y : Fin n → F} : - ↑Δ₀(x, y) = ∑ i, if x i = y i then 0 else 1 := by + Δ₀(x, y) = ∑ i, if x i = y i then 0 else 1 := by simp [hammingDist, Finset.sum_ite] +omit [Fintype F] [DecidableEq F] in +private lemma choose_2_card_ne_zero (h : 2 ≤ B.card) : choose_2 ↑B.card ≠ 0 := by + simp [choose_2, sub_eq_zero] + aesop (add safe (by omega)) + +omit [Fintype F] in private lemma d_eq_sum {B : Finset (Fin n → F)} (h_B : 2 ≤ B.card) : - 2 * choose_2 B.card * d B = ∑ i, ∑ x ∈ (Finset.product B B) with x.1 ≠ x.2, (if x.1 i ≠ x.2 i then 1 else 0) := by - simp [d] - rw [mul_assoc] - rw [←mul_assoc (choose_2 (↑B.card))] - rw [←mul_assoc (choose_2 (↑B.card)), Field.mul_inv_cancel _ (by { - simp [choose_2] - apply And.intro - aesop - intro contr - have h : ↑B.card = (1 : ℚ) := by - rw [←zero_add (1 : ℚ)] - rw [←contr] - simp - simp at h - omega - })] - field_simp + 2 * choose_2 B.card * d B = + ∑ i, ∑ x ∈ B ×ˢ B with x.1 ≠ x.2, (if x.1 i ≠ x.2 i then 1 else 0) := by + field_simp [d, choose_2_card_ne_zero h_B] rw [Finset.sum_comm] - congr - ext x - rw [hamming_dist_eq_sum] - simp + simp_rw [hamming_dist_eq_sum] + field_simp -private lemma sum_sum_K_i_eq_n_sub_d {B : Finset (Fin n → F)} +private lemma sum_sum_K_i_eq_n_sub_d (h_B : 2 ≤ B.card) : ∑ i, sum_choose_K_i B i = choose_2 B.card * (n - d B) := by - rw [mul_sub] - have h : choose_2 B.card = ((1 : ℚ)/2) * (2 * choose_2 B.card) := by - field_simp - rw [h, mul_assoc (1/2), mul_assoc (1/2), ←mul_sub (1/2)] - rw [d_eq_sum h_B] - conv => - rhs - congr - rfl - congr - rfl - congr - rfl - ext i - rw [sum_of_not_equals] - rfl - simp - ring_nf - simp [sum_choose_K_i] - rw [Finset.sum_mul] - field_simp + rw [show + choose_2 B.card * (n - d B) = choose_2 B.card * n - (2 * choose_2 B.card * d B) / 2 by ring + ] + simp_rw [d_eq_sum h_B, sum_of_not_equals] + field_simp [←Finset.mul_sum, sum_choose_K_i] + ring -private lemma almost_johnson [Zero F] {B : Finset (Fin 
n → F)} +private lemma almost_johnson [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) : - n * (choose_2 (k B) + ((Finset.univ (α := F)).card - 1 : ℚ) - * choose_2 ((B.card - k B)/((Finset.univ (α := F)).card-1))) + n * (choose_2 (k B) + (Fintype.card F - 1) + * choose_2 ((B.card - k B) / (Fintype.card F - 1))) ≤ - choose_2 B.card * (n - d B) := by - apply le_trans - exact le_sum_sum_choose_K_i h_n (by aesop) h_card - rw [sum_sum_K_i_eq_n_sub_d h_B] + choose_2 B.card * (n - d B) := + le_trans (le_sum_sum_choose_K h_n (by omega) h_card) (sum_sum_K_i_eq_n_sub_d h_B ▸ le_refl _) -private lemma almost_johnson_choose_2_elimed [Zero F] {B : Finset (Fin n → F)} +private lemma almost_johnson_choose_2_elimed [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) - (h_card : 2 ≤ (Fintype.card F)) + (h_card : 2 ≤ Fintype.card F) : (k B * (k B - 1) + - (B.card - k B) * ((B.card - k B)/((Finset.univ (α := F)).card-1) - 1)) + (B.card - k B) * ((B.card - k B)/(Fintype.card F-1) - 1)) ≤ B.card * (B.card - 1) * (n - d B)/n := by - have h := almost_johnson h_n h_B h_card - have h_den_ne_0 : (↑(Fintype.card F) - (1 : ℚ)) ≠ 0 := by - intro contr - have h : ↑(Fintype.card F) = (1 : ℚ) := by - rw [←zero_add 1, ← contr] - simp - simp at h - omega - have h_lhs : - n * ((1:ℚ)/2) * ((k B) * (k B - 1) + ((Finset.univ (α := F)).card - 1 : ℚ) - * ((B.card - k B)/((Finset.univ (α := F)).card-1)) * ((B.card - k B)/((Finset.univ (α := F)).card-1) - 1)) - = - n * (choose_2 (k B) + ((Finset.univ (α := F)).card - 1 : ℚ) - * choose_2 ((B.card - k B)/((Finset.univ (α := F)).card-1))) := by - simp [choose_2] - ring_nf - rw [←h_lhs] at h - simp [choose_2] at h - have h_rhs : ↑B.card * (↑B.card - 1) / 2 * (↑n - d B) - = ↑n * 2⁻¹ * (↑n)⁻¹ * ↑B.card * (↑B.card - 1) * (↑n - d B) := by - rw [mul_comm (↑n : ℚ)] - rw [mul_assoc, mul_assoc (2⁻¹)] - rw [Field.mul_inv_cancel _ (by simp; omega)] - field_simp - ring - rw [h_rhs] at h - apply le_of_mul_le_mul_left (a := (↑n : ℚ) * (2⁻¹)) - have h_rhs : 
↑n * 2⁻¹ * (↑n)⁻¹ * ↑B.card * (↑B.card - 1) * (↑n - d B) - = ↑n * 2⁻¹ * (↑B.card * (↑B.card - 1) * (↑n - d B) / ↑n) := by - ring - rw [h_rhs] at h - apply le_trans' h - simp - conv => - rhs - congr - congr - congr - rfl - rfl - congr - rfl - congr - rw [Field.div_eq_mul_inv, mul_comm, mul_assoc] - congr - rfl - rw [mul_comm, Field.mul_inv_cancel _ (by aesop)] - rfl - rfl - simp - simp - assumption - -private lemma almost_johnson_lhs_div_B_card [Zero F] {B : Finset (Fin n → F)} + have h := almost_johnson h_n h_B h_card; simp [choose_2] at h + set C := (Fintype.card F : ℚ) - 1 + set δ := B.card - k B + exact le_of_mul_le_mul_left + (a0 := show 0 < (n : ℚ) * 2⁻¹ by simp [h_n]) + (le_trans (b := ↑n * 2⁻¹ * (k B * (k B - 1) + C * (δ / C) * (δ / C - 1))) + (by rw [mul_div_cancel₀ _ (by simp [sub_eq_zero, C]; omega)]) + (le_trans + (b := B.card * (B.card - 1) / 2 * (n - d B)) + (by convert h using 1; field_simp; ring_nf; tauto) + (by rw [show n * 2⁻¹ * (B.card * (B.card - 1) * (n - d B) / n) = + n * (↑n)⁻¹ * 2⁻¹ * (B.card * (B.card - 1) * (n - d B)) by ring] + field_simp))) + +private lemma almost_johnson_lhs_div_B_card [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) : (k B * (k B - 1) + - (B.card - k B) * ((B.card - k B)/((Finset.univ (α := F)).card-1) - 1)) / B.card + (B.card - k B) * ((B.card - k B)/(Fintype.card F - 1) - 1)) / B.card = (1 - e B 0 / n) ^ 2 * B.card + B.card * (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 := by - rw [add_div] - conv => - lhs - congr - rw [mul_comm, ←mul_div] - rw [k_and_e' (by omega) (by omega)] - rw [k_and_e (by omega) (by omega)] - rfl - rw [mul_comm, ←mul_div, sub_div (c := (B.card : ℚ))] - rw [k_and_e' (by omega) (by omega)] - rw [Field.div_eq_mul_inv _ (B.card : ℚ)] - rw [Field.mul_inv_cancel _ (by { - simp - aesop - })] - rw [k_and_e (by omega) (by omega)] - rfl - have hn : (↑n : ℚ) / ↑n = 1 := by field_simp - conv => - lhs - congr - rw [sub_mul] - rw [←mul_div, mul_assoc, ←pow_two, one_mul] - rw [sub_div, hn] - rfl - rw [←mul_div] - 
rw [sub_div (c := (↑n : ℚ)), hn] - simp - rw [sub_mul] - simp - rw [←mul_one (B.card : ℚ)] - rw [mul_assoc] - rw [←mul_sub (B.card : ℚ)] - simp - rw [mul_comm, mul_div, mul_div] - rfl - have h : (e B 0 / ↑n * (↑B.card * e B 0 / ↑n) / (↑(Fintype.card F) - 1) - e B 0 / ↑n) - = ↑B.card * (e B 0)^2/ (↑n^2 * (↑(Fintype.card F) - 1)) - e B 0 / ↑n:= by - field_simp - ring_nf - rw [h] - ring_nf - -private lemma johnson_unrefined [Zero F] {B : Finset (Fin n → F)} + letI E := (n - e B 0) / n + generalize eqrhs : (_ + _ - 1 : ℚ) = rhs + have eqE : E = k B / B.card := by unfold E; rw [k_and_e'] <;> omega + suffices + (B.card * E - 1) * E + ((B.card - B.card * E) / (Fintype.card F - 1) - 1) * (1 - E) = rhs by + rw [eqE, mul_div_cancel₀ _ (by simp only [ne_eq, Rat.natCast_eq_zero]; omega)] at this + rw [←this] + field_simp; ring + rw [←eqrhs, show E = 1 - (e B 0) / n by field_simp [E]] + ring_nf; field_simp; ring + +private lemma johnson_unrefined [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) @@ -784,112 +351,60 @@ private lemma johnson_unrefined [Zero F] {B : Finset (Fin n → F)} (1 - e B 0 / n) ^ 2 * B.card + B.card * (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 ≤ (B.card - 1) * (1 - d B/n) := by - have h : ((1 : ℚ) - d B / ↑n) = (↑n - d B) / ↑n := by - field_simp - rw [h] - rw [←almost_johnson_lhs_div_B_card h_n h_B] - apply le_of_mul_le_mul_left (a := (B.card : ℚ)) - rw [Field.div_eq_mul_inv _ (↑B.card : ℚ)] - rw [mul_comm] - rw [mul_assoc, mul_comm _ ((↑B.card) : ℚ), Field.mul_inv_cancel _ (by aesop)] - simp - rw [←mul_assoc] - apply le_trans - apply almost_johnson_choose_2_elimed h_n h_B h_card - field_simp - simp - rw [Finset.nonempty_iff_ne_empty] - aesop + suffices k B * (k B - 1) + (B.card - k B) * ((B.card - k B) / (Fintype.card F - 1) - 1) ≤ + B.card * (B.card - 1) * ((n - d B) / n) by + rw [ + show (1 - d B / n) = (n - d B) / n by field_simp, + ←almost_johnson_lhs_div_B_card h_n h_B, + div_le_iff₀ (by simp only [Nat.cast_pos]; omega) + ] 
+ linarith + exact le_trans (almost_johnson_choose_2_elimed h_n h_B h_card) (by field_simp) -private lemma johnson_unrefined_by_M [Zero F] {B : Finset (Fin n → F)} +private lemma johnson_unrefined_by_M [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) : B.card * ((1 - e B 0 / n) ^ 2 + (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 + d B/n) ≤ - d B/n := by - rw [mul_add, mul_sub] - rw [sub_add] - have hh : (↑B.card * 1 - ↑B.card * (d B / ↑n)) = ↑B.card * (1 - (d B/↑n)) := by - rw [mul_sub] - rw [hh] - apply le_of_add_le_add_right (a := -1) - apply le_of_add_le_add_right (a := ↑B.card * (1 - d B / ↑n)) - have hh : d B / ↑n + -1 + ↑B.card * (1 - d B / ↑n) - = (↑B.card - 1) * (1 - d B / ↑n) := by ring - rw [hh] - apply le_trans' (johnson_unrefined h_n h_B h_card) - conv => - lhs - rw [add_comm, ←add_assoc, add_comm, add_sub] - congr - rfl - rw [add_comm, ←add_sub] - simp - rfl - simp - rw [mul_add, mul_comm, ←mul_div] - -private lemma johnson_unrefined_by_M' [Zero F] {B : Finset (Fin n → F)} + d B/n := + suffices B.card * ((1 - e B 0 / n) ^ 2 + e B 0 ^ 2 / ((Fintype.card F - 1) * n ^ 2)) - + B.card * (1 - d B / n) + -1 + B.card * (1 - d B / n) ≤ + (B.card - 1) * (1 - d B / n) by linarith + le_trans (by ring_nf; field_simp) (johnson_unrefined h_n h_B h_card) + +private lemma johnson_unrefined_by_M' [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) : - B.card * ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * ((1 - e B 0 / n) ^ 2 + (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 + d B/n) + B.card * (Fintype.card F / (Fintype.card F - 1)) * + ((1 - e B 0 / n) ^ 2 + e B 0 ^ 2 / ((Fintype.card F - 1) * n^2) - 1 + d B/n) ≤ - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * d B/n := by - rw [mul_comm (B.card : ℚ)] - rw [mul_assoc, ←mul_div] - apply (mul_le_mul_left (by { - field_simp - omega - })).2 - exact johnson_unrefined_by_M h_n h_B h_card - -private lemma johnson_denom [Zero F] {B : Finset (Fin n → F)} + (Fintype.card F / 
(Fintype.card F - 1)) * d B/n := by + rw [mul_comm (B.card : ℚ), mul_assoc, ←mul_div] + exact (mul_le_mul_left (by field_simp; omega)).2 (johnson_unrefined_by_M h_n h_B h_card) + +private lemma johnson_denom [Zero F] (h_card : 2 ≤ (Fintype.card F)) : - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * ((1 - e B 0 / n) ^ 2 + (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 + d B/n) + (Fintype.card F / (Fintype.card F - 1)) * + ((1 - e B 0 / n) ^ 2 + (e B 0) ^ 2 / ((Fintype.card F - 1) * n^2) - 1 + d B/n) = - (1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (e B 0 / n)) ^ 2 - (1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (d B/n)) := by - have h : (((1:ℚ)- e B 0 / (↑n : ℚ)) ^ 2 + e B 0 ^ 2 / ((↑(Fintype.card F) - 1) * ↑n ^ 2) - 1 + d B / ↑n) - = d B / (↑n : ℚ) - 2 * e B 0 / (↑n : ℚ) + ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (e B 0)^2/n^2 := by - have h : e B 0 ^ 2 / ((↑(Fintype.card F) - 1) * ↑n ^ 2) = (((Fintype.card F : ℚ) / (Fintype.card F - 1))- 1) * e B 0 ^ 2 / ↑n ^ 2 := by - have h : ((Fintype.card F : ℚ) / (↑(Fintype.card F) - 1) - 1) = 1 / (↑(Fintype.card F) - 1) := by - have h : (Fintype.card F : ℚ) / (Fintype.card F - 1) = ((Fintype.card F : ℚ) - 1 + 1) / (Fintype.card F - 1) := by - field_simp - rw [h] - rw [add_div] - rw [Field.div_eq_mul_inv] - rw [Field.mul_inv_cancel _ (by { - intro contr - have h : (Fintype.card F : ℚ) = 1 := by - rw [←add_zero 1] - rw [←contr] - simp - simp at h - omega - })] - field_simp - rw [h] - field_simp - rw [h] - ring_nf - rw [h] - conv => - lhs - rw [mul_add, mul_sub, sub_add] - rfl - have h : (↑(Fintype.card F) / (↑(Fintype.card F) - 1) * (2 * e B 0 / ↑n) - - ↑(Fintype.card F) / (↑(Fintype.card F) - 1) * - (↑(Fintype.card F) / (↑(Fintype.card F) - 1) * e B 0 ^ 2 / ↑n ^ 2)) = - (1 - (1 - (↑(Fintype.card F) / (↑(Fintype.card F) - 1) * e B 0 / ↑n ))^2) := by - ring_nf - rw [h] + (1 - ((Fintype.card F) / (Fintype.card F - 1)) * + (e B 0 / n)) ^ 2 - (1 - ((Fintype.card F) / (Fintype.card F - 1)) * (d B/n)) := by + 
set C := Fintype.card F + set C₁ := (C : ℚ) - 1 + have n₂ : C₁ ≠ 0 := by simp [C₁, C, sub_eq_zero]; omega + suffices C / C₁ * (d B / n - 2 * e B 0 / n + C / C₁ * e B 0 ^ 2 / n ^ 2) = + (1 - C / C₁ * (e B 0 / n)) ^ 2 - (1 - C / C₁ * (d B / n)) by + have eq₂ : C₁ * C₁⁻¹ = 1 := by field_simp + rw [←this, show C / C₁ = 1 + 1 / C₁ by unfold C₁; field_simp] + ring_nf ring -private lemma johnson_bound₀ [Zero F] {B : Finset (Fin n → F)} +private lemma johnson_bound₀ [Zero F] (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) @@ -898,12 +413,10 @@ private lemma johnson_bound₀ [Zero F] {B : Finset (Fin n → F)} ((1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (e B 0 / n)) ^ 2 - (1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (d B/n))) ≤ ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * d B/n := by - rw [←johnson_denom h_card] - rw [←mul_assoc] + rw [←johnson_denom h_card, ←mul_assoc] exact johnson_unrefined_by_M' h_n h_B h_card - -protected lemma johnson_bound_lemma [Field F] {B : Finset (Fin n → F)} {v : Fin n → F} +protected lemma johnson_bound_lemma [Field F] {v : Fin n → F} (h_n : 0 < n) (h_B : 2 ≤ B.card) (h_card : 2 ≤ (Fintype.card F)) @@ -912,16 +425,10 @@ protected lemma johnson_bound_lemma [Field F] {B : Finset (Fin n → F)} {v : Fi ((1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (e B v / n)) ^ 2 - (1 - ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * (d B/n))) ≤ ((Fintype.card F : ℚ) / (Fintype.card F - 1)) * d B/n := by - rw [lin_shift_e (B := B) (by omega)] - rw [lin_shift_d h_B] - rw [lin_shift_card (B := B) (v := v)] - exact johnson_bound₀ h_n (by { - rw [←lin_shift_card (B := B)] - assumption - }) - h_card + rw [lin_shift_e (by omega), lin_shift_d h_B, lin_shift_card (v := v)] + exact johnson_bound₀ h_n (lin_shift_card (B := B) ▸ h_B) h_card -protected lemma a_lemma_im_not_proud_of {v a : Fin n → F} +protected lemma a_lemma_im_not_proud_of_OLD {v a : Fin n → F} (h_card : 2 ≤ Fintype.card F) : |(1 : ℚ) - ((1 : ℚ) + (1 : ℚ) / 
((Fintype.card F : ℚ) - 1)) * ↑Δ₀(v, a) / ↑n| @@ -973,8 +480,7 @@ protected lemma a_lemma_im_not_proud_of {v a : Fin n → F} apply (mul_le_mul_left (by simp)).2 have h : (↑Δ₀(v, a) : ℚ) ≤ ↑n := by simp [hammingDist] - apply le_trans - apply Finset.card_le_univ + apply le_trans (Finset.card_le_univ _) simp rw [mul_comm] apply le_trans @@ -996,6 +502,32 @@ protected lemma a_lemma_im_not_proud_of {v a : Fin n → F} left simp +protected lemma abs_one_sub_div_le_one {v a : Fin n → F} + (h_card : 2 ≤ Fintype.card F) + : + |1 - (1 + 1 / ((Fintype.card F : ℚ) - 1)) * Δ₀(v, a) / n| ≤ 1 := by + by_cases eq : n = 0 <;> [simp [eq]; skip] + by_cases heq : v = a <;> [simp [heq]; skip] + rw [abs_le] + refine ⟨?p, by simp only [tsub_le_iff_right, le_add_iff_nonneg_right] + exact div_nonneg (mul_nonneg (add_nonneg zero_le_one (by simp; omega)) + (by simp)) + (by linarith)⟩ + set a₁ := (Fintype.card F : ℚ) - 1 + set a₂ := (Δ₀(v, a) : ℚ) + have ha₂ : a₂ ≤ n := by simp [a₂, hammingNorm]; apply (le_trans (Finset.card_le_univ _) (by simp)) + set a₃ := a₂ / n + have : a₁⁻¹ ≤ 1 := by aesop (add simp [inv_le_one_iff₀, le_sub_iff_add_le]) + (add safe (by norm_cast)) + suffices (1 + a₁⁻¹) * a₃ ≤ 2 * a₃ ∧ 2 * a₃ ≤ 2 by simp [← mul_div]; linarith + suffices 1 + a₁⁻¹ ≤ 2 ∧ 0 < a₃ ∧ 2 * a₃ ≤ 2 from + ⟨(mul_le_mul_right (by field_simp [a₃] at *; tauto)).2 (by linarith), this.2.2⟩ + exact ⟨ + by aesop (add safe (by linarith)), + by field_simp [a₂, a₃]; exact heq, + by aesop (add safe [(by rw [div_le_one]), (by omega)]) + ⟩ + end end JohnsonBound diff --git a/ArkLib/Data/CodingTheory/ListDecodability.lean b/ArkLib/Data/CodingTheory/ListDecodability.lean index 74ece2fad..0c959f7d0 100644 --- a/ArkLib/Data/CodingTheory/ListDecodability.lean +++ b/ArkLib/Data/CodingTheory/ListDecodability.lean @@ -8,40 +8,40 @@ import Mathlib.InformationTheory.Hamming import Mathlib.Analysis.Normed.Field.Lemmas import ArkLib.Data.CodingTheory.Basic -open Classical - namespace ListDecodable noncomputable section 
-variable {ι : ℕ} +variable {ι : Type*} [Fintype ι] {F : Type*} -abbrev Code.{u} (ι : ℕ) (S : Type u) : Type u := Set (Fin ι → S) +abbrev Code.{u, v} (ι : Type u) (S : Type v) : Type (max u v) := Set (ι → S) +open Classical in /-- Hamming ball of radius `r` centred at a word `y`. -/ -def hammingBall (C : Code ι F) (y : Fin ι → F) (r : ℕ) : Code ι F := +def hammingBall (C : Code ι F) (y : ι → F) (r : ℕ) : Code ι F := { c | c ∈ C ∧ hammingDist y c ≤ r } +open Classical in /-- Ball of radius `r` centred at a word `y` with respect to the relative Hamming distance. -/ -def relHammingBall (C : Code ι F) (y : Fin ι → F) (r : ℝ) : Code ι F := +def relHammingBall (C : Code ι F) (y : ι → F) (r : ℝ) : Code ι F := { c | c ∈ C ∧ dist y c ≤ r } /-- The number of close codewords to a given word `y` with respect to the Hamming distance metric. -/ -def listOfCloseCodewords (C : Code ι F) (y : Fin ι → F) (r : ℕ) : ℕ := +def listOfCloseCodewords (C : Code ι F) (y : ι → F) (r : ℕ) : ℕ := Nat.card (hammingBall C y r) /-- The number of close codewords to a given word `y` with respect to the relative Hamming distance metric. -/ -def listOfCloseCodewordsRel (C : Code ι F) (y : Fin ι → F) (r : ℝ) : ℕ := +def listOfCloseCodewordsRel (C : Code ι F) (y : ι → F) (r : ℝ) : ℕ := Nat.card (relHammingBall C y r) /-- @@ -55,11 +55,11 @@ The code `C` is `(r,ℓ)`-list decodable. since the cardinality of the set of close codewords is a natural number anyway. 
-/ def listDecodable (C : Code ι F) (r : ℝ) (ℓ : ℝ) : Prop := - ∀ y : Fin ι → F, listOfCloseCodewordsRel C y r ≤ ℓ + ∀ y : ι → F, listOfCloseCodewordsRel C y r ≤ ℓ -section +section Lemmas -variable {C : Code ι F} {y : Fin ι → F} {n : ℕ} {r : ℝ} {ℓ : ℝ} +variable {C : Code ι F} {y : ι → F} {n : ℕ} {r : ℝ} {ℓ : ℝ} lemma listOfCloseCodewords_eq_zero : listOfCloseCodewords C y n = 0 ↔ IsEmpty (hammingBall C y n) ∨ Infinite (hammingBall C y n) := by @@ -70,7 +70,7 @@ lemma listOfCloseCodewordsRel_eq_zero : IsEmpty (relHammingBall C y r) ∨ Infinite (relHammingBall C y r) := by simp [listOfCloseCodewordsRel, Nat.card_eq_zero] -end +end Lemmas end diff --git a/ArkLib/Data/CodingTheory/PolishchukSpielman.lean b/ArkLib/Data/CodingTheory/PolishchukSpielman.lean index d33488fc7..79abeae08 100644 --- a/ArkLib/Data/CodingTheory/PolishchukSpielman.lean +++ b/ArkLib/Data/CodingTheory/PolishchukSpielman.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Katerina Hristova, František Silváši, Julian Sutherland -/ -import ArkLib.Data.CodingTheory.BivariatePoly +import ArkLib.Data.Polynomial.Bivariate open Polynomial.Bivariate Polynomial @@ -19,7 +19,7 @@ lemma Polishchuk_Spielman {F : Type} [Semiring F] [Field F] (h_f_degX : a_x ≥ Bivariate.degreeX f) (h_g_degX : b_x ≥ Bivariate.degreeX g) (h_f_degY : a_y ≥ Bivariate.degreeY f) (h_g_degY : b_y ≥ Bivariate.degreeY g) (h_card_Px : n_x ≤ P_x.card) (h_card_Py : n_y ≤ P_y.card) - (h_le_1 : 1 > (b_x : ℚ)/(n_x : ℚ) + (b_y : ℚ)/(n_y : ℚ)) + (h_le_1 : (1 : ℚ) > b_x / n_x + b_y / n_y) (h_quot_X : ∀ y ∈ P_y, (quot_X y).natDegree ≤ (b_x - a_x) ∧ Bivariate.evalY y g = (quot_X y) * (Bivariate.evalY y f)) (h_quot_Y : ∀ x ∈ P_x, @@ -30,4 +30,5 @@ lemma Polishchuk_Spielman {F : Type} [Semiring F] [Field F] ∀ x ∈ Q_x, Bivariate.evalX x q = quot_Y x) ∧ (∃ Q_y : Finset F, Q_y.card ≥ n_y - a_y ∧ Q_y ⊆ P_y ∧ ∀ y ∈ Q_y, Bivariate.evalX y q = quot_X y) - := sorry + := by + sorry diff --git 
a/ArkLib/Data/CodingTheory/Prelims.lean b/ArkLib/Data/CodingTheory/Prelims.lean index 4076cba1e..fc093449b 100644 --- a/ArkLib/Data/CodingTheory/Prelims.lean +++ b/ArkLib/Data/CodingTheory/Prelims.lean @@ -68,11 +68,10 @@ end Matrix end -/-- - Affine line between two vectors with coefficients in a semiring. +/-- Affine line between two vectors with coefficients in a semiring. -/ def Affine.line {F : Type*} {ι : Type*} [Ring F] (u v : ι → F) : Submodule F (ι → F) := - vectorSpan _ {u, v} + vectorSpan _ {u, v} namespace sInf diff --git a/ArkLib/Data/CodingTheory/ReedSolomon.lean b/ArkLib/Data/CodingTheory/ReedSolomon.lean index a0afcfebf..300c2d8c3 100644 --- a/ArkLib/Data/CodingTheory/ReedSolomon.lean +++ b/ArkLib/Data/CodingTheory/ReedSolomon.lean @@ -1,13 +1,15 @@ /- Copyright (c) 2024 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. -Authors: Quang Dao, Katerina Hristova, František Silváši, Julian Sutherland, Ilia Vlasov +Authors: Quang Dao, Katerina Hristova, František Silváši, Julian Sutherland, Ilia Vlasov, +Mirco Richter -/ import ArkLib.Data.CodingTheory.Basic import Mathlib.LinearAlgebra.Lagrange import Mathlib.RingTheory.Henselian -import ArkLib.Data.Fin.Basic +import ArkLib.Data.Fin.Lift +import ArkLib.Data.MvPolynomial.LinearMvExtension import ArkLib.Data.Polynomial.Interface /-! @@ -33,7 +35,7 @@ def evalOnPoints : F[X] →ₗ[F] (ι → F) where map_smul' := fun m x => by simp; congr /-- The Reed-Solomon code for polynomials of degree less than `deg` and evaluation points `domain`. 
- -/ +-/ def code (deg : ℕ) : Submodule F (ι → F) := (Polynomial.degreeLT F deg).map (evalOnPoints domain) @@ -50,10 +52,9 @@ def checkMatrix (deg : ℕ) [Fintype ι] : Matrix (Fin (Fintype.card ι - deg)) -- simp [codeByGenMatrix, code] -- rw [LinearMap.range_eq_map] -- sorry +end ReedSolomon -open Classical -open Polynomial -open Matrix +open Polynomial Matrix Code LinearCode variable {F ι ι' : Type*} {C : Set (ι → F)} @@ -72,22 +73,16 @@ lemma nonsquare_mulVecLin [CommSemiring F] {ι' : ℕ} {α₁ : ι ↪ F} {α₂ (nonsquare ι' α₁).mulVecLin α₂ i = ∑ x, α₂ x * α₁ i ^ x.1 := by simp [nonsquare, mulVecLin_apply, mulVec_eq_sum] -/-- - The transpose of a non-square Vandermonde matrix. +/-- The transpose of a non-square Vandermonde matrix. -/ def nonsquareTranspose [Field F] (ι' : ℕ) (α : ι ↪ F) : Matrix (Fin ι') ι F := (Vandermonde.nonsquare ι' α)ᵀ -private lemma todoMoveOut {k : ℕ} : (List.finRange k).dedup = List.finRange k := by - induction k <;> - aesop (add simp [List.finRange_succ, List.dedup_map_of_injective, Fin.succ_injective]) - section variable [CommRing F] {m n : ℕ} {α : Fin m → F} -/-- - The maximal upper square submatrix of a Vandermonde matrix is a Vandermonde matrix. +/-- The maximal upper square submatrix of a Vandermonde matrix is a Vandermonde matrix. -/ lemma subUpFull_of_vandermonde_is_vandermonde (h : n ≤ m) : Matrix.vandermonde (α ∘ Fin.castLE h) = @@ -95,8 +90,7 @@ lemma subUpFull_of_vandermonde_is_vandermonde (h : n ≤ m) : ext r c simp [Matrix.vandermonde, Matrix.subUpFull, nonsquare] -/-- - The maximal left square submatrix of a Vandermonde matrix is a Vandermonde matrix. +/-- The maximal left square submatrix of a Vandermonde matrix is a Vandermonde matrix. -/ lemma subLeftFull_of_vandermonde_is_vandermonde (h : m ≤ n) : Matrix.vandermonde α = Matrix.subLeftFull (nonsquare n α) (Fin.castLE h) := by @@ -107,8 +101,8 @@ section variable [IsDomain F] -/-- - The rank of a non-square Vandermonde matrix with more rows than columns is the number of columns. 
+/-- The rank of a non-square Vandermonde matrix with more rows than columns is the number of + columns. -/ lemma rank_nonsquare_eq_deg_of_deg_le (inj : Function.Injective α) (h : n ≤ m) : (Vandermonde.nonsquare (ι' := n) α).rank = n := by @@ -119,9 +113,8 @@ lemma rank_nonsquare_eq_deg_of_deg_le (inj : Function.Injective α) (h : n ≤ m Matrix.det_vandermonde_ne_zero_iff ] apply Function.Injective.comp <;> aesop (add simp Fin.castLE_injective) - -/-- - The rank of a non-square Vandermonde matrix with more columns than rows is the number of rows. + +/-- The rank of a non-square Vandermonde matrix with more columns than rows is the number of rows. -/ lemma rank_nonsquare_eq_deg_of_ι_le (inj : Function.Injective α) (h : m ≤ n) : (Vandermonde.nonsquare (ι' := n) α).rank = m := by @@ -151,7 +144,8 @@ theorem mulVecLin_coeff_vandermondens_eq_eval_matrixOfPolynomials nonsquare_mulVecLin, Finset.sum_fin_eq_sum_range, eval_eq_sum ] refine Eq.symm (Finset.sum_of_injOn (·%n) ?p₁ ?p₂ (fun i _ h ↦ ?p₃) (fun i _ ↦ ?p₄)) - · aesop (add simp [Set.InjOn]) + · stop -- TODO: fix this + aesop (add simp [Set.InjOn]) (add safe forward [le_natDegree_of_mem_supp, lt_of_le_of_lt, Nat.lt_add_one_of_le]) (add 10% apply (show ∀ {a b c : ℕ}, a < c → b < c → a % c = b % c → a = b from fun h₁ h₂ ↦ by aesop (add simp Nat.mod_eq_of_lt))) @@ -182,10 +176,11 @@ lemma natDegree_lt_of_mem_degreeLT [NeZero deg] (h : p ∈ degreeLT F deg) : p.n · cases deg <;> aesop · aesop (add simp [natDegree_lt_iff_degree_lt, mem_degreeLT]) -def encode (msg : Fin deg → F) (domain : Fin m ↪ F) : Fin m → F := +def encode [DecidableEq F] (msg : Fin deg → F) (domain : Fin m ↪ F) : Fin m → F := (polynomialOfCoeffs msg).eval ∘ ⇑domain -lemma encode_mem_ReedSolomon_code [NeZero deg] {msg : Fin deg → F} {domain : Fin m ↪ F} : +lemma encode_mem_ReedSolomon_code [DecidableEq F] [NeZero deg] + {msg : Fin deg → F} {domain : Fin m ↪ F} : encode msg domain ∈ ReedSolomon.code domain deg := ⟨polynomialOfCoeffs msg, ⟨by simp, by ext i; 
simp [encode, ReedSolomon.evalOnPoints]⟩⟩ @@ -199,10 +194,9 @@ lemma codewordIsZero_makeZero {ι : ℕ} {F : Type*} [Zero F] : open LinearCode -/-- - The Vandermonde matrix is the generator matrix for an RS code of length `ι` and dimension `deg`. +/-- The Vandermonde matrix is the generator matrix for an RS code of length `ι` and dimension `deg`. -/ -lemma genMatIsVandermonde [Fintype ι] [Field F] [inst : NeZero m] {α : ι ↪ F} : +lemma genMatIsVandermonde [Fintype ι] [Field F] [DecidableEq F] [inst : NeZero m] {α : ι ↪ F} : fromColGenMat (Vandermonde.nonsquare (ι' := m) α) = ReedSolomon.code α m := by unfold fromColGenMat ReedSolomon.code ext x; rw [LinearMap.mem_range, Submodule.mem_map] @@ -224,6 +218,7 @@ variable [Field F] lemma dim_eq_deg_of_le [NeZero n] (inj : Function.Injective α) (h : n ≤ m) : dim (ReedSolomon.code ⟨α, inj⟩ n) = n := by + classical rw [ ← genMatIsVandermonde, ← rank_eq_dim_fromColGenMat, Vandermonde.rank_nonsquare_rows_eq_min ] <;> simp [inj, h] @@ -238,18 +233,21 @@ lemma rateOfLinearCode_eq_div [NeZero n] (inj : Function.Injective α) (h : n rwa [rate, dim_eq_deg_of_le, length_eq_domain_size] @[simp] -lemma dist_le_length (inj : Function.Injective α) : minDist (ReedSolomon.code ⟨α, inj⟩ n) ≤ m := by - convert minDist_UB +lemma dist_le_length [DecidableEq F] (inj : Function.Injective α) : + minDist ((ReedSolomon.code ⟨α, inj⟩ n) : Set (Fin m → F)) ≤ m := by + convert dist_UB simp end -lemma card_le_card_of_count_inj {α β : Type*} {s : Multiset α} {s' : Multiset β} +lemma card_le_card_of_count_inj {α β : Type*} [DecidableEq α] [DecidableEq β] + {s : Multiset α} {s' : Multiset β} {f : α → β} (inj : Function.Injective f) (h : ∀ a : α, s.count a ≤ s'.count (f a)) : s.card ≤ s'.card := by + classical simp only [←Multiset.toFinset_sum_count_eq] apply le_trans (b := ∑ x ∈ s.toFinset, s'.count (f x)) (Finset.sum_le_sum (by aesop)) - rw [←Finset.sum_image (f := s'.count) (by aesop)] + rw [←Finset.sum_image (f := s'.count) (by aesop (add simp 
[Set.InjOn]))] have : s.toFinset.image f ⊆ s'.toFinset := suffices ∀ x ∈ s, f x ∈ s' by simpa [Finset.image_subset_iff] by simp_rw [←Multiset.count_pos] @@ -263,7 +261,7 @@ def constantCode {α : Type*} (x : α) (ι' : Type*) [Fintype ι'] : ι' → α variable [Semiring F] {x : F} [Fintype ι] {α : ι ↪ F} @[simp] -lemma weight_constantCode : +lemma weight_constantCode [DecidableEq F] : wt (constantCode x ι) = 0 ↔ IsEmpty ι ∨ x = 0 := by by_cases eq : IsEmpty ι <;> aesop (add simp [constantCode, wt_eq_zero_iff]) @@ -280,17 +278,16 @@ lemma constantCode_eq_ofNat_zero_iff [Nonempty ι] : exact ⟨fun x ↦ Eq.mp (by simp) (congrFun x), (· ▸ rfl)⟩ @[simp] -lemma wt_constantCode [NeZero x] : +lemma wt_constantCode [DecidableEq F] [NeZero x] : wt (constantCode x ι) = Fintype.card ι := by unfold constantCode wt; aesop end open Finset in -/-- - The minimal code distance of an RS code of length `ι` and dimension `deg` is `ι - deg + 1` +/-- The minimal code distance of an RS code of length `ι` and dimension `deg` is `ι - deg + 1` -/ -theorem minDist [Field F] (inj : Function.Injective α) [NeZero n] (h : n ≤ m) : - minDist (ReedSolomon.code ⟨α, inj⟩ n) = m - n + 1 := by +theorem minDist [Field F] [DecidableEq F] (inj : Function.Injective α) [NeZero n] (h : n ≤ m) : + minDist ((ReedSolomon.code ⟨α, inj⟩ n) : Set (Fin m → F)) = m - n + 1 := by have : NeZero m := by constructor; aesop refine le_antisymm ?p₁ ?p₂ case p₁ => @@ -300,7 +297,7 @@ theorem minDist [Field F] (inj : Function.Injective α) [NeZero n] (h : n ≤ m) zify [dist_le_length] at distUB omega case p₂ => - rw [minDist_eq_minWtCodewords] + rw [dist_eq_minWtCodewords] apply le_csInf (by use m, constantCode 1 _; simp) intro b ⟨msg, ⟨p, p_deg, p_eval_on_α_eq_msg⟩, msg_neq_0, wt_c_eq_b⟩ let zeroes : Finset _ := {i | msg i = 0} @@ -326,5 +323,113 @@ end end ReedSolomonCode end +section -end ReedSolomon +open LinearMap Finset + +variable {F : Type*} [Field F] + {ι : Type*} [Fintype ι] [DecidableEq ι] + {domain : ι ↪ F} + {deg : ℕ} + 
+/-- The linear map that maps a codeword `f : ι → F` to a degree < |ι| polynomial p, + such that p(x) = f(x) for all x ∈ ι -/ +private noncomputable def interpolate : (ι → F) →ₗ[F] F[X] := + Lagrange.interpolate univ domain + +/-- The linear map that maps a ReedSolomon codeword to its associated polynomial -/ +noncomputable def decode : (ReedSolomon.code domain deg) →ₗ[F] F[X] := + domRestrict + (interpolate (domain := domain)) + (ReedSolomon.code domain deg) + +/- ReedSolomon codewords are decoded into degree < deg polynomials-/ +lemma decoded_polynomial_lt_deg (c : ReedSolomon.code domain deg) : + decode c ∈ (degreeLT F deg : Submodule F F[X]) := by sorry + +/-- The linear map that maps a Reed Solomon codeword to its associated polynomial + of degree < deg -/ +noncomputable def decodeLT : (ReedSolomon.code domain deg) →ₗ[F] (Polynomial.degreeLT F deg) := + codRestrict + (Polynomial.degreeLT F deg) + decode + (fun c => decoded_polynomial_lt_deg c) + +end + +section + +open LinearMvExtension + +variable {F : Type*} [Semiring F] [DecidableEq F] + {ι : Type*} [Fintype ι] + +/-- A domain `ι ↪ F` is `smooth`, if `ι ⊆ F`, `|ι| = 2^k` for some `k` and + there exists a subgroup `H` in the group of units `Rˣ` + and an invertible element `a ∈ R` such that `ι = a • H` -/ +class Smooth + (domain : ι ↪ F) where + H : Subgroup (Units F) + a : Units F + h_coset : Finset.image domain Finset.univ + = (fun h : Units F => (a : F) * (h : F)) '' (H : Set (Units F)) + h_card_pow2 : ∃ k : ℕ, Fintype.card ι = 2 ^ k + +variable {F : Type*} [Field F] [DecidableEq F] + {ι : Type*} [Fintype ι] [DecidableEq ι] + {domain : ι ↪ F} [Smooth domain] + {m : ℕ} + +/-- Definition 4.2, WHIR[ACFY24] + Smooth ReedSolomon Codes are ReedSolomon Codes defined over Smooth Domains, such that + their decoded univariate polynomials are of degree < 2ᵐ for some m ∈ ℕ. 
-/ +def smoothCode + (domain : ι ↪ F) [Smooth domain] + (m : ℕ): Submodule F (ι → F) := ReedSolomon.code domain (2^m) + +/-- The linear map that maps Smooth Reed Solomon Code words + to their decoded degree wise linear `m`-variate polynomial -/ +noncomputable def mVdecode : + (smoothCode domain m) →ₗ[F] MvPolynomial (Fin m) F := + linearMvExtension.comp decodeLT + +/-- Auxiliary function to assign values to the weight polynomial variables: + index `0` ↦ `p.eval b`, index `j+1` ↦ `b j`. -/ +private def toWeightAssignment + (p : MvPolynomial (Fin m) F) + (b : Fin m → Fin 2) : Fin (m+1) → F := + let b' : Fin m → F := fun i => ↑(b i : ℕ) + Fin.cases (MvPolynomial.eval b' p) + (fun i => ↑(b i : ℕ)) + +/-- constraint is true, if ∑ {b ∈ {0,1}^m} w(f(b),b) = σ for given + m-variate polynomial `f` and `(m+1)`-variate polynomial `w` -/ +def weightConstraint + (f : MvPolynomial (Fin m) F) + (w : MvPolynomial (Fin (m + 1)) F) (σ : F) : Prop := + ∑ b : Fin m → Fin 2 , w.eval (toWeightAssignment f b) = σ + +/-- Definition 4.5, WHIR[ACFY24] + Constrained Reed Solomon codes are smooth codes whose decoded m-variate + polynomial satisfies the weight constraint for given `w` and `σ`. +-/ +def constrainedCode + (domain : ι ↪ F) [Smooth domain] (m : ℕ) + (w : MvPolynomial (Fin (m + 1)) F) (σ : F) : Set (ι → F) := + { f | ∃ (h : f ∈ smoothCode domain m), + weightConstraint (mVdecode (⟨f, h⟩ : smoothCode domain m)) w σ } + +/-- Definition 4.6, WHIR[ACFY24] + Multi-constrained Reed Solomon codes are smooth codes whose decoded m-variate + polynomial satisfies the `t` weight constraints for given `w₀,...,wₜ₋₁` and + `σ₀,...,σₜ₋₁`.
+-/ +def multiConstrainedCode + (domain : ι ↪ F) [Smooth domain] (m t : ℕ) + (w : Fin t → MvPolynomial (Fin (m + 1)) F) + (σ : Fin t → F) : Set (ι → F) := + { f | + ∃ (h : f ∈ smoothCode domain m), + ∀ i : Fin t, weightConstraint (mVdecode (⟨f, h⟩ : smoothCode domain m)) (w i) (σ i)} + +end diff --git a/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/NovelPolynomialBasis.lean b/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/NovelPolynomialBasis.lean new file mode 100644 index 000000000..1ca97710b --- /dev/null +++ b/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/NovelPolynomialBasis.lean @@ -0,0 +1,1414 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Chung Thai Nguyen +-/ + +import ArkLib.Data.FieldTheory.BinaryField.AdditiveNTT.Prelude + +/-! +# Novel Polynomial Basis + +This file defines the components of a novel polynomial basis over a binary field `L` +with degree `r` over its prime subfield `𝔽q`, and an `𝔽q`-basis `β` for `L`. + +## Main Definitions +- `Uᵢ`: `𝔽q`-linear span of the initial `i` vectors of our basis `β` +- `Wᵢ(X)`: subspace vanishing polynomial over `Uᵢ`, with normalized form `Ŵᵢ(X)` +- `{Xⱼ(X), j ∈ Fin 2^ℓ}`: basis vectors of `L⦃<2^ℓ⦄[X]` over `L` + constructed from `Ŵᵢ(X)` +- `novel_polynomial_basis`: the novel polynomial basis for `L⦃<2^ℓ⦄[X]` +- `W_recursive_decomposition`: decomposition of `Wᵢ` into a product of compositions + `Π c ∈ Uᵢ, (Wᵢ₋₁ ∘ (X - c • βᵢ₋₁))` +- `W_linearity`: `Wᵢ` is `𝔽q`-linear and satisfies the recursion formula + `Wᵢ = (Wᵢ₋₁)^|𝔽q| - ((Wᵢ₋₁)(βᵢ₋₁))^(|𝔽q|-1) * Wᵢ₋₁` + +## TODOs +- Computable novel polynomial basis + +## References + +- [LCH14] Sian-Jheng Lin, Wei-Ho Chung, and Yunghsiang S. Han. "Novel Polynomial Basis and Its + Application to Reed–Solomon Erasure Codes". In: IEEE 55th Annual Symposium on Foundations of + Computer Science. 2014, pp. 316–325. doi: 10.1109/FOCS.2014.41. + +- [GGJ96] J. 
von zur Gathen and J. Gerhard, "Arithmetic and factorization of polynomials + over F2 (extended abstract)", in Proceedings of the 1996 International Symposium on + Symbolic and Algebraic Computation, Zurich, Switzerland, 1996, pp. 1–9. +-/ + +open AdditiveNTT Polynomial FiniteDimensional Finset +namespace AdditiveNTT + +universe u + +-- Fix a binary field `L` of degree `r` over its prime subfield `𝔽q` +variable (L : Type u) [Field L] [Fintype L] [DecidableEq L] +variable (𝔽q : Type u) [Field 𝔽q] [Fintype 𝔽q] [DecidableEq 𝔽q] +variable [Algebra 𝔽q L] +-- We assume an `𝔽q`-basis for `L`, denoted by `(β₀, β₁, ..., β_{r-1})`, indexed by natural numbers. +variable (β : Nat → L) (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + +section LinearSubspaces + +-- # 𝔽q-linear subspaces `Uᵢ` +-- ∀ i ∈ {0, ..., r-1}, we define `Uᵢ:= <β₀, ..., βᵢ₋₁>_{𝔽q}` +-- as the `𝔽q`-linear span of the initial `i` vectors of our basis `β`. +def U (i : Nat) : Subspace 𝔽q L := Submodule.span 𝔽q (Set.image β (Set.Ico 0 i)) + +instance {i: ℕ} : Module (R:=𝔽q) (M:=U L 𝔽q β i) := Submodule.module _ +instance {i: ℕ} : DecidableEq (U L 𝔽q β i) := by exact instDecidableEqOfLawfulBEq +noncomputable instance {i: ℕ} (x: L): Decidable (x ∈ (U L 𝔽q β i : Set L)) := by + exact Classical.propDecidable (x ∈ ↑(U L 𝔽q β i)) +-- e.g. prop => boolean + +-- The dimension of `U i` is `i`. +omit [Fintype L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +lemma finrank_U (hβ_lin_indep : LinearIndependent 𝔽q β) (i: ℕ): + Module.finrank (R:=𝔽q) (M:=(U L 𝔽q β i)) = i := by + -- The dimension of the span of linearly independent vectors is the number of vectors.
+ have h_card : Fintype.card (Set.Ico 0 i) = i := by + simp only [Fintype.card_ofFinset, Nat.card_Ico] + rw [Nat.sub_zero] + unfold U + set basis := β '' Set.Ico 0 i + -- how to show that basis is of form: ι → L + have h_basis_card: Fintype.card (basis) = i := by + unfold basis -- ⊢ Fintype.card ↑(β '' Set.Icc 0 (i - 1)) = i + rw [Set.card_image_of_injective] -- card of image of inj function = card of domain + exact h_card -- card of domain is i + -- β is injective + have h_inj : Function.Injective β := LinearIndependent.injective (hv:=hβ_lin_indep) + exact h_inj + + show Module.finrank 𝔽q (Submodule.span 𝔽q (basis)) = i + + have h_linear_indepdendent_basis: LinearIndepOn 𝔽q id (β '' Set.Ico 0 i) := by + have h_inj : Set.InjOn β (Set.Ico 0 i) := by + intros x hx y hy hxy + apply LinearIndependent.injective hβ_lin_indep + exact hxy + let ι : Set.Ico 0 i → β '' Set.Ico 0 i := fun x => ⟨β x, Set.mem_image_of_mem β x.2⟩ + have h_bij : Function.Bijective ι := by + constructor + · intros x y hxy + simp only [ι, Subtype.mk_eq_mk] at hxy + -- ⊢ x - y + apply Subtype.ext -- bring to equality in extension type: ⊢ ↑x = ↑y + exact h_inj x.2 y.2 hxy + · intro y + rcases y with ⟨y, hy⟩ + obtain ⟨x, hx, hxy⟩ := (Set.mem_image β (Set.Ico 0 i) y).mp hy + use ⟨x, hx⟩ + simp only [ι, hxy, Subtype.mk_eq_mk] + let h_li := hβ_lin_indep.comp (Subtype.val : (Set.Ico 0 i) → ℕ) Subtype.coe_injective + have eq_subset : Set.range (β ∘ (Subtype.val : (Set.Ico 0 i) → ℕ)) + = β '' Set.Ico 0 i := by + rw [Set.range_comp] + -- ⊢ β '' Set.range Subtype.val = β '' Set.Icc 0 (i - 1) + rw [Subtype.range_coe] -- alternatively, we can unfold all defs & simp + rw [←eq_subset] + exact h_li.linearIndepOn_id + rw [finrank_span_set_eq_card (R:=𝔽q) (M:=L) (s := Set.image β (Set.Ico 0 i)) + (hs:=h_linear_indepdendent_basis)] + rw [Set.toFinset_card] + exact h_basis_card + +noncomputable instance fintype_U (i : ℕ) : Fintype (U L 𝔽q β i) := by + exact Fintype.ofFinite (U L 𝔽q β i) + +-- The cardinality of the 
subspace `Uᵢ` is `2ⁱ`, which follows from its dimension. +omit [DecidableEq 𝔽q] in +lemma U_card (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : ℕ): + Fintype.card (U L 𝔽q β i) = (Fintype.card 𝔽q)^i := by + -- The cardinality of a vector space V is |F|^(dim V). + rw [Module.card_eq_pow_finrank (K:=𝔽q) (V:=U (𝔽q:=𝔽q) (β:=β) (i:=i))] + rw [finrank_U (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep)] + +/-! -/ -- => mathlib documentation + +omit [Fintype L] [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- +An essential helper lemma showing that `Uᵢ` is the union of all cosets of `Uᵢ₋₁` +generated by scaling `βᵢ₋₁` by elements of `𝔽q`. +-/ +lemma U_i_is_union_of_cosets (i : Nat) (hi : i > 0) : + (U L 𝔽q β i : Set L) = ⋃ (c : 𝔽q), (fun u => c • β (i-1) + u) '' (U L 𝔽q β (i-1)) := by + have h_decomp : U L 𝔽q β i = U L 𝔽q β (i-1) ⊔ Submodule.span 𝔽q {β (i-1)} := by + unfold U + have h_ico : Set.Ico 0 i = Set.Ico 0 (i - 1) ∪ {i - 1} := by + ext k; + simp only [Set.mem_Ico, zero_le, true_and, Set.union_singleton, Set.Ico_insert_right, + Set.mem_Icc] + -- ⊢ k < i ↔ k ≤ i - 1 + constructor + · intro h; exact (Nat.le_sub_one_iff_lt hi).mpr h + · intro h; exact Nat.lt_of_le_pred hi h + rw [h_ico, Set.image_union, Set.image_singleton, Submodule.span_union] + ext x + conv_lhs => rw [h_decomp] + -- ⊢ x ∈ ↑(U 𝔽q β (i - 1) ⊔ Submodule.span 𝔽q {β (i - 1)}) + -- ↔ x ∈ ⋃ c, (fun u ↦ c • β (i - 1) + u) '' ↑(U 𝔽q β (i - 1)) + rw [Submodule.coe_sup, Set.mem_add] + constructor + · rintro ⟨u, hu, v, hv, rfl⟩ + simp only [SetLike.mem_coe] at hu hv + rw [Submodule.mem_span_singleton] at hv + rcases hv with ⟨c, rfl⟩ + simp only [Set.mem_iUnion, Set.mem_image] + exact ⟨c, u, hu, by rw [add_comm]⟩ + · intro hx + simp only [Set.mem_iUnion, Set.mem_image] at hx + rcases hx with ⟨c, u, hu, rfl⟩ + rw [add_comm] + -- ⊢ ∃ x ∈ ↑(U 𝔽q β (i - 1)), ∃ y ∈ ↑(Submodule.span 𝔽q {β (i - 1)}), x + y = u + c • β (i - 1) + exact ⟨u, hu, c • β (i-1), Submodule.smul_mem _ _ 
(Submodule.mem_span_singleton_self _), rfl⟩ + +omit [Fintype L] [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The basis vector `βᵢ` is not an element of the subspace `Uᵢ`. -/ +lemma βᵢ_not_in_Uᵢ + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat): + β i ∉ U L 𝔽q β i := by + -- `βᵢ` cannot be expressed as a linear combination of `<β₀, ..., βᵢ₋₁>`. + -- This follows from the definition of linear independence of `β` + have h_li := linearIndependent_iff_notMem_span.mp hβ_lin_indep i + -- Uᵢ is the span of a subset of the "other" vectors. + have h_subset : (Set.image β (Set.Ico 0 i)) ⊆ (Set.image β {i}ᶜ) := by + if h_i: i > 0 then + rw [Set.image_subset_image_iff (LinearIndependent.injective hβ_lin_indep)] + simp only [Set.subset_compl_singleton_iff, Set.mem_Ico, zero_le, true_and, not_le, + tsub_lt_self_iff, zero_lt_one, and_true] + omega + else + have h_i_eq_0: i = 0 := by exact Nat.eq_zero_of_not_pos h_i + have set_empty: Set.Ico 0 i = ∅ := by + rw [h_i_eq_0] + simp only [Set.Ico_eq_empty_iff] + exact Nat.not_lt_zero 0 + -- ⊢ β '' Set.Ico 0 i ⊆ β '' {i}ᶜ + rw [set_empty] + simp only [Set.image_empty] + simp only [Set.empty_subset] + -- Since `span` is monotonic, if `βᵢ` were in the smaller span `Uᵢ`, + -- it would be in the larger one. + exact fun h_in_U => h_li (by + -- ⊢ β i ∈ Submodule.span 𝔽q (β '' (Set.univ \ {i})) + have res := Submodule.span_mono h_subset h_in_U + rw [Set.compl_eq_univ_diff] at res + exact res + ) + +-- The main theorem +omit [Fintype L] [DecidableEq L] [Fintype 𝔽q] in +theorem root_U_lift_down (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat) (a : L): + a ∈ (U L 𝔽q β (i+1)) → ∃! 
x: 𝔽q, a - x • β i ∈ (U L 𝔽q β i) := by + intro h_a_mem_U_i_plus_1 + apply existsUnique_of_exists_of_unique + · -- PART 1: Existence -- ⊢ ∃ x, a - x • β i ∈ U L 𝔽q β i + have h_ico : Set.Ico 0 (i+1) = Set.Ico 0 i ∪ {i} := by + ext k; simp only [Set.mem_Ico, Set.mem_union, Set.mem_singleton]; simp only [zero_le, + true_and, Set.mem_singleton_iff]; omega + rw [U, h_ico, Set.image_union, Set.image_singleton, Submodule.span_union] at h_a_mem_U_i_plus_1 + -- h_a_mem_U_i_plus_1 : a ∈ Submodule.span 𝔽q (β '' Set.Ico 0 i) ⊔ Submodule.span 𝔽q {β i} + rw [Submodule.mem_sup] at h_a_mem_U_i_plus_1 + rcases h_a_mem_U_i_plus_1 with ⟨u, h_u_mem_U_i, v, h_v_mem, h_a_eq⟩ + rw [Submodule.mem_span_singleton] at h_v_mem + rcases h_v_mem with ⟨x, rfl⟩ + -- ⊢ ∃ x, a - x • β i ∈ U L 𝔽q β i + use x -- ⊢ a - x • β i ∈ U L 𝔽q β i, h_a_eq : u + x • β i = a + have h_a_sub_x_smul_β_i_mem_U_i : a - x • β i = u := by + rw [h_a_eq.symm] + norm_num + rw [h_a_sub_x_smul_β_i_mem_U_i] + exact h_u_mem_U_i + · -- PART 2: Uniqueness + intros x y hx hy -- ⊢ x = y + -- Let x and y be two scalars that satisfy the property. + -- hx: `a - x • β i ∈ U i` + -- hy: `a - y • β i ∈ U i` + -- Since `U i` is a subspace, the difference of these two vectors is also in `U i`. + let u_x := a - x • β i + let u_y := a - y • β i + have h_diff_mem : u_y - u_x ∈ U L 𝔽q β i := Submodule.sub_mem (U L 𝔽q β i) hy hx + + -- Let's simplify the difference: `(a - y•βi) - (a - x•βi) = x•βi - y•βi = (x-y)•βi`. + rw [sub_sub_sub_cancel_left] at h_diff_mem -- h_diff_mem : x • β i - y • β i ∈ U L 𝔽q β i + rw [←sub_smul] at h_diff_mem + -- So, we have `(x - y) • β i ∈ U i`. + by_cases h_eq : x - y = 0 + -- If `x - y = 0`, then `x = y` and we're done. + · exact sub_eq_zero.mp h_eq + -- Otherwise, we have a contradiction. 
+ · exfalso + have h_β_i_mem := (Submodule.smul_mem_iff _ h_eq).mp h_diff_mem + have h_β_i_not_in_U_i := βᵢ_not_in_Uᵢ (hβ_lin_indep:=hβ_lin_indep) (i:=i) + exact h_β_i_not_in_U_i h_β_i_mem + +omit [Fintype L] [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +theorem root_U_lift_up (i : Nat) (a : L) (x : 𝔽q): + a - x • β i ∈ (U L 𝔽q β i) → a ∈ (U L 𝔽q β (i+1)) := by + intro h_a_sub_x_smul_β_i_mem_U_i + -- We want to show `a ∈ U(i+1)`. We can rewrite `a` as `(a - x • β i) + x • β i`. + rw [← sub_add_cancel a (x • β i)] + -- Now we just need to prove that both parts of the sum are in the subspace `U(i+1)`. + apply Submodule.add_mem + · -- Part 1: Prove `a - x • β i ∈ U(i+1)` + apply Submodule.span_mono + apply Set.image_subset + apply Set.Ico_subset_Ico_right (Nat.le_succ i) + exact h_a_sub_x_smul_β_i_mem_U_i + · -- Part 2: Prove `x • β i ∈ U(i+1)` + -- A scaled basis vector `x • β i` is in the span `U(i+1)` if the basis vector `β i` is. + apply Submodule.smul_mem + -- `β i` is in the span `U(i+1)` because it's one of its generators. + apply Submodule.subset_span + apply Set.mem_image_of_mem + simp only [Set.mem_Ico, zero_le, lt_add_iff_pos_right, zero_lt_one, and_self] + +/-- +The subspace vanishing polynomial `Wᵢ(X) := ∏_{u ∈ Uᵢ} (X - u)`. +The degree of `Wᵢ(X)` is `|Uᵢ| = 2^i`. +- [LCH14, Lemma 1]: `Wᵢ(X)` is an `𝔽q`-linearized polynomial, i.e., + `Wᵢ(x) = ∑_{j=0}^i a_{i, j} x^{2^j}` for some constants `a_{i, j} ∈ L` (Equation (3)). +- The additive property: `Wᵢ(x + y) = Wᵢ(x) + Wᵢ(y)` for all `x, y ∈ L` (Equation (4)). +- For all `y ∈ Uᵢ`, `Wᵢ(x + y) = Wᵢ(x)` (Equation (14)). +-/ +noncomputable def W (i : Nat) : L[X] := + univ.prod (fun u : U L 𝔽q β i => X - C u.val) + +omit [DecidableEq 𝔽q] in +/-- The degree of the subspace vanishing polynomial `Wᵢ(X)` is `2ⁱ`. 
-/ +lemma degree_W (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat): + (W L 𝔽q β i).degree = (Fintype.card 𝔽q)^i := by + have h_monic : ∀ (u: U L 𝔽q β i), Monic (X - C u.val) := + fun _ => Polynomial.monic_X_sub_C _ + have h_monic_Fin_univ: ∀ u ∈ (univ (α:=U (𝔽q:=𝔽q) (β:=β) (i:=i))), + Monic (X - C u.val) := by + intros u hu + have h_monic_u := h_monic u + have h_monic_u_Fin_univ : Monic (X - C u.val) := h_monic_u + exact h_monic_u_Fin_univ + have h_deg : ∀ (u : U (𝔽q:=𝔽q) (β:=β) (i:=i)), (X - C u.val).degree = 1 := + fun _ => degree_X_sub_C _ + unfold W + rw [degree_prod_of_monic (h:=h_monic_Fin_univ)] + -- ⊢ ∑ i_1, (X - C ↑i_1).degree = 2 ^ i + simp only [degree_X_sub_C, sum_const, card_univ, nsmul_eq_mul, mul_one] + -- ⊢ ↑(Fintype.card ↥(U β i)) = 2 ^ i + rw [U_card (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep)] + rfl + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The subspace vanishing polynomial `Wᵢ(X)` is monic. -/ +lemma W_monic (i : Nat): + (W L 𝔽q β i).Monic := by + unfold W + apply Polynomial.monic_prod_of_monic + intros u hu + exact Polynomial.monic_X_sub_C u.val + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +lemma W_ne_zero (i : Nat) : (W L 𝔽q β i) ≠ 0 := by + unfold W + by_contra h_zero + rw [prod_eq_zero_iff] at h_zero + rcases h_zero with ⟨c, hc, h_zero⟩ + have X_sub_c_ne_Zero: X - C (c: L) ≠ (0: L[X]) := by + exact Polynomial.X_sub_C_ne_zero (c: L) + contradiction + +example (i: ℕ) (h_i_eq_0 : i = 0) : Set.Ico 0 i = ∅ := by + rw [h_i_eq_0] -- ⊢ Set.Ico 0 0 = ∅ + simp only [Set.Ico_eq_empty_iff] + exact Nat.not_lt_zero 0 + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The evaluation of `Wᵢ(X)` at `βᵢ` is non-zero. -/ +lemma Wᵢ_eval_βᵢ_neq_zero (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat): + (W L 𝔽q β i).eval (β i) ≠ 0 := by + -- Since `βᵢ ∉ Uᵢ`, `eval (Wᵢ(X)) (βᵢ)` cannot be zero. + -- `eval(P*Q, x) = eval(P,x) * eval(Q,x)`. 
A product is non-zero iff all factors are non-zero. + rw [W, eval_prod, prod_ne_zero_iff] + intro u _ + -- We need to show `(β i - u.val) ≠ 0`, which is `β i ≠ u.val`. + -- This is true because `βᵢ ∉ Uᵢ`. + have h := βᵢ_not_in_Uᵢ L 𝔽q β (hβ_lin_indep:=hβ_lin_indep) i + intro eq + have : β i = u.val := by + have poly_eq: ((X - C u.val) : L[X]) = (1: L[X]) * (X - C u.val) := by + rw [one_mul (X - C u.val)] + rw [poly_eq] at eq + simp only [one_mul, eval_sub, eval_X, eval_C] at eq + -- eq: eq : β i - ↑u = 0 + rw [sub_eq_zero] at eq + exact eq + exact h (this ▸ u.2) + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +-- `Wᵢ(X)` vanishes on `Uᵢ` +lemma Wᵢ_vanishing (i : Nat): + ∀ u ∈ U L 𝔽q β i, (W L 𝔽q β i).eval u = 0 := by + -- The roots of `Wᵢ(X)` are precisely the elements of `Uᵢ`. + -- For any `u ∈ Uᵢ`, the product `Wᵢ(X)` contains the factor `(X - u)`. + intro u hu + rw [W, eval_prod, prod_eq_zero_iff] + -- We use `u` itself, which is in the set of factors, to make the product zero. + use ⟨u, hu⟩ + simp only [mem_univ, eval_sub, eval_X, eval_C, sub_self, and_self] + +end LinearSubspaces + +section LinearityOfSubspaceVanishingPolynomials +/-! +### Formalization of linearity of subspace vanishing polynomials + +This section formalizes the key properties of the subspace vanishing polynomials `Wᵢ`, +including their recursive structure and `𝔽q`-linearity as described in Lemma 2.3 of [GGJ96]. +The proofs are done by simultaneous induction on `i`. +-/ + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The subspace vanishing polynomial `Wᵢ(X)` splits into linear factors over `L`. -/ +lemma W_splits (i : Nat) : (W L 𝔽q β i).Splits (RingHom.id L) := by + unfold W + -- The `W` polynomial is a product of factors. A product splits if every factor splits. + apply Polynomial.splits_prod + -- Now we must show that each factor `(X - C j.val)` splits. + intros j hj + -- A polynomial of the form `X - a` is linear and therefore always splits.
+ -- The lemma for this is `Polynomial.splits_X_sub_C`. + apply Polynomial.splits_X_sub_C + +omit [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The roots of `Wᵢ(X)` are precisely the elements of the subspace `Uᵢ`. -/ +lemma roots_W (i : Nat): -- converts root Multiset into (univ: Uᵢ.val.map) + (W L 𝔽q β i).roots = (univ : Finset (U L 𝔽q β i)).val.map (fun u => u.val) := by + unfold W -- must unfold to reason on the form of `prod (X-C)` + let f_inner : U L 𝔽q β i → L := Subtype.val + let f_outer : L → L[X] := fun y => X - C y + have h_inj : Function.Injective f_inner := Subtype.val_injective + -- ⊢ (∏ u, (X - C ↑u)).roots = Multiset.map (fun u ↦ ↑u) univ.val + rw [← prod_image (g := f_inner) (f := f_outer)] + · -- ⊢ (∏ x ∈ image f_inner univ, f_outer x).roots = + -- Multiset.map (fun u ↦ ↑u) univ.val + let s := (univ : Finset (U L 𝔽q β i)).image f_inner + rw [Polynomial.roots_prod_X_sub_C (s := s)] + -- ⊢ s.val = Multiset.map (fun u ↦ ↑u) univ.val + apply image_val_of_injOn -- (H : Set.InjOn f s) : (image f s).1 = s.1.map f + -- ⊢ Set.InjOn f_inner ↑Finset.univ + unfold Set.InjOn + intro u hu x2 hx2 h_u_eq_x2 + exact h_inj h_u_eq_x2 + · -- ⊢ ∀ x ∈ univ, ∀ y ∈ univ, f_inner x = f_inner y → x = y + intro x hx y hy hfx_eq_fy + exact h_inj hfx_eq_fy + +@[simps!] 
+noncomputable def algEquivAevalXSubC {R : Type*} [CommRing R] (t : R) : R[X] ≃ₐ[R] R[X] := by + -- Reference: Polynomial.algEquivAevalXAddC + have h_comp_X_sub_C : (X - C t).comp (X + C t) = X := by + simp [comp_assoc, aeval_X, aeval_C, sub_add_cancel] + have h_comp_X_add_C : (X + C t).comp (X - C t) = X := by + simp [comp_assoc, aeval_X, aeval_C, add_sub_cancel] + exact algEquivOfCompEqX (p:=X - C t) (q:=X + C t) (hpq:=h_comp_X_sub_C) (hqp:=h_comp_X_add_C) + +omit [Fintype L] [DecidableEq L] in +lemma comp_X_sub_C_eq_zero_iff (p : L[X]) (a : L) : + p.comp (X - C a) = 0 ↔ p = 0 := EmbeddingLike.map_eq_zero_iff (f := algEquivAevalXSubC a) + -- Reference: Polynomial.comp_X_add_C_eq_zero_iff + +omit [Fintype L] in +/-- +The multiplicity of a root `x` in a polynomial `p` composed with `(X - a)` is equal to the +multiplicity of the root `x - a` in `p`. +-/ +lemma rootMultiplicity_comp_X_sub_C (p : L[X]) (a x : L) : + rootMultiplicity x (p.comp (X - C a)) = rootMultiplicity (x - a) p := by + -- Reference: rootMultiplicity_eq_rootMultiplicity + classical + simp only [rootMultiplicity_eq_multiplicity] + simp only [comp_X_sub_C_eq_zero_iff, map_sub] + -- ⊢ (if p = 0 then 0 else multiplicity (X - C x) (p.comp (X - C a))) + -- = if p = 0 then 0 else multiplicity (X - (C x - C a)) p + -- `(X - C x)^n | (p.comp (X - C a)) <=> (X - (C x - C a))^n | p` + by_cases hp_zero : p = 0 + · simp only [hp_zero, if_true] + · simp only [hp_zero, if_false] + have h_p_comp_zero: p.comp (X - C a) ≠ 0 := by + by_contra h_p_comp_zero_contra + simp only [comp_X_sub_C_eq_zero_iff] at h_p_comp_zero_contra + contradiction + -- ⊢ multiplicity (X - C x) (p.comp (X - C a)) = multiplicity (X - (C x - C a)) p + have res : multiplicity (X - (C x - C a)) p = multiplicity (X - C x) (p.comp (X - C a)):= by + convert (multiplicity_map_eq <| algEquivAevalXSubC a).symm using 2 + -- ⊢ X - C x = (algEquivAevalXSubC a) (X - (C x - C a)) + simp only [algEquivAevalXSubC, algEquivOfCompEqX_apply] + simp only 
[map_sub, aeval_X, aeval_C, algebraMap_eq] + simp only [sub_sub_sub_cancel_right, Polynomial.aeval_comp] + exact res.symm + +omit [Fintype L] in +-- The main helper lemma, now proven using the multiplicity lemma above. +lemma roots_comp_X_sub_C (p : L[X]) (a : L) : + (p.comp (X - C a)).roots = p.roots.map (fun r => r + a) := by + -- To prove two multisets are equal, we show that for any element `s`, + -- its count is the same in both sets. + ext s + rw [Polynomial.count_roots, rootMultiplicity_comp_X_sub_C] -- transform the LHS + -- ⊢ rootMultiplicity (s - a) p = Multiset.count s (p.roots.map (fun r ↦ r + a)) + rw [Multiset.count_map] + -- ⊢ rootMultiplicity (s - a) p = (Multiset.filter (fun a_1 ↦ s = a_1 + a) p.roots).card + -- Use `filter_congr` to rewrite the predicate inside the filter to isolate `r`. + rw [Multiset.filter_congr (p:=fun r => s = r + a) (q:=fun r => s - a = r) (by { + intro r hr_root + simp only + -- ⊢ s = r + a ↔ s - a = r + rw [add_comm] + have res := eq_sub_iff_add_eq (a:=r) (b:=s) (c:=a) + rw [eq_comm] at res + conv_rhs at res => rw [eq_comm, add_comm] + exact Iff.symm res + })] + -- ⊢ rootMultiplicity (s - a) p = (Multiset.filter (fun r ↦ s - a = r) p.roots).card + rw [←Multiset.countP_eq_card_filter] + -- ⊢ rootMultiplicity (s - a) p = Multiset.count (s - a) p.roots + rw [← Polynomial.count_roots, Multiset.count] + +-- The main helper lemma, now proven using the multiplicity lemma above. 
+ +omit [DecidableEq L] [DecidableEq 𝔽q] in +lemma Prod_W_comp_X_sub_C_ne_zero (i : Nat) : + (univ : Finset 𝔽q).prod (fun c => (W L 𝔽q β i).comp (X - C (c • β i))) ≠ 0 := by + by_contra h_zero + rw [prod_eq_zero_iff] at h_zero + rcases h_zero with ⟨c, hc, h_zero⟩ + rw [Polynomial.comp_eq_zero_iff] at h_zero + cases h_zero with + | inl h1 => + exact (W_ne_zero L 𝔽q β i) h1 + | inr h1 => + simp only [coeff_sub, coeff_X_zero, coeff_C_zero, zero_sub, map_neg, sub_eq_neg_self, + X_ne_zero, and_false] at h1 + +omit [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- +The polynomial `Wᵢ(X)` has simple roots (multiplicity 1) for each element in the +subspace `Uᵢ`, and no other roots. +-/ +lemma rootMultiplicity_W (i : Nat) (a : L) : + rootMultiplicity a (W L 𝔽q β i) = if a ∈ (U L 𝔽q β i : Set L) then 1 else 0 := by + -- The multiplicity of root `a` is its count in the multiset of roots. + rw [←Polynomial.count_roots, roots_W] + -- The roots of `W` are the image of `Subtype.val` over the elements of the subspace `Uᵢ`. + -- So we need to count `a` in the multiset `map Subtype.val ...` + rw [Multiset.count_map] + -- ⊢ (Multiset.filter (fun a_1 ↦ a = ↑a_1) univ.val).card = if a ∈ ↑(U L 𝔽q β i) then 1 else 0 +-- The goal is now: + -- ⊢ (Multiset.filter (fun u ↦ a = u.val) ...).card = if a ∈ Uᵢ then 1 else 0 + + -- We prove this by cases, depending on whether `a` is in the subspace `Uᵢ`. + by_cases h_mem : a ∈ U L 𝔽q β i + + · -- Case 1: `a` is in the subspace `Uᵢ`. + -- The RHS of our goal becomes 1. + simp only [SetLike.mem_coe, h_mem, ↓reduceIte] + + -- We need to prove the cardinality of the filtered multiset is 1. + -- The filter keeps only those elements `u` from `Uᵢ` whose value is `a`. + -- Since `a ∈ Uᵢ`, we know there is at least one such `u`. 
+ -- ⊢ (Multiset.filter (fun a_1 ↦ a = ↑a_1) univ.val).card = 1 + + -- Since `a ∈ Uᵢ`, there exists some `u : Uᵢ` such that `u.val = a` + have h_exists : ∃ u : U L 𝔽q β i, u.val = a := by + exact CanLift.prf a h_mem + rcases h_exists with ⟨u, rfl⟩ -- This gives us the `u` such that `u.val = a`. + + -- The filter now becomes: filter (fun u₁ => u.val = u₁.val) univ.val + -- This is equivalent to counting how many elements in univ have the same value as u + -- Since Subtype.val is injective, there can be at most one such element + -- And since u is in univ, there is exactly one such element + have h_filter_eq_singleton : Multiset.filter (fun u₁ => u.val = u₁.val) univ.val = {u} := by + -- Use count-based equality for multisets + ext v + -- ⊢ count v (filter (fun u₁ => u.val = u₁.val) univ.val) = count v {u} + rw [Multiset.count_filter, Multiset.count_singleton] + by_cases h_v_eq_u : v = u + · -- If v = u, then count should be 1 + rw [h_v_eq_u] + simp only [↓reduceIte, Multiset.count_univ] + · -- If v ≠ u, then count should be 0 + simp only [SetLike.coe_eq_coe, Multiset.count_univ] + -- ⊢ (if u = v then 1 else 0) = if v = u then 1 else 0 + simp only [h_v_eq_u, if_true, if_false] + simp only [ite_eq_right_iff, one_ne_zero, imp_false] + exact fun a ↦ h_v_eq_u (id (Eq.symm a)) + rw [h_filter_eq_singleton, Multiset.card_singleton] + · -- Case 2: `a` is not in the subspace `Uᵢ`. + -- The RHS of our goal becomes 0. + simp only [SetLike.mem_coe, h_mem, ↓reduceIte] + + -- Since `a ∉ Uᵢ`, there is no `u : Uᵢ` such that `u.val = a` + have h_ne_exists_a : ¬∃ u : U L 𝔽q β i, u.val = a := by + by_contra h_u_val_eq_a -- h_u_val_eq_a : ∃ u, ↑u = a + rcases h_u_val_eq_a with ⟨u, rfl⟩ -- This gives us the `u` such that `u.val = a`. 
+ exact h_mem u.property -- lift from `U L 𝔽q β i` to `L` to get a contradiction + have h_filter_eq_empty : + Multiset.filter (fun (u₁ : U L 𝔽q β i) => a = u₁.val) univ.val = 0 := by + -- Use count-based equality for multisets + ext v + -- ⊢ count v (filter (fun u₁ => a = u₁.val) univ.val) = count v 0 + rw [Multiset.count_filter, Multiset.count_zero] + simp only [SetLike.coe_eq_coe, Multiset.count_univ] + simp only [ite_eq_right_iff, one_ne_zero, imp_false] + by_contra h_v_eq_a + exact h_ne_exists_a ⟨v, h_v_eq_a.symm⟩ + rw [h_filter_eq_empty, Multiset.card_zero] + +omit [Fintype 𝔽q] [DecidableEq 𝔽q] in +lemma eval_W_eq_zero_iff_in_U (i : Nat) (a : L) : + (W L 𝔽q β i).eval a = 0 ↔ a ∈ U L 𝔽q β i := by + constructor + · -- Forward direction: Wᵢ(a) = 0 → a ∈ Uᵢ + intro h_eval_zero -- h_eval_zero : eval a (W L 𝔽q β i) = 0 + -- If Wᵢ(a) = 0, then a is a root of Wᵢ + have h_root_W : (W L 𝔽q β i).IsRoot a := by + rw [IsRoot.def] + exact h_eval_zero + -- theorem rootMultiplicity_pos {p : R[X]} (hp : p ≠ 0) {x : R} : + -- 0 < rootMultiplicity x p ↔ IsRoot p x := + have h_root_W_pos : 0 < rootMultiplicity a (W L 𝔽q β i) := by + simp [rootMultiplicity_pos] + constructor + · push_neg; exact W_ne_zero L 𝔽q β i + · exact h_root_W + rw [rootMultiplicity_W] at h_root_W_pos + by_cases h_a_in_U : a ∈ U L 𝔽q β i + · simp only [h_a_in_U, if_true] + · simp only [SetLike.mem_coe, h_a_in_U, ↓reduceIte, lt_self_iff_false] at h_root_W_pos + · -- Reverse direction: a ∈ Uᵢ → Wᵢ(a) = 0 + intro h_a_in_U + -- This is exactly what Wᵢ_vanishing proves + exact Wᵢ_vanishing L 𝔽q β i a h_a_in_U + +lemma rootMultiplicity_prod_W_comp_X_sub_C + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat) (a : L) : + rootMultiplicity a ((univ : Finset 𝔽q).prod (fun c => (W L 𝔽q β i).comp (X - C (c • β i)))) = + if a ∈ (U L 𝔽q β (i+1) : Set L) then 1 else 0 := by + rw [←Polynomial.count_roots] + set f := fun c: 𝔽q => (W L 𝔽q β i).comp (X - C (c • β i)) with hf + -- ⊢ Multiset.count a (univ.prod 
f).roots = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have h_prod_ne_zero: univ.prod f ≠ 0 := Prod_W_comp_X_sub_C_ne_zero L 𝔽q β i + rw [roots_prod (f:=f) (s:=univ (α:=𝔽q)) h_prod_ne_zero] + set roots_f := fun c: 𝔽q => (f c).roots with hroots_f + rw [Multiset.count_bind] + -- ⊢ (Multiset.map (fun b ↦ Multiset.count a (roots_f b)) univ.val).sum + -- = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have h_roots_f_eq_roots_W : ∀ b : 𝔽q, + roots_f b = (W L 𝔽q β i).roots.map (fun r => r + (b • β i)) := by + intro b + rw [hroots_f, hf] + exact roots_comp_X_sub_C (p:=(W L 𝔽q β i)) (a:=(b • β i)) + simp_rw [h_roots_f_eq_roots_W] + + set shift_up := fun x: 𝔽q => fun r: L => r + x • β i with hshift_up + have h_shift_up_all: ∀ x: 𝔽q, ∀ r: L, shift_up x r = r + x • β i := by + intro x r + rw [hshift_up] + simp only [sum_map_val, SetLike.mem_coe] + have h_a: ∀ x: 𝔽q, a = shift_up x (a - x • β i) := by + intro x + rw [hshift_up] + exact Lean.Grind.IntModule.sub_eq_iff.mp rfl + conv_lhs => + enter [2, x] -- focus on the inner Multiset.count + rw [h_a x] + enter [2] + enter [1] + enter [r] + rw [←h_shift_up_all x r] -- rewrite to another notation + -- ⊢ ∑ x, Multiset.count (shift_up x (a - x • β i)) (Multiset.map (shift_up x) (W L 𝔽q β i).roots) + -- = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have h_shift_up_inj: ∀ x: 𝔽q, Function.Injective (shift_up x) := by + intro x + unfold shift_up + exact add_left_injective (x • β i) + have h_count_map: ∀ x: 𝔽q, + Multiset.count (shift_up x (a - x • β i)) (Multiset.map (shift_up x) (W L 𝔽q β i).roots) = + Multiset.count (a - x • β i) (W L 𝔽q β i).roots := by + -- transform to counting (a - x • β i) in the roots of Wᵢ + intro x + have h_shift_up_inj_x: Function.Injective (shift_up x) := h_shift_up_inj x + simp only [Multiset.count_map_eq_count' (hf := h_shift_up_inj_x), count_roots] + conv_lhs => + enter [2, x] + rw [h_count_map x] + -- ⊢ ∑ x, Multiset.count (a - x • β i) (W L 𝔽q β i).roots + -- = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have 
h_root_lift_down := root_U_lift_down L 𝔽q β hβ_lin_indep i a + have h_root_lift_up := root_U_lift_up L 𝔽q β i a + conv_lhs => + enter [2, x] + simp only [count_roots] + rw [rootMultiplicity_W] + by_cases h_a_mem_U_i: a ∈ ↑(U L 𝔽q β (i + 1)) + · -- ⊢ (∑ x, if a - x • β i ∈ ↑(U L 𝔽q β i) then 1 else 0) + -- = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have h_true: (a ∈ ↑(U L 𝔽q β (i + 1))) = True := by simp only [h_a_mem_U_i] + rcases h_root_lift_down h_a_mem_U_i with ⟨x0, hx0, hx0_unique⟩ + conv => + rhs + -- | if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 => reduce this to 1 + enter [1] + exact h_true -- maybe there can be a better way to do this + rw [ite_true] + -- ⊢ (∑ x, if a - x • β i ∈ ↑(U L 𝔽q β i) then 1 else 0) = 1 + have h_true: ∀ x: 𝔽q, + if x = x0 then a - x • β i ∈ ↑(U L 𝔽q β i) else a - x • β i ∉ ↑(U L 𝔽q β i) := by + intro x + by_cases h_x_eq_x0 : x = x0 + · rw [if_pos h_x_eq_x0] -- ⊢ a - x • β i ∈ U L 𝔽q β i + rw [←h_x_eq_x0] at hx0 + exact hx0 + · rw [if_neg h_x_eq_x0] -- ⊢ a - x • β i ∉ U L 𝔽q β i + by_contra h_mem + have h1 := hx0_unique x + simp only [h_mem, forall_const] at h1 + contradiction + + have h_true_x: ∀ x: 𝔽q, (a - x • β i ∈ ↑(U L 𝔽q β i)) = if x = x0 then True else False := by + intro x + by_cases h_x_eq_x0 : x = x0 + · rw [if_pos h_x_eq_x0] + rw [←h_x_eq_x0] at hx0 + simp only [hx0] + · rw [if_neg h_x_eq_x0] + by_contra h_mem + push_neg at h_mem + simp only [ne_eq, eq_iff_iff, iff_false, not_not] at h_mem + have h2 := hx0_unique x + simp only [h_mem, forall_const] at h2 + contradiction + conv => + lhs + enter [2, x] + simp only [SetLike.mem_coe, h_true_x x, if_false_right, and_true] + rw [sum_ite_eq'] + simp only [mem_univ, ↓reduceIte] + · -- ⊢ (∑ x, if a - x • β i ∈ ↑(U L 𝔽q β i) then 1 else 0) + -- = if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 + have h_false: (a ∈ ↑(U L 𝔽q β (i + 1))) = False := by simp only [h_a_mem_U_i] + conv => + rhs -- | if a ∈ ↑(U L 𝔽q β (i + 1)) then 1 else 0 => reduce this to 1 + enter [1] + exact h_false -- maybe 
there can be a better way to do this + rw [ite_false] + + have h_zero_x: ∀ x: 𝔽q, (a - x • β i ∈ ↑(U L 𝔽q β i)) = False := by + intro x + by_contra h_mem + simp only [eq_iff_iff, iff_false, not_not] at h_mem -- h_mem : a - x • β i ∈ U L 𝔽q β i + have h_a_mem_U_i := h_root_lift_up x h_mem + contradiction + + conv => + lhs + enter [2, x] + simp only [SetLike.mem_coe, h_zero_x x, if_false_right, and_true] + simp only [↓reduceIte, sum_const_zero] + +/-- +The generic product form of the recursion for `Wᵢ`. +This follows the first line of the proof for (i) in the description. +`Wᵢ(X) = ∏_{c ∈ 𝔽q} Wᵢ₋₁ ∘ (X - cβᵢ₋₁)`. +-/ +lemma W_recursive_decomposition (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat) (hi : i > 0) : + (W L 𝔽q β i) = ∏ c: 𝔽q, (W L 𝔽q β (i-1)).comp (X - C (c • β (i-1))) := by + -- ⊢ W 𝔽q β i = ∏ c, (W 𝔽q β (i - 1)).comp (X - C (c • β (i - 1))) + -- Define P and Q for clarity + set P := W L 𝔽q β i + set Q := ∏ c: 𝔽q, (W L 𝔽q β (i-1)).comp (X - C (c • β (i-1))) + +-- c : 𝔽q => univ +-- c ∈ finsetX + + -- STRATEGY: Prove P = Q by showing they are monic, split, and have the same roots. + + -- 1. Show P and Q are MONIC. + have hP_monic : P.Monic := W_monic (𝔽q:=𝔽q) (β:=β) (i:=i) + have hQ_monic : Q.Monic := by + apply Polynomial.monic_prod_of_monic; intro c _ + apply Monic.comp + · exact W_monic (𝔽q:=𝔽q) (β:=β) (i:=(i-1)) + · -- ⊢ (X - C (c • β (i - 1))).Monic + exact Polynomial.monic_X_sub_C (c • β (i - 1)) + · conv_lhs => rw [natDegree_sub_C, natDegree_X] + norm_num + -- 2. Show P and Q SPLIT over L. + have hP_splits : P.Splits (RingHom.id L) := W_splits L 𝔽q β i + have hQ_splits : Q.Splits (RingHom.id L) := by + apply Polynomial.splits_prod + intro c _ + -- Composition of a splitting polynomial with a linear polynomial also splits. 
+ -- ⊢ Splits (RingHom.id L) ((W 𝔽q β (i - 1)).comp (X - C (c • β (i - 1)))) + apply Splits.comp_of_degree_le_one + · exact degree_X_sub_C_le (c • β (i - 1)) + · -- ⊢ Splits (RingHom.id L) (W 𝔽q β (i - 1)) + exact W_splits L 𝔽q β (i-1) + + -- 3. Show P and Q have the same ROOTS. + have h_roots_eq : P.roots = Q.roots := by + -- First, characterize the roots of P. They are the elements of Uᵢ. + unfold P Q + ext u + rw [Polynomial.count_roots, Polynomial.count_roots] + rw [rootMultiplicity_W] + conv_rhs => rw [rootMultiplicity_prod_W_comp_X_sub_C (hβ_lin_indep:=hβ_lin_indep)] + -- ⊢ (if u ∈ ↑(U L 𝔽q β i) then 1 else 0) = if u ∈ ↑(U L 𝔽q β (i - 1 + 1)) then 1 else 0 + have h_i: i - 1 + 1 = i := by omega + rw [h_i] + + -- 4. CONCLUSION: Since P and Q are monic, split, and have the same roots, they are equal. + have hP_eq_prod := Polynomial.eq_prod_roots_of_monic_of_splits_id hP_monic hP_splits + have hQ_eq_prod := Polynomial.eq_prod_roots_of_monic_of_splits_id hQ_monic hQ_splits + rw [hP_eq_prod, hQ_eq_prod, h_roots_eq] + +omit [Fintype L] [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +-- A helper lemma that IsLinearMap implies the composition property. +-- This follows from the fact that a polynomial whose evaluation map is linear +-- must be a "linearized polynomial" (or q-polynomial). 
+lemma comp_sub_C_of_linear_eval (p : L[X]) + (h_lin : IsLinearMap 𝔽q (f:=fun inner_p ↦ p.comp inner_p)) (a : L) : + p.comp (X - C a) = p - C (eval a p) := by -- linearity: p ∘ (X - a) = p(X) - p(a) + have h_comp_left: p.comp (X - C a) = p.comp X - p.comp (C a) := by + rw [sub_eq_add_neg] + have h_comp_add := h_lin.map_add (X: L[X]) (-C a) + rw [h_comp_add] + conv_rhs => rw [sub_eq_add_neg] + rw [add_right_inj (a:=p.comp X) (b:=p.comp (-C a)) (c:=-p.comp (C a))] + exact h_lin.map_neg (C a) + + rw [h_comp_left] + rw [comp_X] + rw [sub_right_inj] + exact comp_C + +lemma inductive_rec_form_W_comp (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat) + (h_prev_linear_map: IsLinearMap (R:=𝔽q) (M:=L[X]) (M₂:=L[X]) + (f:=fun inner_p ↦ (W L 𝔽q β i).comp inner_p)) + : ∀ p: L[X], (W L 𝔽q β (i + 1)).comp p = + ((W L 𝔽q β i).comp p) ^ Fintype.card 𝔽q - + C (eval (β i) (W L 𝔽q β i)) ^ (Fintype.card 𝔽q - 1) * ((W L 𝔽q β i).comp p) := by + intro p + set W_i := W L 𝔽q β i + set q := Fintype.card 𝔽q + set v := W_i.eval (β i) + + -- First, we must prove that v is non-zero to use its inverse. + have hv_ne_zero : v ≠ 0 := by + unfold v W_i + exact Wᵢ_eval_βᵢ_neq_zero L 𝔽q β hβ_lin_indep i + + -- Proof flow: + -- `Wᵢ₊₁(X) = ∏_{c ∈ 𝔽q} (Wᵢ ∘ (X - c • βᵢ))` -- from W_recursive_decomposition + -- `= ∏_{c ∈ 𝔽q} (Wᵢ(X) - c • Wᵢ(βᵢ))` -- linearity of Wᵢ + -- `= ∏_{c ∈ 𝔽q} (Wᵢ(X) - c • v)` + -- `= v² ∏_{c ∈ 𝔽q} (v⁻¹ • Wᵢ(X) - c)` + -- `= v² (v⁻² • Wᵢ(X)² - v⁻¹ • Wᵢ(X))` => FLT (prod_X_sub_C_eq_X_pow_card_sub_X_in_L) + -- `= Wᵢ(X)² - v • Wᵢ(X)` => Q.E.D + + have h_scalar_smul_eq_C_v_mul: ∀ s: L, ∀ p: L[X], s • p = C s * p := by + intro s p + exact smul_eq_C_mul s + have h_v_smul_v_inv_eq_one: v • v⁻¹ = 1 := by + simp only [smul_eq_mul] + exact CommGroupWithZero.mul_inv_cancel v hv_ne_zero + have h_v_mul_v_inv_eq_one: v * v⁻¹ = 1 := by + exact h_v_smul_v_inv_eq_one + -- The main proof using a chain of equalities (the `calc` block). 
+ calc + (W L 𝔽q β (i + 1)).comp p + _ = (∏ c: 𝔽q, (W_i).comp (X - C (c • β i))).comp p := by + have h_res := W_recursive_decomposition L 𝔽q β hβ_lin_indep (i+1) (by omega) + rw [h_res] + simp only [add_tsub_cancel_right] + rfl + -- Step 2: Apply the linearity property of Wᵢ as a polynomial. + _ = (∏ c: 𝔽q, (W_i - C (W_i.eval (c • β i)))).comp p := by + congr + funext c + -- We apply the transformation inside the product for each element `c`. + -- apply Finset.prod_congr rfl + -- ⊢ W_i.comp (X - C (c • β i)) = W_i - C (eval (c • β i) W_i) + exact comp_sub_C_of_linear_eval (p:=W_i) (h_lin:=h_prev_linear_map) (a:=(c • β i)) + -- Step 3: Apply the linearity of Wᵢ's *evaluation map* to the constant term. + -- Hypothesis: `h_prev_linear_map.map_smul` + _ = (∏ c: 𝔽q, (W_i - C (c • v))).comp p := by + congr + funext c + -- ⊢ W_i - C (eval (c • β i) W_i) = W_i - C (c • v) + congr + -- ⊢ eval (c • β i) W_i = c • v + -- Use the linearity of the evaluation map, not the composition map + have h_eval_linear := AdditiveNTT.linear_map_of_comp_to_linear_map_of_eval (f:=(W L 𝔽q β i)) + (h_f_linear:=h_prev_linear_map) + exact h_eval_linear.map_smul c (β i) + -- Step 4: Perform the final algebraic transformation. 
+ _ = (C (v^q) * (∏ c: 𝔽q, (C (v⁻¹) * W_i - C (algebraMap 𝔽q L c)))).comp p := by + congr + calc + _ = ∏ c: 𝔽q, (v • (v⁻¹ • W_i - C (algebraMap 𝔽q L c))) := by + apply Finset.prod_congr rfl + intro c _ + rw [smul_sub] + -- ⊢ W_i - C (c • v) = v • v⁻¹ • W_i - v • C ((algebraMap 𝔽q L) c) + rw [smul_C, smul_eq_mul, map_mul] + rw [←smul_assoc] + rw [h_v_smul_v_inv_eq_one] + rw [one_smul] + rw [sub_right_inj] + -- ⊢ C (c • v) = C v * C ((algebraMap 𝔽q L) c) + rw [←C_mul] + -- ⊢ C (c • v) = C (v * (algebraMap 𝔽q L) c) + have h_c_smul_v: c • v = (algebraMap 𝔽q L c) • v := by + exact algebra_compatible_smul L c v + rw [h_c_smul_v] + rw [mul_comm] + rw [smul_eq_mul] + _ = ∏ c: 𝔽q, (C v * (v⁻¹ • W_i - C (algebraMap 𝔽q L c))) := by + apply Finset.prod_congr rfl + intro c _ + rw [h_scalar_smul_eq_C_v_mul] + _ = C (v^q) * (∏ c: 𝔽q, (C v⁻¹ * W_i - C (algebraMap 𝔽q L c))) := by + -- rw [Finset.prod_mul_distrib] + -- rw [Finset.prod_const, Finset.card_univ] + rw [Finset.prod_mul_distrib] + conv_lhs => + enter [2] + enter [2] + rw [h_scalar_smul_eq_C_v_mul] + congr + -- ⊢ ∏ (x: 𝔽q), C v = C (v ^ q) + rw [Finset.prod_const, Finset.card_univ] + unfold q + exact Eq.symm C_pow + _ = (C (v^q) * ((C v⁻¹ * W_i)^q - (C v⁻¹ * W_i))).comp p := by + congr + -- ⊢ ∏ c, (C v⁻¹ * W_i - C ((algebraMap 𝔽q L) c)) = (C v⁻¹ * W_i) ^ q - C v⁻¹ * W_i + rw [AdditiveNTT.prod_poly_sub_C_eq_poly_pow_card_sub_poly_in_L + h_Fq_card_gt_1 (p:=C v⁻¹ * W_i)] + _ = (C (v^q) * C (v⁻¹^q) * W_i^q - C (v^q) * C v⁻¹ * W_i).comp p := by + congr + rw [mul_sub] + conv_lhs => + rw [mul_pow, ←mul_assoc, ←mul_assoc, ←C_pow] + _ = (W_i^q - C (v^(q-1)) * W_i).comp p := by + congr + rw [←C_mul, ←mul_pow, h_v_mul_v_inv_eq_one, one_pow, C_1, one_mul] + rw [←C_mul] + have h_v_pow_q_minus_1: v^q * v⁻¹ = v^(q-1) := by + rw [pow_sub₀ (a:=v) (m:=q) (n:=1) (ha:=hv_ne_zero) (h:=by omega)] + -- ⊢ v ^ q * v⁻¹ = v ^ q * (v ^ 1)⁻¹ + congr + norm_num + rw [h_v_pow_q_minus_1] + _ = (W_i^q - C (eval (β i) W_i) ^ (q - 1) * W_i).comp p := by + 
simp only [map_pow, add_tsub_cancel_right, W_i, q, v] + _ = (W_i^q).comp p - (C (eval (β i) W_i) ^ (q - 1) * W_i).comp p := by + rw [sub_comp] + _ = (W_i.comp p)^q - (C (eval (β i) W_i) ^ (q - 1)) * (W_i.comp p) := by + rw [pow_comp, mul_comp] + conv_lhs => + rw [pow_comp] + rw [C_comp (a:=(eval (β i) W_i)) (p:=p)] + +lemma inductive_linear_map_W (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat) + (h_prev_linear_map: IsLinearMap 𝔽q (f:=fun inner_p ↦ (W L 𝔽q β i).comp inner_p)) + : IsLinearMap 𝔽q (f:=fun inner_p ↦ (W L 𝔽q β (i + 1)).comp inner_p) := by + + have h_rec_form := inductive_rec_form_W_comp (h_Fq_card_gt_1:=h_Fq_card_gt_1) + (hβ_lin_indep:=hβ_lin_indep) (h_prev_linear_map:=h_prev_linear_map) (i:=i) + + set q := Fintype.card 𝔽q + set v := (W L 𝔽q β i).eval (β i) + + -- `∀ f(X), f(X) ∈ L[X]`: + constructor + · intro f g + -- 1. Proof flow + -- `Wᵢ₊₁(f(X)+g(X)) = Wᵢ(f(X)+g(X))² - v • Wᵢ(f(X)+g(X))` -- h_rec_form + -- `= (Wᵢ(f(X)) + Wᵢ(g(X)))² - v • (Wᵢ(f(X)) + Wᵢ(g(X)))` + -- `= (Wᵢ(f(X))² + (Wᵢ(g(X)))² - v • Wᵢ(f(X)) - v • Wᵢ(g(X)))` => Freshman's Dream + -- `= (Wᵢ(f(X))² - v • Wᵢ(f(X))) + (Wᵢ(g(X))² - v • Wᵢ(g(X)))` -- h_rec_form + -- `= Wᵢ₊₁(f(X)) + Wᵢ₊₁(g(X))` -- Q.E.D. 
+ + -- ⊢ (W L 𝔽q β (i + 1)).comp (x + y) = (W L 𝔽q β (i + 1)).comp x + (W L 𝔽q β (i + 1)).comp y + calc + _ = ((W L 𝔽q β i).comp (f + g))^q - C v ^ (q - 1) * ((W L 𝔽q β i).comp (f + g)) := by + rw [h_rec_form] + _ = ((W L 𝔽q β i).comp f)^q + ((W L 𝔽q β i).comp g)^q + - C v ^ (q - 1) * ((W L 𝔽q β i).comp f) - C v ^ (q - 1) * ((W L 𝔽q β i).comp g) := by + rw [h_prev_linear_map.map_add] + rw [AdditiveNTT.frobenius_identity_in_algebra (h_Fq_char_prime:=h_Fq_card_prime)] + rw [left_distrib] + unfold q + abel_nf + _ = (((W L 𝔽q β i).comp f)^q - C v ^ (q - 1) * ((W L 𝔽q β i).comp f)) + + (((W L 𝔽q β i).comp g)^q - C v ^ (q - 1) * ((W L 𝔽q β i).comp g)) := by + abel_nf + _ = (W L 𝔽q β (i+1)).comp f + (W L 𝔽q β (i+1)).comp g := by + unfold q + rw [h_rec_form f] + rw [h_rec_form g] + · intro c f + -- 2. Proof flow + -- `Wᵢ₊₁(c • f(X)) = Wᵢ(c • f(X))² - v • Wᵢ(c • f(X))` -- h_rec_form + -- `= c² • Wᵢ(f(X))² - v • c • Wᵢ(f(X))` + -- `= c • Wᵢ(f(X))² - v • c • Wᵢ(f(X))` via Fermat's Little Theorem (X^q = X) + -- `= c • (Wᵢ(f(X))² - v • Wᵢ(f(X)))` -- h_rec_form + -- `= c • Wᵢ₊₁(f(X))` -- Q.E.D. 
+ have h_c_smul_to_algebraMap_smul: ∀ t: L[X], c • t = (algebraMap 𝔽q L c) • t := by + exact algebra_compatible_smul L c + have h_c_smul_to_C_algebraMap_mul: ∀ t: L[X], c • t = C (algebraMap 𝔽q L c) * t := by + intro t + rw [h_c_smul_to_algebraMap_smul] + exact smul_eq_C_mul ((algebraMap 𝔽q L) c) + -- ⊢ (W L 𝔽q β (i + 1)).comp (c • x) = c • (W L 𝔽q β (i + 1)).comp x + calc + _ = ((W L 𝔽q β i).comp (c • f))^q - C v ^ (q - 1) * ((W L 𝔽q β i).comp (c • f)) := by + rw [h_rec_form (c • f)] + _ = (C (algebraMap 𝔽q L c) * (W L 𝔽q β i).comp f)^q + - C v ^ (q - 1) * (C (algebraMap 𝔽q L c) * (W L 𝔽q β i).comp f) := by + rw [h_prev_linear_map.map_smul] + rw [mul_pow] + simp_rw [h_c_smul_to_C_algebraMap_mul] + congr + rw [mul_pow] + _ = C (algebraMap 𝔽q L (c^q)) * ((W L 𝔽q β i).comp f)^q + - C v ^ (q - 1) * (C (algebraMap 𝔽q L c) * (W L 𝔽q β i).comp f) := by + rw [mul_pow] + congr -- ⊢ C ((algebraMap 𝔽q L) c) ^ q = C ((algebraMap 𝔽q L) (c ^ q)) + rw [←C_pow] + simp_rw [algebraMap.coe_pow c q] + _ = C (algebraMap 𝔽q L (c^q)) * ((W L 𝔽q β i).comp f)^q + - C v ^ (q - 1) * (C (algebraMap 𝔽q L c) * (W L 𝔽q β i).comp f) := by + -- use Fermat's Little Theorem (X^q = X) + simp only [map_pow] + _ = C (algebraMap 𝔽q L (c)) * ((W L 𝔽q β i).comp f)^q + - C v ^ (q - 1) * (C (algebraMap 𝔽q L c) * (W L 𝔽q β i).comp f) := by + rw [FiniteField.pow_card] + _ = C (algebraMap 𝔽q L c) * (((W L 𝔽q β i).comp f)^q + - C v ^ (q - 1) * (W L 𝔽q β i).comp f) := by + rw [←mul_assoc] + conv_lhs => rw [mul_comm (a:=C v ^ (q - 1)) (b:=C (algebraMap 𝔽q L c))]; rw [mul_assoc] + exact + Eq.symm + (mul_sub_left_distrib (C ((algebraMap 𝔽q L) c)) ((W L 𝔽q β i).comp f ^ q) + (C v ^ (q - 1) * (W L 𝔽q β i).comp f)) + _ = C (algebraMap 𝔽q L c) * (W L 𝔽q β (i + 1)).comp f := by + rw [h_rec_form f] + _ = _ := by + rw [h_c_smul_to_C_algebraMap_mul] + +/-- +**Simultaneous Proof of Linearity for `Wᵢ`** from the paper [GG96] (Lemma 2.3) +`Wᵢ` is an 𝔽q-linearized polynomial. 
This means for all polynomials `f, g` with coefficients + in `L` (i.e. `L[X]`) and for all `c ∈ 𝔽q`, we have: `Wᵢ(f + g) = Wᵢ(f) + Wᵢ(g)` and + `Wᵢ(c * f) = c * Wᵢ(f)`. As a corollary of this, `Wᵢ` is 𝔽q-linear when evaluated on elements + of `L`: `Wᵢ(x + y) = Wᵢ(x) + Wᵢ(y)` for all `x, y ∈ L`. +-/ +theorem W_linearity + (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat) : IsLinearMap 𝔽q (f:=fun inner_p ↦ (W L 𝔽q β i).comp inner_p) := by + induction i with + | zero => + -- Base Case: i = 0 => Prove W₀ is linear. + unfold W + have h_U0 : (univ : Finset (U L 𝔽q β 0)) = {0} := by + ext u -- u : ↥(U L 𝔽q β 0) + simp only [mem_univ, true_iff, mem_singleton] + -- ⊢ u = 0 + by_contra h + have h_u := u.property + -- only U and Submodule.span_empty is enough for simp + simp only [U, lt_self_iff_false, not_false_eq_true, Set.Ico_eq_empty, Set.image_empty, + Submodule.span_empty, Submodule.mem_bot, ZeroMemClass.coe_eq_zero] at h_u + contradiction + + rw [h_U0, prod_singleton, Submodule.coe_zero, C_0, sub_zero] + -- ⊢ IsLinearMap 𝔽q fun x ↦ eval x X + exact { -- can also use `refine` with exact same syntax + map_add := fun x y => by + rw [X_comp, X_comp, X_comp] + map_smul := fun c x => by + rw [X_comp, X_comp] + } + | succ i ih => + -- Inductive Step: Assume properties hold for `i`, prove for `i+1`. + let q := Fintype.card 𝔽q + + have h_linear_map: (IsLinearMap 𝔽q (f:=fun inner_p ↦ (W L 𝔽q β (i + 1)).comp inner_p)) := by + exact inductive_linear_map_W L 𝔽q β h_Fq_card_gt_1 h_Fq_card_prime hβ_lin_indep ih (i:=i) + + exact h_linear_map + +/-- The additive property of `Wᵢ`: `Wᵢ(x + y) = Wᵢ(x) + Wᵢ(y)`. 
-/ +lemma W_additive + (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat) : + IsLinearMap (R:=𝔽q) (M:=L) (M₂:=L) (f:=fun x ↦ (W L 𝔽q β i).eval x) := by + exact AdditiveNTT.linear_map_of_comp_to_linear_map_of_eval (f:=(W L 𝔽q β i)) + (h_f_linear:=W_linearity L 𝔽q β h_Fq_card_gt_1 h_Fq_card_prime hβ_lin_indep (i:=i)) + +/-- For all `y ∈ Uᵢ`, `Wᵢ(x + y) = Wᵢ(x)`. -/ +lemma W_add_U_invariant + (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) (i : Nat) : + ∀ x : L, ∀ y ∈ U L 𝔽q β i, (W L 𝔽q β i).eval (x + y) = (W L 𝔽q β i).eval x := by + intro x y hy + rw [W_additive L 𝔽q β h_Fq_card_gt_1 h_Fq_card_prime hβ_lin_indep (i:=i).map_add] + rw [Wᵢ_vanishing L 𝔽q β i y hy, add_zero] + +/-! # Normalized Subspace Vanishing Polynomials `Ŵᵢ(X) := Wᵢ(X) / Wᵢ(βᵢ)` -/ +noncomputable def normalizedW (i : Nat) : L[X] := + C (1 / (W L 𝔽q β i).eval (β i)) * W L 𝔽q β i + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The evaluation of the normalized polynomial `Ŵᵢ(X)` at `βᵢ` is 1. -/ +lemma normalizedWᵢ_eval_βᵢ {i : Nat} + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)): + (normalizedW (𝔽q:=𝔽q) (β:=β) (i:=i)).eval (β i) = 1 := by + rw [normalizedW, eval_mul, eval_C] + -- This simplifies to `(1 / y) * y`, which is `1`. + simp only [one_div] + set u: L := eval (β i) (W (𝔽q:=𝔽q) (β:=β) (i:=i)) + rw [←mul_comm] + -- ⊢ u * u⁻¹ = 1 + refine CommGroupWithZero.mul_inv_cancel u ?_ + -- ⊢ u ≠ 0 + exact Wᵢ_eval_βᵢ_neq_zero (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep) + +omit [DecidableEq 𝔽q] in +/-- The degree of `Ŵᵢ(X)` remains `|𝔽q|ⁱ`. 
-/ +lemma degree_normalizedW + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat): + (normalizedW L 𝔽q β i).degree = (Fintype.card 𝔽q)^i := by + -- Multiplication by a non-zero constant does not change the degree of a polynomial. + let c := (1 / (W L 𝔽q β i).eval (β i)) + have c_eq: c = (eval (β i) (W L 𝔽q β i))⁻¹ := by + rw [←one_div] + have hc : c ≠ 0 := by + have eval_ne_0 := Wᵢ_eval_βᵢ_neq_zero (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep) + have inv_ne_0 := inv_ne_zero eval_ne_0 + rw [←c_eq] at inv_ne_0 + exact inv_ne_0 + rw [normalizedW, degree_C_mul hc] + exact degree_W (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep) + +omit [DecidableEq L] [Fintype 𝔽q] [DecidableEq 𝔽q] in +/-- The normalized polynomial `Ŵᵢ(X)` vanishes on `Uᵢ`. -/ +lemma normalizedWᵢ_vanishing (i : Nat) : + ∀ u ∈ U L 𝔽q β i, (normalizedW L 𝔽q β i).eval u = 0 := by + -- The roots of `Ŵᵢ(X)` are precisely the elements of `Uᵢ`. + -- `Ŵᵢ` is just a constant multiple of `Wᵢ`, so they share the same roots. + intro u hu + rw [normalizedW, eval_mul, eval_C, Wᵢ_vanishing L 𝔽q β i u hu, mul_zero] + +/-- The normalized subspace vanishing polynomial `Ŵᵢ(X)` is `𝔽q`-linear. -/ +theorem normalizedW_is_linear_map + (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat): + IsLinearMap 𝔽q (f:=fun inner_p ↦ (normalizedW L 𝔽q β i).comp inner_p) := by + let c := 1 / (W L 𝔽q β i).eval (β i) + have hW_lin : IsLinearMap 𝔽q (f:=fun inner_p ↦ (W L 𝔽q β i).comp inner_p) := + W_linearity L 𝔽q β h_Fq_card_gt_1 h_Fq_card_prime hβ_lin_indep (i:=i) + have h_comp_add := hW_lin.map_add + have h_comp_smul := hW_lin.map_smul + -- ⊢ IsLinearMap 𝔽q fun inner_p ↦ (normalizedW L 𝔽q β i).comp inner_p + -- We are given that the composition map for W_i is 𝔽q-linear. 
+ have h_comp_add := hW_lin.map_add + have h_comp_smul := hW_lin.map_smul + + -- A crucial helper lemma is understanding how composition distributes over + -- multiplication by a constant polynomial. (p * C c).comp(q) = p.comp(q) * (C c).comp(q) + -- Since (C c).comp(q) is just C c, this simplifies nicely. + have comp_C_mul (f g : L[X]) : (C c * f).comp g = C c * f.comp g := by + simp only [Polynomial.comp] -- comp to eval₂ + simp only [eval₂_mul, eval₂_C] + + -- To prove `IsLinearMap`, we must prove two properties: `map_add` and `map_smul`. + -- We construct the IsLinearMap structure directly. + refine { + map_add := by { + intro p q + -- Unfold the definition of normalizedW to show the structure C c * W_i + dsimp only [normalizedW] + -- Apply our helper lemma to the LHS and both terms on the RHS + rw [comp_C_mul, comp_C_mul, comp_C_mul] + -- Now use the given linearity of W_i's composition map + rw [h_comp_add] + -- The rest is just distribution of multiplication over addition + rw [mul_add] + }, + map_smul := by { + intro k p + -- Unfold the definition + dsimp only [normalizedW] + -- Apply our helper lemma on both sides + rw [comp_C_mul, comp_C_mul] + -- Use the given smul-linearity of W_i's composition map + rw [h_comp_smul] + -- The rest is showing that scalar multiplication by `k` and polynomial + -- multiplication by `C c` commute, which follows from ring axioms. + -- `C c * (k • W_i.comp p)` should equal `k • (C c * W_i.comp p)`. + -- ⊢ C c * k • (W L 𝔽q β i).comp p = k • (C c * (W L 𝔽q β i).comp p) + rw [Algebra.smul_def, Algebra.smul_def] + -- ⊢ C c * ((algebraMap 𝔽q L[X]) k * (W L 𝔽q β i).comp p) + -- = (algebraMap 𝔽q L[X]) k * (C c * (W L 𝔽q β i).comp p) + -- The `algebraMap` converts the scalar k from 𝔽q into a constant polynomial. 
+ rw [Algebra.algebraMap_eq_smul_one] + -- ⊢ C c * (k • 1 * (W L 𝔽q β i).comp p) = k • 1 * (C c * (W L 𝔽q β i).comp p) + ac_rfl + } + } + +theorem normalizedW_is_additive + (h_Fq_card_gt_1: Fintype.card 𝔽q > 1) + (h_Fq_card_prime: Fact (Nat.Prime (ringChar 𝔽q))) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (i : Nat): + IsLinearMap 𝔽q (f:=fun x ↦ (normalizedW L 𝔽q β i).eval x) := by + exact AdditiveNTT.linear_map_of_comp_to_linear_map_of_eval (f:=(normalizedW L 𝔽q β i)) + (h_f_linear:=normalizedW_is_linear_map L 𝔽q β h_Fq_card_gt_1 + h_Fq_card_prime hβ_lin_indep (i:=i)) + +end LinearityOfSubspaceVanishingPolynomials + +section NovelPolynomialBasisProof +/-- The Novel Polynomial Basis {`Xⱼ(X)`, j ∈ Fin 2^ℓ} for the space `L⦃<2^ℓ⦄[X]` over `L` -/ +-- Definition of Novel Polynomial Basis: `Xⱼ(X) := Π_{i=0}^{ℓ-1} (Ŵᵢ(X))^{jᵢ}` +noncomputable def Xⱼ (ℓ : Nat) (j : Nat) : L[X] := + (range ℓ).prod (fun i => (normalizedW L 𝔽q β i)^(bit (k:=i) (n:=j))) + +omit [DecidableEq 𝔽q] in +/-- The degree of `Xⱼ(X)` is `j`: + `deg(Xⱼ(X)) = Σ_{i=0}^{ℓ-1} jᵢ * deg(Ŵᵢ(X)) = Σ_{i=0}^{ℓ-1} jᵢ * 2ⁱ = j` -/ +lemma degree_Xⱼ + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (hF₂ : Fintype.card 𝔽q = 2) + (ℓ : Nat) (j : Nat) (h_j : j < 2^ℓ) : + (Xⱼ L 𝔽q β ℓ j).degree = j := by + rw [Xⱼ, degree_prod] + -- ⊢ ∑ i ∈ range ℓ, (normalizedW β i ^ bit i j).degree = ↑j + by_cases h_ℓ: ℓ = 0 + · simp only [h_ℓ, zero_add, pow_one, tsub_self, Icc_self, sum_singleton, + pow_zero, mul_one]; + rw [range_zero, sum_empty] + rw [h_ℓ, pow_zero] at h_j + interval_cases j + · rfl + · push_neg at h_ℓ + have deg_each: ∀ i ∈ range ℓ, + (normalizedW (𝔽q:=𝔽q) (β:=β) (i:=i) ^ bit (k:=i) (n:=j)).degree + = if bit (k:=i) (n:=j) = 1 then 2^i else 0 := by + intro i _ + rw [degree_pow] + rw [degree_normalizedW (𝔽q:=𝔽q) (β:=β) (i:=i) (hβ_lin_indep:=hβ_lin_indep)] + simp only [bit] + simp only [Nat.and_one_is_mod, nsmul_eq_mul, Nat.pow_one] + -- ⊢ ↑(j >>> i % 2) * 2 ^ i = if j >>> i % 2 = 1 then 2 ^ i 
else 0 + by_cases h: (j >>> i) % 2 = 1 + · simp only [h, if_true]; rw [hF₂]; simp only [Nat.cast_one, Nat.cast_ofNat, one_mul] + · simp only [h, if_false]; + have h_0: (j >>> i) % 2 = 0 := by + exact Nat.mod_two_ne_one.mp h + rw [h_0] + exact mul_eq_zero_comm.mp rfl + -- We use the `Nat.digits` API for this. + rw [sum_congr rfl deg_each] + -- ⊢ (∑ x ∈ range ℓ, if bit x j = 1 then 2 ^ x else 0) = ↑j + have h_range: range ℓ = Icc 0 (ℓ-1) := by + rw [←Nat.range_succ_eq_Icc_zero (n:=ℓ - 1)] + congr + rw [Nat.sub_add_cancel] + omega + rw [h_range] + -- ⊢ (∑ x ∈ Icc 0 (ℓ - 1), if bit x j = 1 then 2 ^ x else 0) = ↑j => in Withbot ℕ + have h_sum: (∑ x ∈ Icc 0 (ℓ - 1), if bit x j = 1 then 2 ^ x else 0) + = (∑ x ∈ Icc 0 (ℓ - 1), (bit x j) * 2^x) := by + apply sum_congr rfl (fun x hx => by + have h_res: (if bit x j = 1 then 2 ^ x else 0) = (bit x j) * 2^x := by + by_cases h: bit x j = 1 + · simp only [h, if_true]; norm_num + · simp only [h, if_false]; push_neg at h; + have h_bit_x_j_eq_0: bit x j = 0 := by + have h_either_eq := bit_eq_zero_or_one (k:=x) (n:=j) + simp only [h, or_false] at h_either_eq + exact h_either_eq + rw [h_bit_x_j_eq_0, zero_mul] + exact h_res + ) + norm_cast -- convert the goal back to ℕ + rw [h_sum] + have h_bit_repr_j := bit_repr (ℓ:=ℓ) (h_ℓ:=by omega) (j:=j) (h_j) + rw [←h_bit_repr_j] + +/-- The basis vectors `{Xⱼ(X), j ∈ Fin 2^ℓ}` forms a basis for `L⦃<2^ℓ⦄[X]` -/ +noncomputable def basis_vectors (hF₂ : Fintype.card 𝔽q = 2) (ℓ : Nat): + Fin (2 ^ ℓ) → L⦃<2^ℓ⦄[X] := + fun ⟨j, hj⟩ => ⟨Xⱼ (𝔽q:=𝔽q) (β:=β) (ℓ:=ℓ) (j:=j), by + -- proof of coercion of `Xⱼ(X)` to `L⦃<2^ℓ⦄[X]`, i.e. `degree < 2^ℓ` + apply Polynomial.mem_degreeLT.mpr + rw [degree_Xⱼ (𝔽q:=𝔽q) (β:=β) (ℓ:=ℓ) (j:=j) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep)] + norm_cast + exact hj + ⟩ + +/-- The vector space of coefficients for polynomials of degree < 2^ℓ. 
-/ +abbrev CoeffVecSpace (L : Type u) (ℓ : Nat) := Fin (2^ℓ) → L + +noncomputable instance (ℓ : Nat) : AddCommGroup (CoeffVecSpace L ℓ) := by + unfold CoeffVecSpace + infer_instance -- default additive group for `Fin (2^ℓ) → L` + +noncomputable instance finiteDimensionalCoeffVecSpace (ℓ : ℕ) : + FiniteDimensional (K:=L) (V:=CoeffVecSpace (L:=L) ℓ) := by + unfold CoeffVecSpace + exact inferInstance + +/-- The linear map from polynomials (in the subtype) to their coefficient vectors. -/ +def to_coeffs_vec (ℓ : Nat) : L⦃<2^ℓ⦄[X] →ₗ[L] CoeffVecSpace L ℓ where + toFun := fun p => fun i => p.val.coeff i.val + map_add' := fun p q => by ext i; simp [coeff_add] + map_smul' := fun c p => by ext i; simp [coeff_smul, smul_eq_mul] + +/-- The rows of a square lower-triangular matrix with +non-zero diagonal entries are linearly independent. -/ +lemma linearIndependent_rows_of_lower_triangular_ne_zero_diag + {n : ℕ} {R : Type*} [Field R] (A : Matrix (Fin n) (Fin n) R) + (h_lower_triangular: A.BlockTriangular ⇑OrderDual.toDual) (h_diag: ∀ i, A i i ≠ 0) : + LinearIndependent R A := by -- This follows from the fact that such a matrix is invertible + -- because its determinant is non-zero. + have h_det : A.det ≠ 0 := by + rw [Matrix.det_of_lowerTriangular A h_lower_triangular] + apply prod_ne_zero_iff.mpr + intro i _; exact h_diag i + exact Matrix.linearIndependent_rows_of_det_ne_zero (A:=A) h_det + +omit [DecidableEq 𝔽q] in +/-- +The coefficient vectors of the novel basis polynomials are linearly independent. +This is proven by showing that the change-of-basis matrix to the monomial basis +is lower-triangular with a non-zero diagonal. +-/ +lemma coeff_vectors_linear_independent (hF₂ : Fintype.card 𝔽q = 2) + (hβ_lin_indep : LinearIndependent (R:=𝔽q) (M:=L) (v:=β)) + (ℓ : Nat) : + LinearIndependent L (to_coeffs_vec (L:=L) (ℓ:=ℓ) ∘ + (basis_vectors (𝔽q:=𝔽q) (L:=L) (β:=β) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ))) := by + -- Let `A` be the `2^ℓ x 2^ℓ` change-of-basis matrix. 
+ -- The `i`-th row of `A` is the coefficient vector of `Xᵢ` in the novel basis. + let A : Matrix (Fin (2^ℓ)) (Fin (2^ℓ)) L := + fun j i => (to_coeffs_vec (L:=L) (ℓ:=ℓ) ( + basis_vectors (𝔽q:=𝔽q) (L:=L) (β:=β) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) j)) i + -- Apply the lemma about triangular matrices. + apply linearIndependent_rows_of_lower_triangular_ne_zero_diag A + · -- ⊢ A.BlockTriangular ⇑OrderDual.toDual => Prove the matrix A is lower-triangular. + intro i j hij + dsimp only [to_coeffs_vec, basis_vectors, LinearMap.coe_mk, AddHom.coe_mk, A] + -- ⊢ (Xⱼ β ℓ ↑i).coeff ↑j = 0 + have deg_X : (Xⱼ L 𝔽q β ℓ i).degree = i := + degree_Xⱼ L 𝔽q β hβ_lin_indep hF₂ ℓ i i.isLt + have h_i_lt_j : i < j := by + simp only [OrderDual.toDual_lt_toDual, A] at hij + exact hij + have h_res: (Xⱼ L 𝔽q β ℓ i).coeff j = 0 := by + apply coeff_eq_zero_of_natDegree_lt -- we don't use coeff_eq_zero_of_degree_lt + -- because p.natDegree returns a value of type ℕ instead of WithBot ℕ as in p.degree + rw [natDegree_eq_of_degree_eq_some (degree_Xⱼ L 𝔽q β hβ_lin_indep hF₂ ℓ i i.isLt)] + norm_cast -- auto resolve via h_i_lt_j + exact h_res + · -- ⊢ ∀ (i : Fin (2 ^ ℓ)), A i i ≠ 0 => All diagonal entries are non-zero. + intro i + dsimp [A, to_coeffs_vec, basis_vectors] + -- `A i i` is the `i`-th (also the leading) coefficient of `Xⱼ`, which is non-zero. + have h_deg : (Xⱼ L 𝔽q β ℓ i).degree = i := degree_Xⱼ L 𝔽q β hβ_lin_indep hF₂ ℓ i i.isLt + have h_natDegree : (Xⱼ L 𝔽q β ℓ i).natDegree = i := natDegree_eq_of_degree_eq_some h_deg + have deg_X : (Xⱼ L 𝔽q β ℓ i).degree = i := degree_Xⱼ L 𝔽q β hβ_lin_indep hF₂ ℓ i i.isLt + apply coeff_ne_zero_of_eq_degree -- (hn : degree p = n) : coeff p n ≠ 0 + rw [deg_X] + rfl + +omit [DecidableEq 𝔽q] in +/-- The basis vectors are linearly independent over `L`. 
-/ +theorem basis_vectors_linear_independent (hF₂ : Fintype.card 𝔽q = 2) (ℓ : Nat) : + LinearIndependent L (basis_vectors (𝔽q:=𝔽q) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ)) := by + -- We have proved that the image of our basis vectors under the linear map + -- `to_coeffs_vec` is a linearly independent family. + have h_comp_li := coeff_vectors_linear_independent (𝔽q:=𝔽q) (hF₂:=hF₂) + (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) + -- `LinearIndependent.of_comp` states that if the image of a family of vectors under + -- a linear map is linearly independent, then so is the original family. + exact LinearIndependent.of_comp (to_coeffs_vec (L:=L) (ℓ:=ℓ)) h_comp_li + +omit [DecidableEq 𝔽q] in +/-- The basis vectors span the space of polynomials with degree less than `2^ℓ`. -/ +theorem basis_vectors_span (hF₂ : Fintype.card 𝔽q = 2) (ℓ : Nat) + : Submodule.span L (Set.range (basis_vectors (𝔽q:=𝔽q) + (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ))) = ⊤ := by + have h_li := basis_vectors_linear_independent (𝔽q:=𝔽q) (hF₂:=hF₂) + (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) + let n := 2 ^ ℓ + have h_n: n = 2 ^ ℓ := by omega + have h_n_pos: 0 < n := by + rw [h_n] + exact Nat.two_pow_pos ℓ + have h_finrank_eq_n : Module.finrank L (L⦃< n⦄[X]) = n := finrank_degreeLT_n n + -- We have `n` linearly independent vectors in an `n`-dimensional space. + -- The dimension of their span is `n`. + have h_span_finrank : Module.finrank L (Submodule.span L (Set.range ( + basis_vectors (𝔽q:=𝔽q) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ)))) = n := by + rw [finrank_span_eq_card h_li, Fintype.card_fin] + -- A subspace with the same dimension as the ambient space must be the whole space. 
+ rw [←h_finrank_eq_n] at h_span_finrank + have inst_finite_dim : FiniteDimensional (K:=L) (V:=L⦃< n⦄[X]) := + finiteDimensional_degreeLT (h_n_pos:=by omega) + apply Submodule.eq_top_of_finrank_eq (K:=L) (V:=L⦃< n⦄[X]) + exact h_span_finrank + +/-- The novel polynomial basis for `L⦃<2^ℓ⦄[X]` -/ +noncomputable def novel_polynomial_basis (hF₂ : Fintype.card 𝔽q = 2) (ℓ : Nat) : + Basis (Fin (2^ℓ)) (R:=L) (M:=L⦃<2^ℓ⦄[X]) := by + have hli := basis_vectors_linear_independent (𝔽q:=𝔽q) (hF₂:=hF₂) + (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) + have hspan := basis_vectors_span (𝔽q:=𝔽q) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) + exact Basis.mk hli (le_of_eq hspan.symm) + +end NovelPolynomialBasisProof + +/-- The polynomial `P(X)` derived from coefficients `a` in the novel polynomial basis `(Xⱼ)`, +`P(X) := ∑_{j=0}^{2^ℓ-1} aⱼ ⋅ Xⱼ(X)` -/ +noncomputable def polynomial_from_novel_coeffs (ℓ : Nat) (a : Fin (2^ℓ) → L) : L[X] := + ∑ j, C (a j) * (Xⱼ L 𝔽q β ℓ j.val) + +omit [DecidableEq 𝔽q] in +/-- Proof that the novel polynomial basis is indeed the indicated basis vectors -/ +theorem novel_polynomial_basis_is_basis_vectors (hF₂ : Fintype.card 𝔽q = 2) (ℓ : Nat) : + (novel_polynomial_basis (𝔽q:=𝔽q) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ)) + = basis_vectors (𝔽q:=𝔽q) (hF₂:=hF₂) (hβ_lin_indep:=hβ_lin_indep) (ℓ:=ℓ) := by + simp only [novel_polynomial_basis, Basis.coe_mk] + +end AdditiveNTT diff --git a/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/Prelude.lean b/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/Prelude.lean new file mode 100644 index 000000000..d7c37bb23 --- /dev/null +++ b/ArkLib/Data/FieldTheory/BinaryField/AdditiveNTT/Prelude.lean @@ -0,0 +1,901 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Chung Thai Nguyen +-/ + +import ArkLib.Data.MvPolynomial.Notation +import ArkLib.Data.FieldTheory.BinaryField.Tower.Prelude + +/-! 
+# Prelude for Additive NTT +- Bitwise identities +- Monomial basis for submodule of polynomials of degree less than `n` +- Frobenius polynomial identity on prime-characteristic fields and its algebras +- Field-vanishing polynomial equality in polynomial ring of algebras +-/ + +namespace AdditiveNTT +open Polynomial FiniteDimensional +section BitwiseIdentities +-- We decompose each number `j < 2^ℓ` into its binary representation: `j = Σ k ∈ Fin ℓ, jₖ * 2ᵏ` +def bit (k n : Nat) : Nat := (n >>> k) &&& 1 -- k'th LSB bit of `n` + +lemma bit_eq_zero_or_one {k n : Nat} : + bit k n = 0 ∨ bit k n = 1 := by + unfold bit + rw [Nat.and_one_is_mod] + simp only [Nat.mod_two_eq_zero_or_one] + +lemma lsb_of_single_bit {n: ℕ} (h_n: n < 2): bit 0 n = n := by + unfold bit + rw [Nat.shiftRight_zero] + rw [Nat.and_one_is_mod] + exact Nat.mod_eq_of_lt h_n + +lemma lsb_of_multiple_of_two {n: ℕ}: bit 0 (2*n) = 0 := by + unfold bit + rw [Nat.shiftRight_zero, Nat.and_one_is_mod, Nat.mul_mod_right] + +lemma and_eq_zero_iff {n m: ℕ} : n &&& m = 0 ↔ ∀ k, (n >>> k) &&& (m >>> k) = 0 := by + constructor + · intro h_and_zero -- h_and_zero: n &&& m = 0 + intro k + rw [← Nat.shiftRight_and_distrib] + rw [h_and_zero] + rw [Nat.zero_shiftRight] + · intro h_forall_k + have h_k_is_zero := h_forall_k 0 + simp only [Nat.shiftRight_zero] at h_k_is_zero -- utilize n = (n >>> 0), m = (m >>> 0) + exact h_k_is_zero + +lemma eq_iff_eq_all_bits {n m: ℕ} : n = m ↔ ∀ k, (n >>> k) &&& 1 = (m >>> k) &&& 1 := by + constructor + · intro h_eq -- h_eq : n = m + intro k + rw [h_eq] + · intro h_all_bits -- h_all_bits: ∀ k, (n >>> k) &&& 1 = (m >>> k) &&& 1 + apply Nat.eq_of_testBit_eq + intro k + simp only [Nat.testBit, Nat.one_and_eq_mod_two, Nat.mod_two_bne_zero, beq_eq_beq] + simp only [Nat.and_one_is_mod] at h_all_bits k + rw [h_all_bits k] + +lemma shiftRight_and_one_distrib {n m k: ℕ} : + (n &&& m) >>> k &&& 1 = ((n >>> k) &&& 1) &&& ((m >>> k) &&& 1) := by + rw [Nat.shiftRight_and_distrib] + conv => + lhs + rw 
[←Nat.and_self (x:=1)] + rw [←Nat.and_assoc] + rw [Nat.and_assoc (y:=m >>> k) (z:=1), Nat.and_comm (x:=m>>>k) (y:=1), ←Nat.and_assoc] + rw [Nat.and_assoc] + +lemma and_eq_zero_iff_and_each_bit_eq_zero {n m: ℕ} : + n &&& m = 0 ↔ ∀ k, ((n >>> k) &&& 1) &&& ((m >>> k) &&& 1) = 0 := by + constructor + · intro h_and_zero + intro k + have h_k := shiftRight_and_one_distrib (n:=n) (m:=m) (k:=k) + rw [←h_k] + rw [h_and_zero, Nat.zero_shiftRight, Nat.zero_and] + · intro h_forall_k -- h_forall_k : ∀ (k : ℕ), n >>> k &&& 1 &&& (m >>> k &&& 1) = 0 + apply eq_iff_eq_all_bits.mpr + intro k + -- ⊢ (n &&& m) >>> k &&& 1 = 0 >>> k &&& 1 + have h_forall_k_eq: ∀ k, ((n &&& m) >>> k) &&& 1 = 0 := by + intro k + rw [shiftRight_and_one_distrib] + exact h_forall_k k + rw [h_forall_k_eq k] + rw [Nat.zero_shiftRight, Nat.zero_and] + +lemma and_one_eq_of_eq {a b : ℕ} : a = b → a &&& 1 = b &&& 1 := by + intro h_eq + rw [h_eq] + +lemma Nat.eq_zero_or_eq_one_of_lt_two {n: ℕ} (h_lt: n < 2): n = 0 ∨ n = 1 := by + interval_cases n + · left; rfl + · right; rfl + +lemma div_2_form {nD2 bn: ℕ} (h_bn: bn < 2): + (nD2 * 2 + bn) / 2 = nD2 := by + rw [←add_comm, ←mul_comm] + rw [Nat.add_mul_div_left (x:=bn) (y:=2) (z:=nD2) (H:=by norm_num)] + norm_num; exact h_bn; + +lemma and_of_chopped_lsb {n m n1 m1 bn bm: ℕ} (h_bn: bn < 2) (h_bm: bm < 2) + (h_n: n = n1 * 2 + bn) (h_m: m = m1 * 2 + bm): + n &&& m = (n1 &&& m1) * 2 + (bn &&& bm) := by -- main tool: Nat.div_add_mod /2 + rw [h_n, h_m] + -- ⊢ (n1 * 2 + bn) &&& (m1 * 2 + bm) = (n1 &&& m1) * 2 + (bn &&& bm) + have h_n1_mul_2_add_bn_div_2: (n1 * 2 + bn) / 2 = n1 := div_2_form h_bn; + have h_m1_mul_2_add_bm_div_2: (m1 * 2 + bm) / 2 = m1 := div_2_form h_bm; + have h_and_bn_bm: (bn &&& bm) < 2 := by + interval_cases bn + · rw [Nat.zero_and]; norm_num; + · interval_cases bm + · rw [Nat.and_zero]; norm_num; + · rw [Nat.and_self]; norm_num; + -- Part 1: Prove the `mod 2` parts are equal. 
+ have h_mod_eq : ((n1 * 2 + bn) &&& (m1 * 2 + bm)) % 2 = ((n1 &&& m1) * 2 + (bn &&& bm)) % 2 := by + simp only [Nat.and_mod_two_pow (n := 1), pow_one, Nat.mul_add_mod_self_right] + -- Part 2: Prove the `div 2` parts are equal. + have h_div_eq : ((n1 * 2 + bn) &&& (m1 * 2 + bm)) / 2 = ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 := by + simp only [Nat.and_div_two_pow (n := 1), pow_one] + -- ⊢ (n1 * 2 + bn) / 2 &&& (m1 * 2 + bm) / 2 = ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 + rw [h_n1_mul_2_add_bn_div_2, h_m1_mul_2_add_bm_div_2] + -- ⊢ n1 &&& m1 = ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 + have h_result: ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 = n1 &&& m1 := by + rw [←add_comm, ←mul_comm] -- (x + y * z) / y = x / y + z + rw [Nat.add_mul_div_left (x:=bn &&& bm) (y:=2) (z:=n1 &&& m1) (H:=by norm_num)] + rw [(Nat.div_eq_zero_iff_lt (x:=bn &&& bm) (k:=2) (h:=by norm_num)).mpr h_and_bn_bm, zero_add] + rw [h_result] + rw [←Nat.div_add_mod ((n1 * 2 + bn) &&& (m1 * 2 + bm)) 2, h_div_eq, h_mod_eq, Nat.div_add_mod] + +lemma xor_of_chopped_lsb {n m n1 m1 bn bm: ℕ} (h_bn: bn < 2) (h_bm: bm < 2) + (h_n: n = n1 * 2 + bn) (h_m: m = m1 * 2 + bm): + n ^^^ m = (n1 ^^^ m1) * 2 + (bn ^^^ bm) := by + rw [h_n, h_m] + -- ⊢ (n1 * 2 + bn) ^^^ (m1 * 2 + bm) = (n1 ^^^ m1) * 2 + (bn ^^^ bm) + have h_n1_mul_2_add_bn_div_2: (n1 * 2 + bn) / 2 = n1 := div_2_form h_bn; + have h_m1_mul_2_add_bm_div_2: (m1 * 2 + bm) / 2 = m1 := div_2_form h_bm; + have h_xor_bn_bm: (bn ^^^ bm) < 2 := by + interval_cases bn + · interval_cases bm + · rw [Nat.zero_xor]; norm_num; + · rw [Nat.zero_xor]; norm_num; + · interval_cases bm + · rw [Nat.xor_zero]; norm_num; + · rw [Nat.xor_self]; norm_num; + -- Part 1: Prove the `mod 2` parts are equal. + have h_mod_eq : ((n1 * 2 + bn) ^^^ (m1 * 2 + bm)) % 2 = ((n1 ^^^ m1) * 2 + (bn ^^^ bm)) % 2 := by + simp only [Nat.xor_mod_two_pow (n := 1), pow_one, Nat.mul_add_mod_self_right] + -- Part 2: Prove the `div 2` parts are equal. 
+ have h_div_eq : ((n1 * 2 + bn) ^^^ (m1 * 2 + bm)) / 2 = ((n1 ^^^ m1) * 2 + (bn ^^^ bm)) / 2 := by + simp only [Nat.xor_div_two_pow (n := 1), pow_one] + -- ⊢ (n1 * 2 + bn) / 2 &&& (m1 * 2 + bm) / 2 = ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 + rw [h_n1_mul_2_add_bn_div_2, h_m1_mul_2_add_bm_div_2] + -- ⊢ n1 &&& m1 = ((n1 &&& m1) * 2 + (bn &&& bm)) / 2 + have h_result: ((n1 ^^^ m1) * 2 + (bn ^^^ bm)) / 2 = n1 ^^^ m1 := by + rw [←add_comm, ←mul_comm] -- (x + y * z) / y = x / y + z + rw [Nat.add_mul_div_left (x:=bn ^^^ bm) (y:=2) (z:=n1 ^^^ m1) (H:=by norm_num)] + rw [(Nat.div_eq_zero_iff_lt (x:=bn ^^^ bm) (k:=2) (h:=by norm_num)).mpr h_xor_bn_bm, zero_add] + rw [h_result] + rw [←Nat.div_add_mod ((n1 * 2 + bn) ^^^ (m1 * 2 + bm)) 2, h_div_eq, h_mod_eq, Nat.div_add_mod] + +lemma or_of_chopped_lsb {n m n1 m1 bn bm: ℕ} (h_bn: bn < 2) (h_bm: bm < 2) + (h_n: n = n1 * 2 + bn) (h_m: m = m1 * 2 + bm): + n ||| m = (n1 ||| m1) * 2 + (bn ||| bm) := by + rw [h_n, h_m] + -- ⊢ (n1 * 2 + bn) ||| (m1 * 2 + bm) = (n1 ||| m1) * 2 + (bn ||| bm) + have h_n1_mul_2_add_bn_div_2: (n1 * 2 + bn) / 2 = n1 := div_2_form h_bn; + have h_m1_mul_2_add_bm_div_2: (m1 * 2 + bm) / 2 = m1 := div_2_form h_bm; + have h_or_bn_bm: (bn ||| bm) < 2 := by + interval_cases bn + · interval_cases bm + · rw [Nat.zero_or]; norm_num; + · rw [Nat.zero_or]; norm_num; + · interval_cases bm + · rw [Nat.or_zero]; norm_num; + · rw [Nat.or_self]; norm_num; + -- Part 1: Prove the `mod 2` parts are equal. + have h_mod_eq : ((n1 * 2 + bn) ||| (m1 * 2 + bm)) % 2 = ((n1 ||| m1) * 2 + (bn ||| bm)) % 2 := by + simp only [Nat.or_mod_two_pow (n := 1), pow_one, Nat.mul_add_mod_self_right] + -- Part 2: Prove the `div 2` parts are equal. 
+ have h_div_eq : ((n1 * 2 + bn) ||| (m1 * 2 + bm)) / 2 = ((n1 ||| m1) * 2 + (bn ||| bm)) / 2 := by + simp only [Nat.or_div_two_pow (n := 1), pow_one] + -- ⊢ (n1 * 2 + bn) / 2 ||| (m1 * 2 + bm) / 2 = ((n1 ||| m1) * 2 + (bn ||| bm)) / 2 + rw [h_n1_mul_2_add_bn_div_2, h_m1_mul_2_add_bm_div_2] + -- ⊢ n1 ||| m1 = ((n1 ||| m1) * 2 + (bn ||| bm)) / 2 + have h_result: ((n1 ||| m1) * 2 + (bn ||| bm)) / 2 = n1 ||| m1 := by + rw [←add_comm, ←mul_comm] -- (x + y * z) / y = x / y + z + rw [Nat.add_mul_div_left (x:=bn ||| bm) (y:=2) (z:=n1 ||| m1) (H:=by norm_num)] + rw [(Nat.div_eq_zero_iff_lt (x:=bn ||| bm) (k:=2) (h:=by norm_num)).mpr h_or_bn_bm, zero_add] + rw [h_result] + rw [←Nat.div_add_mod ((n1 * 2 + bn) ||| (m1 * 2 + bm)) 2, h_div_eq, h_mod_eq, Nat.div_add_mod] + +lemma sum_eq_xor_plus_twice_and (n: Nat): ∀ m: ℕ, n + m = (n ^^^ m) + 2 * (n &&& m) := by + induction n using Nat.binaryRec with + | z => + intro m + rw [zero_add, Nat.zero_and, mul_zero, add_zero, Nat.zero_xor] + | f bn n2 ih => + intro m + let resDiv2M := Nat.boddDiv2 m + let bm := resDiv2M.fst + let m2 := resDiv2M.snd + have h_m2 : m2 = Nat.div2 m := by + rfl + have h_bm : bm = Nat.bodd m := by + rfl + let mVal := Nat.bit bm m2 + set nVal := Nat.bit bn n2 + set bitN := bn.toNat + set bitM := bm.toNat + have h_bitN: bitN < 2 := by + exact Bool.toNat_lt bn + have h_bitM: bitM < 2 := by + exact Bool.toNat_lt bm + have h_and_bitN_bitM: (bitN &&& bitM) < 2 := by + interval_cases bitN + · interval_cases bitM + · rw [Nat.zero_and]; norm_num; + · rw [Nat.zero_and]; norm_num; + · interval_cases bitM + · rw [Nat.and_zero]; norm_num; + · rw [Nat.and_self]; norm_num; + have h_n: nVal = n2 * 2 + bitN := by + unfold nVal + rw [Nat.bit_val, mul_comm] + have h_m: mVal = m2 * 2 + bitM := by + unfold mVal + rw [Nat.bit_val, mul_comm] + have h_mVal_eq_m: mVal = m := by + unfold mVal + rw [Nat.bit_val, mul_comm] + rw [←h_m] + unfold mVal + simp only [h_bm, h_m2] + exact Nat.bit_decomp m + rw [←h_mVal_eq_m] + -- h_prev : n2 + 
m2 = n2 ^^^ m2 + 2 * (n2 &&& m2) + -- ⊢ nVal + mVal = nVal ^^^ mVal + 2 * (nVal &&& mVal) + have h_and: nVal &&& mVal = (n2 &&& m2) * 2 + (bitN &&& bitM) := + and_of_chopped_lsb (n:=nVal) (m:=mVal) (h_bn:=h_bitN) (h_bm:=h_bitM) (h_n:=h_n) (h_m:=h_m) + have h_xor: nVal ^^^ mVal = (n2 ^^^ m2) * 2 + (bitN ^^^ bitM) := + xor_of_chopped_lsb (n:=nVal) (m:=mVal) (h_bn:=h_bitN) (h_bm:=h_bitM) (h_n:=h_n) (h_m:=h_m) + have h_or: nVal ||| mVal = (n2 ||| m2) * 2 + (bitN ||| bitM) := + or_of_chopped_lsb (n:=nVal) (m:=mVal) (h_bn:=h_bitN) (h_bm:=h_bitM) (h_n:=h_n) (h_m:=h_m) + have h_prev := ih m2 + -- ⊢ nVal + mVal = (nVal ^^^ mVal) + (2 * (nVal &&& mVal)) + have sum_eq: nVal + mVal = (n2 ^^^ m2) * 2 + 4 * (n2 &&& m2) + (bitN + bitM) := by + calc + _ = (n2 * 2 + bitN) + (m2 * 2 + bitM) := by rw [h_n, h_m] + _ = (n2 + m2) * 2 + (bitN + bitM) := by + rw [Nat.right_distrib, ←add_assoc, ←add_assoc]; omega; + _ = ((n2 ^^^ m2) + 2 * (n2 &&& m2)) * 2 + (bitN + bitM) := by rw [h_prev] + _ = (n2 ^^^ m2) * 2 + 4 * (n2 &&& m2) + (bitN + bitM) := by rw [Nat.right_distrib]; omega; + rw [sum_eq] + -- From this point, we basically do case analysis on `bn &&& bm` + -- rw [h_n, h_m] + by_cases h_and_bitN_bitM_eq_1: bitN &&& bitM = 1 + · have h_bitN_and_bitM_eq_1: bitN = 1 ∧ bitM = 1 := by + interval_cases bitN + · interval_cases bitM + · contradiction + · contradiction + · interval_cases bitM + · contradiction + · and_intros; rfl; rfl; + have h_sum_bits: (bitN + bitM) = 2 := by omega + have h_xor_bits: bitN ^^^ bitM = 0 := by simp only [h_bitN_and_bitM_eq_1, Nat.xor_self]; + have h_and_bits: bitN &&& bitM = 1 := by simp only [h_bitN_and_bitM_eq_1, Nat.and_self]; + -- ⊢ (n2 ^^^ m2) * 2 + 4 * (n2 &&& m2) + (bitN + bitM) = (nVal ^^^ mVal) + 2 * (nVal &&& mVal) + have h_left: (n2 ^^^ m2) * 2 = (nVal ^^^ mVal) := by + calc + _ = (n2 ^^^ m2) * 2 + 0 := by omega; + _ = (n2 ^^^ m2) * 2 + (bitN ^^^ bitM) := by rw [h_xor_bits]; + _ = _ := by exact h_xor.symm + rw [h_left] + rw [add_assoc] + have h_right: 
4 * (n2 &&& m2) + (bitN + bitM) = 2 * (nVal &&& mVal) := by + calc + _ = 4 * (n2 &&& m2) + 2 := by rw [h_sum_bits]; + _ = 2 * (2 * (n2 &&& m2) + 1) := by omega; + _ = 2 * ((n2 &&& m2) * 2 + (bitN &&& bitM)) := by + rw [h_and_bits, mul_comm (a:=(n2 &&& m2)) (b:=2)]; + _ = 2 * (nVal &&& mVal) := by rw [h_and]; + rw [h_right] + · push_neg at h_and_bitN_bitM_eq_1; + have h_and_bitN_bitM_eq_0: (bitN &&& bitM) = 0 := by + interval_cases (bitN &&& bitM) + · rfl + · contradiction + have h_bits_eq: bitN = 0 ∨ bitM = 0 := by + interval_cases bitN + · left; rfl + · right; + interval_cases bitM + · rfl + · contradiction + have h_sum_bits: (bitN + bitM) = (bitN ^^^ bitM) := by + interval_cases bitN + · interval_cases bitM + · rfl + · rfl + · interval_cases bitM + · rfl + · contradiction -- with h_and_bitN_bitM_eq_0 + -- ⊢ (n2 ^^^ m2) * 2 + 4 * (n2 &&& m2) + (bitN + bitM) = (nVal ^^^ mVal) + 2 * (nVal &&& mVal) + rw [←add_assoc, add_assoc (b:=bitN) (c:=bitM), add_assoc] + rw [add_comm (b:=(bitN + bitM)), ←add_assoc] + have h_left: (n2 ^^^ m2) * 2 + (bitN + bitM) = (nVal ^^^ mVal) := by + calc + _ = (n2 ^^^ m2) * 2 + (bitN ^^^ bitM) := by rw [h_sum_bits]; + _ = _ := by exact h_xor.symm + rw [h_left] + + -- 4 * (n2 &&& m2) = 2 * (2 * (n2 &&& m2) + (bn &&& bm)) = 2 * (n &&& m) + have h_right: 4 * (n2 &&& m2) = 2 * (nVal &&& mVal) := by + calc + _ = 4 * (n2 &&& m2) + 0 := by omega; + _ = 4 * (n2 &&& m2) + (bitN &&& bitM) := by rw [h_and_bitN_bitM_eq_0]; + _ = 2 * (2 * (n2 &&& m2) + (bitN &&& bitM)) := by omega; + _ = 2 * ((n2 &&& m2) * 2 + (bitN &&& bitM)) := by rw [mul_comm (a:=(n2 &&& m2)) (b:=2)]; + _ = 2 * (nVal &&& mVal) := by rw [h_and]; + rw [h_right] + +lemma add_shiftRight_distrib {n m k: ℕ} (h_and_zero: n &&& m = 0): + (n + m) >>> k = (n >>> k) + (m >>> k) := by + rw [sum_eq_xor_plus_twice_and, h_and_zero, mul_zero, add_zero] + conv => + rhs + rw [sum_eq_xor_plus_twice_and] + rw [←Nat.shiftRight_and_distrib, h_and_zero] + rw [Nat.zero_shiftRight, mul_zero, add_zero] + rw 
[←Nat.shiftRight_xor_distrib] + +lemma sum_of_and_eq_zero_is_xor {n m : ℕ} (h_n_AND_m : n &&& m = 0) : n + m = n ^^^ m := by + rw [sum_eq_xor_plus_twice_and, h_n_AND_m] + omega + +lemma xor_of_and_eq_zero_is_or {n m : ℕ} (h_n_AND_m : n &&& m = 0) : n ^^^ m = n ||| m := by + apply eq_iff_eq_all_bits.mpr + intro k + rw [Nat.shiftRight_xor_distrib, Nat.shiftRight_or_distrib] + rw [Nat.and_xor_distrib_right] -- lhs + rw [Nat.and_distrib_right] -- rhs + -- ⊢ (n >>> k &&& 1) ^^^ (m >>> k &&& 1) = (n >>> k &&& 1) ||| (m >>> k &&& 1) + set bitN := n >>> k &&& 1 + set bitM := m >>> k &&& 1 + have h_bitN: bitN < 2 := by + simp only [bitN, Nat.and_one_is_mod] + simp only [gt_iff_lt, Nat.ofNat_pos, Nat.mod_lt (x := n >>> k) (y := 2)] + have h_bitM: bitM < 2 := by + simp only [bitM, Nat.and_one_is_mod] + simp only [gt_iff_lt, Nat.ofNat_pos, Nat.mod_lt (x := m >>> k) (y := 2)] + -- ⊢ bitN ^^^ bitM = bitN ||| bitM + have h_and_bitN_bitM: (bitN &&& bitM) = 0 := by + exact and_eq_zero_iff_and_each_bit_eq_zero.mp h_n_AND_m k + interval_cases bitN -- case analysis on `bitN, bitM` + · interval_cases bitM + · rfl + · rfl + · interval_cases bitM + · rfl + · contradiction + +lemma sum_of_and_eq_zero_is_or {n m : ℕ} (h_n_AND_m : n &&& m = 0) : n + m = n ||| m := by + rw [sum_eq_xor_plus_twice_and, h_n_AND_m, mul_zero, add_zero] + exact xor_of_and_eq_zero_is_or h_n_AND_m + +lemma bit_of_add_distrib {n m k: ℕ} + (h_n_AND_m: n &&& m = 0): bit k (n + m) = bit k n + bit k m := by + unfold bit + rw [sum_of_and_eq_zero_is_xor h_n_AND_m] + rw [Nat.shiftRight_xor_distrib, Nat.and_xor_distrib_right] + set bitN := n >>> k &&& 1 + set bitM := m >>> k &&& 1 + have h_bitN: bitN < 2 := by + simp only [bitN, Nat.and_one_is_mod] + simp only [gt_iff_lt, Nat.ofNat_pos, Nat.mod_lt (x := n >>> k) (y := 2)] + have h_bitM: bitM < 2 := by + simp only [bitM, Nat.and_one_is_mod] + simp only [gt_iff_lt, Nat.ofNat_pos, Nat.mod_lt (x := m >>> k) (y := 2)] + have h_bitN_and_bitM: (bitN &&& bitM) = 0 := by + exact 
and_eq_zero_iff_and_each_bit_eq_zero.mp h_n_AND_m k + exact (sum_of_and_eq_zero_is_xor (n:=bitN) (m:=bitM) h_bitN_and_bitM).symm + +lemma bit_of_multiple_of_power_of_two {n k p: ℕ}: + bit (k+p) (2^p * n) = bit k n := by + unfold bit + have h_eq: 2^p * n = n <<< p := by + rw [mul_comm] + exact Eq.symm (Nat.shiftLeft_eq n p) + rw [h_eq] + rw [add_comm (a:=k) (b:=p), Nat.shiftRight_add] + rw [Nat.shiftLeft_shiftRight] + +lemma bit_of_shiftRight {n p: ℕ}: + ∀ k, bit k (n >>> p) = bit (k+p) n := by + intro k + unfold bit + rw [←Nat.shiftRight_add] + rw [←add_comm] + +theorem bit_repr {ℓ : Nat} (h_ℓ: ℓ > 0): ∀ j, j < 2^ℓ → + j = ∑ k ∈ Finset.Icc 0 (ℓ-1), (bit k j) * 2^k := by + induction ℓ with + | zero => + -- Base case: ℓ = 0 + intro j h_j + have h_j_zero : j = 0 := by exact Nat.lt_one_iff.mp h_j + subst h_j_zero + simp only [zero_tsub, Finset.Icc_self, Finset.sum_singleton, pow_zero, mul_one] + unfold bit + rw [Nat.shiftRight_zero, Nat.and_one_is_mod] + | succ ℓ₁ ih => + by_cases h_ℓ₁: ℓ₁ = 0 + · simp only [h_ℓ₁, zero_add, pow_one, tsub_self, Finset.Icc_self, Finset.sum_singleton, + pow_zero, mul_one]; + intro j hj + interval_cases j + · simp only [bit, Nat.shiftRight_zero, Nat.and_one_is_mod, Nat.zero_mod] + · simp only [bit, Nat.shiftRight_zero, Nat.and_one_is_mod, Nat.zero_mod] + · push_neg at h_ℓ₁ + set ℓ := ℓ₁ + 1 + have h_ℓ_eq: ℓ = ℓ₁ + 1 := by rfl + intro j h_j + -- Inductive step: assume theorem holds for ℓ₁ = ℓ - 1 + -- => show j = ∑ k ∈ Finset.range (ℓ + 1), (bit k j) * 2^k + -- Split j into lsb (b) and higher bits (m) & reason inductively from the predicate of (m, ℓ₁) + set b := bit 0 j -- Least significant bit: j % 2 + set m := j >>> 1 -- Higher bits: j / 2 + have h_b_eq: b = bit 0 j := by rfl + have h_m_eq: m = j >>> 1 := by rfl + have h_bit_shift: ∀ k, bit (k+1) j = bit k m := by + intro k + rw [h_m_eq] + exact (bit_of_shiftRight (n:=j) (p:=1) k).symm + have h_j_eq : j = b + 2 * m := by + calc + _ = 2 * m + b := by + have h_m_eq: m = j/2 := by rfl + have 
h_b_eq: b = j%2 := by + rw [h_b_eq]; unfold bit; rw [Nat.shiftRight_zero]; rw [Nat.and_one_is_mod]; + rw [h_m_eq, h_b_eq]; + rw [Nat.div_add_mod (m:=j) (n:=2)]; -- n * (m / n) + m % n = m := by + _ = b + 2 * m := by omega; + have h_m : m < 2^ℓ₁ := by + by_contra h_m_ge_2_pow_ℓ + push_neg at h_m_ge_2_pow_ℓ + have h_j_ge: j ≥ 2^ℓ := by + calc _ = 2 * m + b := by rw [h_j_eq]; omega + _ ≥ 2 * (2^ℓ₁) + b := by omega + _ = 2^ℓ + b := by rw [h_ℓ_eq]; omega; + _ ≥ 2^ℓ := by omega; + exact Nat.not_lt_of_ge h_j_ge h_j -- contradiction + have h_m_repr := ih (j:=m) (by omega) h_m + have bit_shift : ∀ k, bit (k + 1) j = bit k m := by + intro k + rw [h_m_eq] + exact (bit_of_shiftRight (n:=j) (p:=1) k).symm + -- ⊢ j = ∑ k ∈ Finset.range ℓ, bit k j * 2 ^ k + have h_sum: ∑ k ∈ Finset.Icc 0 (ℓ-1), bit k j * 2 ^ k + = (∑ k ∈ Finset.Icc 0 0, bit k j * 2 ^ k) + + (∑ k ∈ Finset.Icc 1 (ℓ-1), bit k j * 2 ^ k) := by + apply sum_Icc_split + omega + omega + rw [h_sum] + rw [h_j_eq] + rw [Finset.Icc_self, Finset.sum_singleton, pow_zero, mul_one] + + have h_sum_2: ∑ k ∈ Finset.Icc 1 (ℓ-1), bit k (b + 2 * m) * 2 ^ k + = ∑ k ∈ Finset.Icc 0 (ℓ₁-1), bit k (m) * 2 ^ (k+1) := by + apply Finset.sum_bij' (fun i _ => i - 1) (fun i _ => i + 1) + · -- left inverse + intro i hi + simp only [Finset.mem_Icc] at hi ⊢ + exact Nat.sub_add_cancel hi.1 + · -- right inverse + intro i hi + norm_num + · -- function value match + intro i hi + rw [←h_j_eq] + rw [bit_of_shiftRight] + have ⟨left_bound, right_bound⟩ := Finset.mem_Icc.mp hi + rw [Nat.sub_add_cancel left_bound] + · -- left membership preservation + intro i hi -- hi : i ∈ Finset.Icc 1 (ℓ - 1) + rw [Finset.mem_Icc] + have ⟨left_bound, right_bound⟩ := Finset.mem_Icc.mp hi + -- ⊢ 0 ≤ i - 1 ∧ i - 1 ≤ ℓ₁ - 1 + apply And.intro + · exact Nat.pred_le_pred left_bound + · exact Nat.pred_le_pred right_bound + · -- right membership preservation + intro j hj + rw [Finset.mem_Icc] + have ⟨left_bound, right_bound⟩ := Finset.mem_Icc.mp hj -- (0 ≤ j ∧ j ≤ ℓ₁ - 1) + -- ⊢ 1 
≤ j + 1 ∧ j + 1 ≤ ℓ - 1 + apply And.intro + · exact Nat.le_add_of_sub_le left_bound + · rw [h_ℓ_eq]; rw [Nat.add_sub_cancel]; -- ⊢ j + 1 ≤ ℓ₁ + have h_j_add_1_le_ℓ₁: j + 1 ≤ ℓ₁ := by + calc j + 1 ≤ (ℓ₁ - 1) + 1 := by apply Nat.add_le_add_right; exact right_bound; + _ = ℓ₁ := by rw [Nat.sub_add_cancel]; omega; + exact h_j_add_1_le_ℓ₁ + rw [h_sum_2] + + have h_sum_3: ∑ k ∈ Finset.Icc 0 (ℓ₁-1), bit k (m) * 2 ^ (k+1) + = 2 * ∑ k ∈ Finset.Icc 0 (ℓ₁-1), bit k (m) * 2 ^ k := by + calc + _ = ∑ k ∈ Finset.Icc 0 (ℓ₁-1), ((bit k (m) * 2^k) * 2) := by + apply Finset.sum_congr rfl (fun k hk => by + rw [Finset.mem_Icc] at hk -- hk : 0 ≤ k ∧ k ≤ ℓ₁ - 1 + have h_res: bit k (m) * 2 ^ (k+1) = bit k (m) * 2 ^ k * 2 := by + rw [Nat.pow_succ, ←mul_assoc] + exact h_res + ) + _ = (∑ k ∈ Finset.Icc 0 (ℓ₁-1), bit k (m) * 2 ^ k) * 2 := by rw [Finset.sum_mul] + _ = 2 * ∑ k ∈ Finset.Icc 0 (ℓ₁-1), bit k (m) * 2 ^ k := by rw [mul_comm] + rw [h_sum_3] + rw [←h_m_repr] + conv => + rhs + rw [←h_j_eq] +end BitwiseIdentities + +section FinFieldPolyHelper +variable {Fq : Type*} [Field Fq] [Fintype Fq] + +section FieldVanishingPolynomialEquality +-- NOTE: We lift `∏_{c ∈ Fq} (X - c) = X^q - X` from `Fq[X]` to `L[X]`, +-- then achieve a generic version `∏_{c ∈ Fq} (p - c) = p^q - p` for any `p` in `L[X]` + +/-- +The polynomial `X^q - X` factors into the product of `(X - c)` ∀ `c` ∈ `Fq`, +i.e. `∏_{c ∈ Fq} (X - c) = X^q - X`. +-/ +theorem prod_X_sub_C_eq_X_pow_card_sub_X (h_Fq_card_gt_1: Fintype.card Fq > 1): + (∏ c ∈ (Finset.univ : Finset Fq), (Polynomial.X - Polynomial.C c)) = + Polynomial.X^(Fintype.card Fq) - Polynomial.X := by + + -- Step 1: Setup - Define P and Q for clarity. + set P : Fq[X] := ∏ c ∈ (Finset.univ : Finset Fq), (Polynomial.X - Polynomial.C c) + set Q : Fq[X] := Polynomial.X^(Fintype.card Fq) - Polynomial.X + + -- We will prove P = Q by showing they are both monic and have the same roots. + + -- Step 2: Prove P and Q are monic. 
+ have hP_monic : P.Monic := by + apply Polynomial.monic_prod_of_monic + intro c _ + exact Polynomial.monic_X_sub_C c + + have hQ_monic : Q.Monic := by + apply Polynomial.monic_X_pow_sub + -- The condition is that degree(X) < Fintype.card Fq + rw [Polynomial.degree_X] + exact_mod_cast h_Fq_card_gt_1 + + have h_roots_P : P.roots = (Finset.univ : Finset Fq).val := by + apply Polynomial.roots_prod_X_sub_C + -- The roots of Q are, by Fermat's Little Theorem, also all elements of Fq. + have h_roots_Q : Q.roots = (Finset.univ : Finset Fq).val := by + exact FiniteField.roots_X_pow_card_sub_X Fq + + -- Step 3: Prove P and Q have the same set of roots. + -- We show that both root sets are equal to `Finset.univ`. + have h_roots_eq : P.roots = Q.roots := by + rw [h_roots_P, h_roots_Q] + + have hP_splits : P.Splits (RingHom.id Fq) := by + -- ⊢ Splits (RingHom.id Fq) (∏ c ∈ Finset.univ, X - C c) + apply Polynomial.splits_prod + intro c _ + apply Polynomial.splits_X_sub_C + + have hQ_card_roots: Q.roots.card = Fintype.card Fq := by + rw [h_roots_Q] + exact rfl + + have natDegree_Q: Q.natDegree = Fintype.card Fq := by + unfold Q + have degLt: (X: Fq[X]).natDegree < ((X: Fq[X]) ^ Fintype.card Fq).natDegree := by + rw [Polynomial.natDegree_X_pow] + rw [Polynomial.natDegree_X] + exact h_Fq_card_gt_1 + rw [Polynomial.natDegree_sub_eq_left_of_natDegree_lt degLt] + rw [Polynomial.natDegree_X_pow] + + have hQ_splits : Q.Splits (RingHom.id Fq) := by + unfold Q + apply Polynomial.splits_iff_card_roots.mpr + -- ⊢ (X ^ Fintype.card Fq - X).roots.card = (X ^ Fintype.card Fq - X).natDegree + rw [hQ_card_roots] + rw [natDegree_Q] + + -- 4. CONCLUSION: Since P and Q are monic, split, and have the same roots, they are equal. 
+ have hP_eq_prod: P = (Multiset.map (fun a ↦ Polynomial.X - Polynomial.C a) P.roots).prod := by + apply Polynomial.eq_prod_roots_of_monic_of_splits_id hP_monic hP_splits + have hQ_eq_prod: Q = (Multiset.map (fun a ↦ Polynomial.X - Polynomial.C a) Q.roots).prod := by + apply Polynomial.eq_prod_roots_of_monic_of_splits_id hQ_monic hQ_splits + rw [hP_eq_prod, hQ_eq_prod, h_roots_eq] + +variable {L: Type*} [CommRing L] [Algebra Fq L] +/-- +The identity `∏_{c ∈ Fq} (X - c) = X^q - X` also holds in the polynomial ring `L[X]`, +where `L` is any field extension of `Fq`. +-/ +theorem prod_X_sub_C_eq_X_pow_card_sub_X_in_L + (h_Fq_card_gt_1: Fintype.card Fq > 1): + (∏ c ∈ (Finset.univ : Finset Fq), (Polynomial.X - Polynomial.C (algebraMap Fq L c))) = + Polynomial.X^(Fintype.card Fq) - Polynomial.X := by + + -- Let `f` be the ring homomorphism from `Fq` to `L`. + let f := algebraMap Fq L + + -- The goal is an equality in L[X]. We will show that this equality is just + -- the "mapped" version of the equality in Fq[X], which we already proved. + + -- First, show the LHS of our goal is the mapped version of the LHS of the base theorem. + have h_lhs_map : (∏ c ∈ (Finset.univ : Finset Fq), (Polynomial.X - Polynomial.C (f c))) = + Polynomial.map f (∏ c ∈ (Finset.univ : Finset Fq), (Polynomial.X - Polynomial.C c)) := by + -- `map` is a ring homomorphism, so it distributes over products, subtraction, X, and C. + rw [Polynomial.map_prod] + congr! with c + rw [Polynomial.map_sub, Polynomial.map_X, Polynomial.map_C] + + -- Next, show the RHS of our goal is the mapped version of the RHS of the base theorem. + have h_rhs_map : (Polynomial.X^(Fintype.card Fq) - Polynomial.X) = + Polynomial.map f (Polynomial.X^(Fintype.card Fq) - Polynomial.X) := by + -- `map` also distributes over powers. + rw [Polynomial.map_sub, Polynomial.map_pow, Polynomial.map_X] + + -- Now, we can rewrite our goal using these facts. 
+ rw [h_lhs_map, h_rhs_map] + -- ⊢ map f (∏ c, (X - C c)) = map f (X ^ Fintype.card Fq - X) + + -- The goal is now `map f (LHS_base) = map f (RHS_base)`. + -- This is true if `LHS_base = RHS_base`, which is exactly our previous theorem. + rw [prod_X_sub_C_eq_X_pow_card_sub_X h_Fq_card_gt_1] + +/-- +The identity `∏_{c ∈ Fq} (X - c) = X^q - X` also holds in the polynomial ring `L[X]`, +where `L` is any field extension of `Fq`. +-/ +theorem prod_poly_sub_C_eq_poly_pow_card_sub_poly_in_L + (h_Fq_card_gt_1: Fintype.card Fq > 1) (p: L[X]): + (∏ c ∈ (Finset.univ : Finset Fq), (p - Polynomial.C (algebraMap Fq L c))) = + p^(Fintype.card Fq) - p := by + + -- The strategy is to take the known identity for the polynomial X and substitute + -- X with the arbitrary polynomial p. This substitution is formally known as + -- polynomial composition (`Polynomial.comp`). + + -- Let `q` be the cardinality of the field Fq for brevity. + let q := Fintype.card Fq + + -- From the previous theorem, we have the base identity in L[X]: + -- `(∏ c, (X - C c)) = X^q - X` + let base_identity := prod_X_sub_C_eq_X_pow_card_sub_X_in_L (L:=L) h_Fq_card_gt_1 + + -- `APPROACH: f = g => f.comp(p) = g.comp(p)` + have h_composed_eq : (∏ c ∈ (Finset.univ : Finset Fq), (X - C (algebraMap Fq L c))).comp p + = ((X:L[X])^q - X).comp p := by + rw [base_identity] + + -- Now, we simplify the left-hand side (LHS) and right-hand side (RHS) of `h_composed_eq` + -- to show they match the goal. 
+ + -- First, simplify the LHS: `(∏ c, (X - C c)).comp(p)` + have h_lhs_simp : (∏ c ∈ (Finset.univ : Finset Fq), (X - C (algebraMap Fq L c))).comp p = + ∏ c ∈ (Finset.univ : Finset Fq), (p - C (algebraMap Fq L c)) := by + -- Use the theorem that composition distributes over products + rw [Polynomial.prod_comp] + apply Finset.prod_congr rfl + intro c _ + --⊢ (X - C ((algebraMap Fq L) c)).comp p = p - C ((algebraMap Fq L) c) + rw [Polynomial.sub_comp, Polynomial.X_comp, Polynomial.C_comp] + + -- Next, simplify the RHS: `(X^q - X).comp(p)` + have h_rhs_simp : ((X:L[X])^q - X).comp p = p^q - p := by + -- Composition distributes over subtraction and powers. + rw [Polynomial.sub_comp, Polynomial.pow_comp, Polynomial.X_comp] + + -- By substituting our simplified LHS and RHS back into `h_composed_eq`, + -- we arrive at the desired goal. + rw [h_lhs_simp, h_rhs_simp] at h_composed_eq + exact h_composed_eq +end FieldVanishingPolynomialEquality + +section FrobeniusPolynomialIdentity +-- NOTE: We lift the Frobenius identity from `Fq[X]` to `L[X]` +/-- +The Frobenius identity for polynomials in `Fq[X]`. +The `q`-th power of a sum of polynomials is the sum of their `q`-th powers. +-/ +theorem frobenius_identity_in_ground_field + {h_Fq_char_prime: Fact (Nat.Prime (ringChar Fq))} (f g : Fq[X]) : + (f + g)^(Fintype.card Fq) = f^(Fintype.card Fq) + g^(Fintype.card Fq) := by + -- The Freshman's Dream `(a+b)^e = a^e + b^e` holds if `e` is a power of the characteristic. + -- First, we establish that `q = p^n` where `p` is the characteristic of `Fq`. + let p := ringChar Fq + have h_p_prime : Fact p.Prime := h_Fq_char_prime + obtain ⟨n, hp, hn⟩ := FiniteField.card Fq p + rw [hn] + -- The polynomial ring `Fq[X]` also has characteristic `p`. + haveI : CharP Fq[X] p := Polynomial.charP + -- Apply the general "Freshman's Dream" theorem for prime powers. 
+ exact add_pow_expChar_pow f g p ↑n + +variable {L: Type*} [CommRing L] [Algebra Fq L] [Nontrivial L] + + +/-- +The lifted Frobenius identity for polynomials in `L[X]`, where `L` is an `Fq`-algebra. +The exponent `q` is the cardinality of the base field `Fq`. +-/ +theorem frobenius_identity_in_algebra {h_Fq_char_prime: Fact (Nat.Prime (ringChar Fq))} + (f g : L[X]): (f + g)^(Fintype.card Fq) = f^(Fintype.card Fq) + g^(Fintype.card Fq) := by + -- The logic is identical. The key is that `L` inherits the characteristic of `Fq`. + let p := ringChar Fq + haveI : Fact p.Prime := h_Fq_char_prime + obtain ⟨n, hp, hn⟩ := FiniteField.card Fq p + + -- Rewrite the goal using `q = p^n`. + rw [hn] + + -- Since `L` is an `Fq`-algebra, it must have the same characteristic `p`. + have h_charP_Fq: CharP Fq p := by + simp only [p] + exact ringChar.charP Fq + + have h_charP_L: CharP L p := by + have h_inj: Function.Injective (algebraMap Fq L) := by + exact RingHom.injective (algebraMap Fq L) -- L must be nontrivial + have h_charP_L := (RingHom.charP_iff (A:=L) (f:=algebraMap Fq L) (H:=h_inj) p).mp h_charP_Fq + exact h_charP_L + -- The polynomial ring `L[X]` also has characteristic `p`. + have h_charP_L_X: CharP L[X] p := by + exact Polynomial.charP + exact add_pow_expChar_pow f g p ↑n + +omit [Fintype Fq] [Nontrivial L] in +/-- + Restricting a linear map on polynomial composition to a linear map on polynomial evaluation. 
+-/ +theorem linear_map_of_comp_to_linear_map_of_eval (f: L[X]) + (h_f_linear : IsLinearMap (R:=Fq) (M:=L[X]) (M₂:=L[X]) (f:=fun inner_p ↦ f.comp inner_p)) : + IsLinearMap (R:=Fq) (M:=L) (M₂:=L) (f:=fun x ↦ f.eval x) := by + constructor + · intro x y + -- ⊢ eval (x + y) f = eval x f + eval y f + have h_comp_add := h_f_linear.map_add + have h_spec := h_comp_add (C x) (C y) + have h_lhs_simp : f.comp (C x + C y) = C (f.eval (x + y)) := by + rw [←Polynomial.C_add, Polynomial.comp_C] + have h_rhs_simp : f.comp (C x) + f.comp (C y) = C (f.eval x + f.eval y) := by + rw [Polynomial.comp_C, Polynomial.comp_C, Polynomial.C_add] + rw [h_lhs_simp, h_rhs_simp] at h_spec + exact (Polynomial.C_injective) h_spec + · intro k x + have h_comp_smul := h_f_linear.map_smul + have h_spec := h_comp_smul k (C x) + have h_lhs_simp : f.comp (k • C x) = C (f.eval (k • x)) := by + rw [Polynomial.smul_C, Polynomial.comp_C] + have h_rhs_simp : k • f.comp (C x) = C (k • f.eval x) := by + rw [Polynomial.comp_C, Polynomial.smul_C] + rw [h_lhs_simp, h_rhs_simp] at h_spec + exact (Polynomial.C_injective) h_spec +end FrobeniusPolynomialIdentity + +end FinFieldPolyHelper + +section MonomialBasis + +universe u + +-- Fix a binary field `L` of degree `r` over its prime subfield `F₂` +variable {L : Type u} [Field L] [Fintype L] +variable {F₂ : Type u} [Field F₂] [Fintype F₂] (hF₂ : Fintype.card F₂ = 2) +variable [Algebra F₂ L] +-- We assume an `F₂`-basis for `L`, denoted by `(β₀, β₁, ..., β_{r-1})`, indexed by natural numbers. 
+variable (β : Nat → L) (hβ_lin_indep : LinearIndependent (R:=F₂) (M:=L) (v:=β)) + +omit [Fintype L] in +@[simp] +theorem sum_degreeLT_monomial_eq_subtype {n: ℕ} (p : L⦃< n⦄[X]) : + (⟨p.val.sum (fun n a => Polynomial.monomial n a), by + -- degree of sum is degree of p.val, which is < n + rw [Polynomial.sum_monomial_eq p.val] + exact p.property + ⟩ : L⦃< n⦄[X]) = p := + -- `span_le` changes the goal to showing every vector in the generating set + Subtype.eq (Polynomial.sum_monomial_eq p.val) + +noncomputable def monomialBasisOfDegreeLT {n: ℕ} : Basis (Fin n) L (L⦃< n⦄[X]) := by + set monomials_in_submodule:Fin n → L⦃< n⦄[X] := fun ⟨i, hi⟩ => + ⟨monomial (R:=L) (n:=i) (a:=1), by + simp only [Polynomial.mem_degreeLT] + simp only [ne_eq, one_ne_zero, not_false_eq_true, degree_monomial, Nat.cast_lt] + exact hi + ⟩ + + have h_li_submodule : LinearIndependent L monomials_in_submodule := by + rw [linearIndependent_iff] -- alternative: linearIndependent_powers_iff_aeval + intro l hl -- l : Fin n →₀ L, hl : (Finsupp.linearCombination L monomials_in_submodule) l = 0 + apply Finsupp.ext -- ⊢ ∀ (a : Fin n), l a = 0 a + intro i + have h_poly_eq_zero : (Finsupp.linearCombination L monomials_in_submodule l).val = 0 := by + -- converts the equality of subtypes `hl` to an equality of values + exact Subtype.ext_iff.mp hl + + have h_coeff_eq_li : + coeff (Finsupp.linearCombination L monomials_in_submodule l).val i = l i := by + -- `span_le` changes the goal to showing every vector in the generating set + set v := monomials_in_submodule + simp only [v, monomials_in_submodule, Subtype.coe_eq_iff, Subtype.coe_mk, + monomial_one_right_eq_X_pow, smul_eq_mul] + rw [Finsupp.linearCombination_apply] + simp only [SetLike.mk_smul_mk, monomials_in_submodule, v] + conv => + lhs + simp only [Finsupp.sum, AddSubmonoidClass.coe_finset_sum, finset_sum_coeff, coeff_smul, + coeff_X_pow, smul_eq_mul, mul_ite, mul_one, mul_zero, monomials_in_submodule, v] + -- ⊢ (∑ x ∈ l.support, if ↑i = ↑x then l x else 
0) = l i + simp_rw [Fin.val_eq_val, eq_comm] + -- ⊢ l i = ∑ x ∈ l.support, if i = x then l x else 0 + by_cases h_i_in_l_support : i ∈ l.support + -- `span_le` changes the goal to showing every vector in the generating set + · rw [Finset.sum_ite_eq_of_mem] -- ⊢ represent l i as a sum over l.support + exact h_i_in_l_support + · have l_i_is_zero : l i = 0 := by + exact Finsupp.notMem_support_iff.mp h_i_in_l_support + simp only [l_i_is_zero, Finset.sum_ite_eq, Finsupp.mem_support_iff, ne_eq, + not_true_eq_false, ↓reduceIte, monomials_in_submodule, v] + rw [h_poly_eq_zero] at h_coeff_eq_li + simp only [coeff_zero] at h_coeff_eq_li + exact h_coeff_eq_li.symm + + have h_span_submodule : Submodule.span (R:=L) (Set.range monomials_in_submodule) = ⊤ :=by + apply le_antisymm + · -- First goal: Prove that your span is a subspace of the whole space. + -- `span_le` changes the goal to showing every vector in the generating set + rw [Submodule.span_le] + -- ⊢ ↑(Finset.image (fun n ↦ X ^ n) (Finset.range n)) ⊆ ↑L⦃< n⦄[X] + rw [Set.range_subset_iff] -- simp [Set.image_subset_image_iff] + intro j -- `j` is an index for your family, e.g., `j : Fin n` + -- ⊢ monomials_in_submodule j ∈ ↑⊤ + simp only [Submodule.top_coe, Set.mem_univ, monomials_in_submodule] + · -- Second goal: Prove the whole space is a subspace of your span. 
+ rw [Submodule.top_le_span_range_iff_forall_exists_fun] + intro p + have hp := p.property + have h_deg_p: p.val.degree < n := by + rw [Polynomial.mem_degreeLT] at hp + exact hp + -- ⊢ ∃ c, ∑ i, c i • monomials_in_submodule i = p + set c: Fin n → L := fun i => p.val.coeff i + have h_c: c = fun (i: Fin n) => p.val.coeff i := by rfl + use c + -- ⊢ ∑ i, c i • monomials_in_submodule i = p + apply Subtype.ext -- bring equality from ↑L⦃< n⦄[X] to L[X] + rw [←sum_degreeLT_monomial_eq_subtype (p:=p)] -- convert ↑p in rhs into (↑p).sum form + conv => + rhs -- ↑⟨(↑p).sum fun n a ↦ (monomial n) a, ⋯⟩ + -- we have to convert (↑p).sum into Fin n → L form using Polynomial.sum_fin + simp only [monomial_zero_right, implies_true, ←Polynomial.sum_fin (hn := h_deg_p)] + -- ⊢ ↑(∑ i, c i • monomials_in_submodule i) = ∑ i, (monomial ↑i) ((↑p).coeff ↑i) + rw [AddSubmonoidClass.coe_finset_sum] -- bring both sides back to L[X] + apply Finset.sum_congr rfl + intro ⟨i, hi_finN⟩ hi + simp only [SetLike.mk_smul_mk, c, monomials_in_submodule] + -- ⊢ (↑p).coeff i • (monomial i) 1 = (monomial i) ((↑p).coeff i) + simp only [monomial_one_right_eq_X_pow] + rw [←Polynomial.smul_X_eq_monomial] + + apply Basis.mk (R:=L) (M:=degreeLT (R:=L) (n:=n)) + exact h_li_submodule + exact le_of_eq (hab:=h_span_submodule.symm) + +omit [Fintype L] in +theorem finrank_degreeLT_n (n: ℕ) : Module.finrank L (L⦃< n⦄[X]) = n := by + have monomial_basis: Basis (Fin n) (R:=L) (M:=L⦃< n⦄[X]) := monomialBasisOfDegreeLT (n:=n) + rw [Module.finrank_eq_card_basis monomial_basis] + rw [Fintype.card_fin] + +instance finiteDimensional_degreeLT {n : ℕ} (h_n_pos: 0 < n) : + FiniteDimensional (K:=L) (V:=L⦃< n⦄[X]) := by + have h : 0 < Module.finrank L (L⦃< n⦄[X]) := by + rw [finrank_degreeLT_n n] + omega + exact FiniteDimensional.of_finrank_pos h + +end MonomialBasis +end AdditiveNTT diff --git a/ArkLib/Data/FieldTheory/BinaryTowerField/Basic.lean b/ArkLib/Data/FieldTheory/BinaryField/Tower/Basic.lean similarity index 99% rename from 
ArkLib/Data/FieldTheory/BinaryTowerField/Basic.lean rename to ArkLib/Data/FieldTheory/BinaryField/Tower/Basic.lean index 0a174d655..edd6ccbea 100644 --- a/ArkLib/Data/FieldTheory/BinaryTowerField/Basic.lean +++ b/ArkLib/Data/FieldTheory/BinaryField/Tower/Basic.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao, Chung Thai Nguyen -/ -import ArkLib.Data.FieldTheory.BinaryTowerField.Prelude +import ArkLib.Data.FieldTheory.BinaryField.Tower.Prelude /-! # Binary Tower Fields diff --git a/ArkLib/Data/FieldTheory/BinaryField/Tower/Impl.lean b/ArkLib/Data/FieldTheory/BinaryField/Tower/Impl.lean new file mode 100644 index 000000000..32fe6114d --- /dev/null +++ b/ArkLib/Data/FieldTheory/BinaryField/Tower/Impl.lean @@ -0,0 +1,1964 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Chung Thai Nguyen +-/ + +import ArkLib.Data.FieldTheory.BinaryField.Tower.Basic +import ArkLib.Data.Classes.DCast + +/-! +# Computable Binary Tower Fields + +This file provides executable implementations for binary tower fields + +## Main Definitions + +- `ConcreteBinaryTower k`: the concrete binary tower fields of level k whose elements are + represented via bit vectors of size 2^k + +## TODOs +- Proof of mul_inv_cancel +- Optimization of multiplication via lookup +- Proof of isomorphism with the abstract binary tower fields and derive theorems about multilinear + basis + +## References + +- [Wie88] Doug Wiedemann. "An Iterated Quadratic Extension of GF(2)" In: The Fibonacci Quarterly + 26.4 (1988), pp. 290–295. + +- [FP97] John L. Fan and Christof Paar. "On efficient inversion in tower fields of characteristic + two". In: Proceedings of IEEE International Symposium on Information Theory. 1997. 
+-/ + +namespace BinaryTower +namespace ConcreteDefinition +open Polynomial + +section BaseDefinitions + +def ConcreteBinaryTower : ℕ → Type := fun k => BitVec (2 ^ k) + +section BitVecDCast +instance BitVec.instDCast : DCast Nat BitVec where + dcast h := BitVec.cast h + dcast_id := by + intro n + funext bv -- bv: BitVec n + rw [BitVec.cast_eq, id_eq] + +theorem BitVec.bitvec_cast_eq_dcast {n m : Nat} (h : n = m) (bv : BitVec n) : + BitVec.cast h bv = DCast.dcast h bv := by + simp only [BitVec.cast, BitVec.instDCast] + +theorem BitVec.dcast_id {n : Nat} (bv : BitVec n) : + DCast.dcast (Eq.refl n) bv = bv := by + simp only [BitVec.instDCast.dcast_id, id_eq] + +theorem BitVec.dcast_bitvec_eq {l r val : ℕ} (h_width_eq : l = r): + dcast h_width_eq (BitVec.ofNat l val) = BitVec.ofNat r val := by + subst h_width_eq + rw [dcast_eq] + +@[simp] theorem BitVec.cast_zero {n m : ℕ} (h : n = m) : BitVec.cast h 0 = 0 := rfl +@[simp] theorem BitVec.cast_one {n m : ℕ} (h : n = m) : BitVec.cast h 1 = 1#m := by + simp only [BitVec.ofNat_eq_ofNat, BitVec.cast_ofNat] +@[simp] theorem BitVec.dcast_zero {n m : ℕ} (h : n = m) : DCast.dcast h (0#n) = 0#m := rfl +@[simp] theorem BitVec.dcast_one {n m : ℕ} (h : n = m) : DCast.dcast h (1#n) = 1#m := by + rw [←BitVec.bitvec_cast_eq_dcast] + exact BitVec.cast_one (h:=h) + +theorem BitVec.dcast_bitvec_toNat_eq {w w2 : ℕ} (x : BitVec w) (h_width_eq : w = w2): + BitVec.toNat x = BitVec.toNat (dcast (h_width_eq) x) := by + subst h_width_eq + rw [dcast_eq] + +theorem BitVec.dcast_bitvec_eq_zero {l r : ℕ} (h_width_eq : l = r): + dcast (h_width_eq) 0#(l) = 0#(r) := by + exact BitVec.dcast_bitvec_eq (l:=l) (r:=r) (val:=0) (h_width_eq:=h_width_eq) + +theorem BitVec.dcast_bitvec_extractLsb_eq {w hi1 lo1 hi2 lo2 : ℕ} + (x : BitVec w) (h_lo_eq : lo1 = lo2) + (h_width_eq : hi1 - lo1 + 1 = hi2 - lo2 + 1): + dcast h_width_eq (BitVec.extractLsb (hi:=hi1) (lo:=lo1) x) + = BitVec.extractLsb (hi:=hi2) (lo:=lo2) (x) := by + unfold BitVec.extractLsb 
BitVec.extractLsb' + have xToNat_eq: x.toNat >>> lo1 = x.toNat >>> lo2 := by rw [h_lo_eq] + rw [xToNat_eq] + rw! [h_width_eq] + rw [BitVec.dcast_id] + +theorem BitVec.dcast_dcast_bitvec_extractLsb_eq {w hi lo : ℕ} (x : BitVec w) + (h_width_eq : w = hi - lo + 1): dcast h_width_eq (dcast (h_width_eq.symm) + (BitVec.extractLsb (hi:=hi) (lo:=lo) x)) = BitVec.extractLsb (hi:=hi) (lo:=lo) x := by + simp only [dcast, BitVec.cast_cast, BitVec.cast_eq] + +theorem BitVec.eq_mp_eq_dcast {w w2 : ℕ} (x : BitVec w) (h_width_eq : w = w2) + (h_bitvec_eq : BitVec w = BitVec w2 := by rw [h_width_eq]): + Eq.mp (h:=h_bitvec_eq) (a:=x) = dcast (h_width_eq) (x) := by + rw [eq_mp_eq_cast] -- convert Eq.mp into root.cast + rw [dcast_eq_root_cast] + +theorem BitVec.extractLsb_concat_hi {hi_size lo_size : ℕ} (hi : BitVec hi_size) + (lo : BitVec lo_size) (h_hi : hi_size > 0): + BitVec.extractLsb (hi:=hi_size + lo_size - 1) (lo:=lo_size) + (BitVec.append (msbs:=hi) (lsbs:=lo)) = dcast (by + rw [←Nat.sub_add_comm (by omega), Nat.sub_add_cancel (by omega), Nat.add_sub_cancel] + ) hi := by + simp only [BitVec.extractLsb] + simp [BitVec.extractLsb'_append_eq_of_le (Nat.le_refl lo_size)] + simp only [BitVec.extractLsb', Nat.shiftRight_zero, BitVec.ofNat_toNat] + have hi_eq: hi = BitVec.ofNat hi_size (hi.toNat) := by rw [BitVec.ofNat_toNat, BitVec.setWidth_eq] + rw [hi_eq] + rw [dcast_bitvec_eq] -- un-dcast + simp only [BitVec.ofNat_toNat, BitVec.setWidth_eq] + +theorem BitVec.extractLsb_concat_lo {hi_size lo_size : ℕ} (hi : BitVec hi_size) + (lo : BitVec lo_size) (h_lo: lo_size > 0): BitVec.extractLsb (hi:=lo_size-1) (lo:=0) + (BitVec.append (msbs:=hi) (lsbs:=lo)) = dcast (by + rw [←Nat.sub_add_comm (h:=by omega), Nat.sub_add_cancel (h:=by omega), Nat.sub_zero] + ) lo := by + simp only [BitVec.extractLsb] + simp + simp only [BitVec.extractLsb', Nat.shiftRight_zero, BitVec.ofNat_toNat] + have lo_eq: lo = BitVec.ofNat lo_size (lo.toNat) := by rw [BitVec.ofNat_toNat, BitVec.setWidth_eq] + rw [lo_eq] 
+ rw [dcast_bitvec_eq] -- un-dcast + simp only [BitVec.ofNat_toNat, BitVec.setWidth_eq] + -- ⊢ BitVec.setWidth (n - 1 + 1) (hi ++ lo) = BitVec.setWidth (n - 1 + 1) lo + rw [BitVec.setWidth_append] + have h_le: lo_size - 1 + 1 ≤ lo_size := by rw [Nat.sub_add_cancel (by omega)] + simp only [h_le, ↓reduceDIte] + +theorem Nat.shiftRight_eq_sub_mod_then_div_two_pow {n lo_len : ℕ}: + n >>> lo_len = (n - n % 2^lo_len) / 2^lo_len := by + rw [Nat.shiftRight_eq_div_pow] + rw (occs := .pos [1]) [Nat.div_eq_sub_mod_div] + +theorem Nat.shiftRight_lo_mod_2_pow_hi_shiftLeft_lo (n hi_len lo_len : ℕ) + (h_n: n < 2^(hi_len + lo_len)): + (((n >>> lo_len) % (2^hi_len)) <<< lo_len) = (n - n % 2^lo_len) := by + rw [Nat.shiftLeft_eq] + have n_shr_lo_mod_2_pow_hi: (n >>> lo_len) % 2 ^ hi_len = n >>> lo_len := by + have shr_lt: n >>> lo_len < 2^hi_len := by + rw [Nat.shiftRight_eq_div_pow] + have n_lt: n < 2 ^ lo_len * 2 ^ hi_len := by + rw [←Nat.pow_add] + rw [Nat.add_comm] + exact h_n + exact Nat.div_lt_of_lt_mul n_lt + have shr_mod: (n >>> lo_len) % 2 ^ hi_len = n >>> lo_len := by + rw [Nat.mod_eq_of_lt] + exact shr_lt + rw [shr_mod] + + rw [n_shr_lo_mod_2_pow_hi] -- ⊢ n = n >>> lo_len * 2 ^ lo_len + n % 2 ^ lo_len + rw [Nat.shiftRight_eq_div_pow] + -- ⊢ n = n / 2 ^ lo_len * 2 ^ lo_len + n % 2 ^ lo_len + have n_div_eq: (n / 2^lo_len) = (n - n % 2^lo_len) / 2^lo_len := by + exact Nat.div_eq_sub_mod_div + rw [n_div_eq] + have h_div1: 2 ^ lo_len ∣ n - n % 2 ^ lo_len := by + exact Nat.dvd_sub_mod n + rw [Nat.div_mul_cancel h_div1] + +theorem Nat.reconstruct_from_hi_and_lo_parts (n hi_len lo_len : ℕ) (h_n: n < 2^(hi_len + lo_len)): + n = (((n >>> lo_len) % (2^hi_len)) <<< lo_len) + (n % (2^lo_len)) := by + rw [Nat.shiftRight_lo_mod_2_pow_hi_shiftLeft_lo (n:=n) (hi_len:=hi_len) + (lo_len:=lo_len) (h_n:=h_n)] + -- ⊢ n = n - n % 2 ^ lo_len + n % 2 ^ lo_len + have h_mod_le: n % 2 ^ lo_len ≤ n := by + exact Nat.mod_le n (2^lo_len) + rw [Nat.sub_add_cancel h_mod_le] + +theorem 
Nat.reconstruct_from_hi_and_lo_parts_or_ver (n hi_len lo_len : ℕ) + (h_n: n < 2^(hi_len + lo_len)): + n = (((n >>> lo_len) % (2^hi_len)) <<< lo_len) ||| (n % (2^lo_len)) := by + rw (occs := .pos [1]) [Nat.reconstruct_from_hi_and_lo_parts (n:=n) (hi_len:=hi_len) + (lo_len:=lo_len) (h_n:=h_n)] + -- ⊢ (n >>> lo_len % 2 ^ hi_len) <<< lo_len + n % 2 ^ lo_len + -- = (n >>> lo_len % 2 ^ hi_len) <<< lo_len ||| n % 2 ^ lo_len + rw [Nat.shiftLeft_add_eq_or_of_lt (a:=n >>> lo_len % 2 ^ hi_len) (b:=n % 2 ^ lo_len) + (b_lt:=by exact Nat.mod_lt n (by norm_num))] + +theorem BitVec.eq_append_iff_extract {lo_size hi_size: ℕ} (lo: BitVec lo_size) (hi: BitVec hi_size) + (h_hi_gt_0: hi_size > 0) (h_lo_gt_0: lo_size > 0) (x: BitVec (hi_size + lo_size)): + x = dcast (by rfl) (BitVec.append (msbs:=hi) (lsbs:=lo)) ↔ + hi = dcast (by omega) (BitVec.extractLsb (hi:=hi_size+lo_size-1) (lo:=lo_size) x) ∧ + lo = dcast (by omega) (BitVec.extractLsb (hi:=lo_size-1) (lo:=0) x) := by + -- idea: use BitVec.extractLsb_concat_hi and BitVec.extractLsb_concat_lo + have h_hi := dcast_symm (hb:=(BitVec.extractLsb_concat_hi (hi:=hi) (lo:=lo) + (h_hi:=h_hi_gt_0)).symm) + have h_lo := dcast_symm (hb:=(BitVec.extractLsb_concat_lo (hi:=hi) (lo:=lo) + (h_lo:=h_lo_gt_0)).symm) + constructor + · intro h_x -- x = dcast ⋯ (hi.append lo) + rw [h_x] + apply And.intro + · -- rewrite only the first goal + rw (occs := .pos [1]) [←h_hi] + unfold BitVec.extractLsb BitVec.extractLsb' + rw [←dcast_bitvec_toNat_eq] -- cancel INNER dcast via BitVec.toNat + · rw (occs := .pos [1]) [←h_lo] + unfold BitVec.extractLsb BitVec.extractLsb' + rw [←dcast_bitvec_toNat_eq] -- cancel INNER dcast via BitVec.toNat + · -- Right to left + intro h_extract_eq_parts + obtain ⟨h_hi_eq_extract, h_lo_eq_extract⟩ := h_extract_eq_parts + apply BitVec.eq_of_toNat_eq (x:=x) (y:=(BitVec.append (n:=hi_size) + (m:=lo_size) (msbs:=hi) (lsbs:=lo))) + rw [BitVec.append_eq, BitVec.toNat_append] + -- Convert goal to Nat equality: ⊢ x.toNat = hi.toNat <<< 
lo_size ||| lo.toNat + rw [h_hi_eq_extract, h_lo_eq_extract] + -- convert all BitVec.toNat (dcast bitvec) to Nat + rw [←BitVec.dcast_bitvec_toNat_eq, ←BitVec.dcast_bitvec_toNat_eq] + rw [BitVec.extractLsb, BitVec.extractLsb, BitVec.extractLsb', BitVec.extractLsb'] + rw [Nat.shiftRight_zero] + rw [BitVec.ofNat_toNat] + rw [BitVec.toNat_ofNat, Nat.sub_zero, BitVec.toNat_setWidth] + -- ⊢ x.toNat = (x.toNat >>> lo_size % 2 ^ (hi_size + lo_size - 1 - lo_size + 1)) + --- <<< lo_size ||| x.toNat % 2 ^ (lo_size - 1 + 1) + have h0: lo_size ≤ hi_size + lo_size - 1 := by omega + have h1: 2 ^ (hi_size + lo_size - 1 - lo_size + 1) = 2 ^ hi_size := by + rw [←Nat.sub_add_comm (h:=h0)] + simp only [Nat.ofNat_pos, ne_eq, OfNat.ofNat_ne_one, not_false_eq_true, pow_right_inj₀] + omega + have h2: lo_size - 1 + 1 = lo_size := by rw [Nat.sub_add_cancel (h_lo_gt_0)] + rw [h1, h2] -- ⊢ x.toNat = (x.toNat >>> lo_size % 2 ^ hi_size) + --- <<< lo_size ||| x.toNat % 2 ^ lo_size + exact Nat.reconstruct_from_hi_and_lo_parts_or_ver (n:=x.toNat) + (hi_len:=hi_size) (lo_len:=lo_size) (h_n:=by exact BitVec.isLt (x:=x)) + +end BitVecDCast + +def bitVecToString (width : ℕ) (bv : BitVec width) : String := + Fin.foldl width (fun (s : String) (idx : Fin width) => + -- idx is the MSB-oriented loop index (0 to width-1) + -- Fin.rev idx converts it to an LSB-oriented index + s.push (if BitVec.getLsb' bv (Fin.rev idx) then '1' else '0') + ) "" + +def ConcreteBinaryTower.toBitString {k: ℕ} (bv: ConcreteBinaryTower k): String := + bitVecToString (2^k) (bv) + +-- Helper: Bit width for ConcreteBinaryTower +def width (k : ℕ) : ℕ := 2^k + +-- Convert Nat to ConcreteBinaryTower +def fromNat {k : ℕ} (n : Nat) : ConcreteBinaryTower k := + BitVec.ofNat (2^k) n + +@[simp] theorem fromNat_toNat_eq_self {k : ℕ} (bv : BitVec (2 ^ k)) : + (fromNat (BitVec.toNat bv) : ConcreteBinaryTower k) = bv := by + simp only [BitVec.ofNat_toNat, BitVec.setWidth_eq, fromNat, ConcreteBinaryTower] + +instance 
ConcreteBinaryTower.instDCast_local : DCast ℕ ConcreteBinaryTower where + dcast h_k_eq term_k1 := BitVec.cast (congrArg (fun n => 2^n) h_k_eq) term_k1 + dcast_id := by + intro k_idx; funext x + simp only [id_eq, BitVec.cast, BitVec.ofNatLT_toNat] + +-- Zero element +def zero {k : ℕ} : ConcreteBinaryTower k := BitVec.zero (2^k) + +-- One element +def one {k : ℕ} : ConcreteBinaryTower k := 1#(2^k) + +instance instZeroConcreteBinaryTower (k : ℕ) : Zero (ConcreteBinaryTower k) where + zero := zero +instance instOneConcreteBinaryTower (k : ℕ) : One (ConcreteBinaryTower k) where + one := one + +-- Generic OfNat instance for ConcreteBinaryTower +-- instance (k : ℕ) (n : Nat): OfNat (ConcreteBinaryTower k) n where + -- ofNat := fromNat n + +-- Special element Z_k for each level k +def Z (k : ℕ) : ConcreteBinaryTower k := + if k = 0 then one + else BitVec.ofNat (2^k) (1 <<< 2^(k-1)) + -- fromNat (2^(2^(k-1))) + -- For k > 0, Z_k is defined based on the irreducible polynomial + -- TODO: Define Z_k properly for k > 0 + +-- Algebraic map from level k to level k+1 +def algebraMap {k : ℕ} : ConcreteBinaryTower k → ConcreteBinaryTower (k+1) := + fun x => fromNat (BitVec.toNat x) + +-- Define the irreducible polynomial for level k +noncomputable def definingPoly {k : ℕ} [Semiring (ConcreteBinaryTower k)] : + Polynomial (ConcreteBinaryTower k) := + -- it depends on 'Polynomial.add'', and it does not have executable code + X^2 + (C (Z k) * X + 1) + +-- Basic operations +def add {k : ℕ} (x y : ConcreteBinaryTower k) : ConcreteBinaryTower k := BitVec.xor x y +def neg {k : ℕ} (x : ConcreteBinaryTower k) : ConcreteBinaryTower k := x + +-- Type class instances +instance (k : ℕ) : HAdd (ConcreteBinaryTower k) (ConcreteBinaryTower k) (ConcreteBinaryTower k) + where hAdd := add + +-- Type class instances +instance (k : ℕ) : Add (ConcreteBinaryTower k) where + add := add + +-- instance (k: ℕ) : OfNat (ConcreteBinaryTower k) 0 where + -- ofNat := zero +-- instance (k: ℕ) : OfNat 
(ConcreteBinaryTower k) 1 where + -- ofNat := one + +theorem sum_fromNat_eq_from_xor_Nat {k : ℕ} (x y : Nat) : + fromNat (k:=k) (x ^^^ y) = fromNat (k:=k) x + fromNat (k:=k) y := by + unfold fromNat + simp only [instHAddConcreteBinaryTower, add, BitVec.xor_eq] + rw [BitVec.ofNat_xor] + +-- Basic lemmas for addition +lemma add_self_cancel {k : ℕ} (a : ConcreteBinaryTower k) : a + a = 0 := by + exact BitVec.xor_self (x:=a) + +lemma add_eq_zero_iff_eq {k : ℕ} (a b : ConcreteBinaryTower k) : a + b = 0 ↔ a = b := by + exact BitVec.xor_eq_zero_iff + +lemma add_assoc {k: ℕ}: ∀ (a b c : ConcreteBinaryTower k), a + b + c = a + (b + c) := by + exact BitVec.xor_assoc + +-- Addition is commutative +lemma add_comm {k : ℕ} (a b : ConcreteBinaryTower k) : a + b = b + a := by + exact BitVec.xor_comm a b + +-- Zero is identity +lemma zero_add {k : ℕ} (a : ConcreteBinaryTower k) : 0 + a = a := by + exact BitVec.zero_xor (x:=a) + +lemma add_zero {k : ℕ} (a : ConcreteBinaryTower k) : a + 0 = a := by + exact BitVec.xor_zero (x:=a) + +-- Negation is additive inverse (in char 2, neg = id) +lemma neg_add_cancel {k : ℕ} (a : ConcreteBinaryTower k) : neg a + a = 0 := by + exact BitVec.xor_self (x:=a) + +lemma if_self_rfl {α : Type*} [DecidableEq α] (a b : α) : + (if a = b then b else a) = a := by + by_cases h : a = b + · rw [if_pos h, h] + · rw [if_neg h] + + +-- Isomorphism between ConcreteBinaryTower 0 and GF(2) +-- Ensure GF(2) has decidable equality +noncomputable instance : DecidableEq (GF(2)) := + fun x y => + -- Use the isomorphism between GF(2) and ZMod 2 + let φ : GF(2) ≃ₐ[ZMod 2] ZMod 2 := GaloisField.equivZmodP 2 + -- ZMod 2 has decidable equality + if h : φ x = φ y then + isTrue (by + -- φ is injective, so φ x = φ y implies x = y + exact φ.injective h) + else + isFalse (by + intro h_eq + -- If x = y, then φ x = φ y + apply h + exact congrArg φ h_eq) + +instance (k : ℕ) : DecidableEq (ConcreteBinaryTower k) := + fun x y => + let p := BitVec.toNat x = BitVec.toNat y + let q := x = 
y + let hp : Decidable p := Nat.decEq (BitVec.toNat x) (BitVec.toNat y) + let h_iff_pq : p ↔ q := (BitVec.toNat_eq).symm -- p is (toNat x = toNat y), q is (x = y) + match hp with + | isTrue (proof_p : p) => + -- We have a proof of p (toNat x = toNat y). We need a proof of q (x = y). + -- h_iff_pq.mp gives p → q. So, (h_iff_pq.mp proof_p) is a proof of q. + isTrue (h_iff_pq.mp proof_p) + | isFalse (nproof_p : ¬p) => + -- We have a proof of ¬p. We need a proof of ¬q (which is q → False). + -- So, assume proof_q : q. We need to derive False. + -- h_iff_pq.mpr gives q → p. So, (h_iff_pq.mpr proof_q) is a proof of p. + -- This contradicts nproof_p. + isFalse (fun (proof_q : q) => nproof_p (h_iff_pq.mpr proof_q)) + +noncomputable def toConcreteBTF0 : GF(2) → ConcreteBinaryTower 0 := + fun x => if decide (x = 0) then zero else one -- it depends on 'instFieldGaloisField' + +noncomputable def fromConcreteBTF0 : ConcreteBinaryTower 0 → (GF(2)) := + fun x => if decide (x = zero) then 0 else 1 + +lemma nsmul_succ {k : ℕ} (n : ℕ) (x : ConcreteBinaryTower k) : + (if ↑n.succ % 2 = 0 then zero else x) = (if ↑n % 2 = 0 then zero else x) + x := by + have h : ↑n.succ % 2 = (↑n % 2 + 1) % 2 := by + simp + have zero_is_0: (zero: ConcreteBinaryTower k) = 0 := by rfl + have h_n_mod_le: n % 2 < 2 := Nat.mod_lt n (by norm_num) + match h_n_mod: n % 2 with + | Nat.zero => + rw [h] + have h1 : (n + 1) % 2 = 1 := by simp [Nat.add_mod, h_n_mod] + simp [h1]; rw [(add_zero x).symm]; rw [←add_assoc, ←add_comm]; + rw [zero_is_0]; + rw [zero_add]; + rw [add_zero] + | Nat.succ x' => -- h_n_mod : n % 2 = x'.succ + match x' with + | Nat.zero => -- h_n_mod : n % 2 = Nat.zero.succ => h_n_mod automatically updates + rw [h] + have h_n_mod_eq_1 : n % 2 = 1 := by rw [h_n_mod] + rw [h_n_mod_eq_1] + have h1 : (1 + 1 : ℤ) % 2 = 0 := by norm_num + simp [h1] + rw [zero_is_0, add_self_cancel] + | Nat.succ _ => -- h_n_mod : n % 2 = n✝.succ.succ + have h_n_mod_ge_2 : n % 2 ≥ 2 := by + rw [h_n_mod] + apply 
Nat.le_add_left + rw [h_n_mod] at h_n_mod_le + linarith + +lemma zsmul_succ {k : ℕ} (n : ℕ) (x : ConcreteBinaryTower k) : + (if (↑n.succ: ℤ) % 2 = 0 then zero else x) = (if (↑n: ℤ) % 2 = 0 then zero else x) + x := by + norm_cast + exact nsmul_succ n x + +lemma neg_mod_2_eq_0_iff_mod_2_eq_0 {n : ℤ} : (-n) % 2 = 0 ↔ n % 2 = 0 := by + constructor + · intro h + apply Int.dvd_iff_emod_eq_zero.mp + apply Int.dvd_neg.mp + exact Int.dvd_iff_emod_eq_zero.mpr h + · intro h + apply Int.dvd_iff_emod_eq_zero.mp + apply Int.dvd_neg.mpr + exact Int.dvd_iff_emod_eq_zero.mpr h + +-- Int.negSucc n = -(n+1) +lemma zsmul_neg' {k : ℕ} (n : ℕ) (a : ConcreteBinaryTower k) : + (if ((Int.negSucc n):ℤ) % (2:ℤ) = (0:ℤ) then zero else a) = + neg (if (↑n.succ: ℤ) % (2:ℤ) = (0:ℤ) then zero else a) := +by + have negSucc_eq_minus_of_n_plus_1: Int.negSucc n = -(n + 1) := by rfl + rw [negSucc_eq_minus_of_n_plus_1] + have n_succ_eq_n_plus_1: (↑n.succ: ℤ) = (↑n: ℤ) + 1 := by rfl + rw [n_succ_eq_n_plus_1] + -- Split on two cases of the `if ... 
else` statement + by_cases h : (n + 1) % 2 = 0 + { -- h: (n + 1) % 2 = 0 + have n_succ_mod_2_eq_0: ((↑n: ℤ) + 1) % 2 = 0 := by norm_cast + rw [n_succ_mod_2_eq_0] + -- ⊢ (if -(↑n + 1) % 2 = 0 then zero else a) = neg (if 0 = 0 then zero else a) + have neg_n_succ_mod_2_eq_0: (-((↑n: ℤ) + 1)) % 2 = 0 := by + exact (neg_mod_2_eq_0_iff_mod_2_eq_0 (n:=((n: ℤ) + 1))).mpr n_succ_mod_2_eq_0 + -- ⊢ (if 0 = 0 then zero else a) = neg (if 0 = 0 then zero else a) + rw [neg_n_succ_mod_2_eq_0] + simp + rfl + } + { -- h : ¬(n + 1) % 2 = 0 + push_neg at h -- h : (n + 1) % 2 ≠ 0 + have n_succ_mod_2_ne_1: ((↑n: ℤ) + 1) % 2 = 1 := by + have h_mod : (n + 1) % 2 = 1 := by -- prove the ℕ-version of the hypothesis & use norm_cast + have tmp := Nat.mod_two_eq_zero_or_one (n:=n+1) + match tmp with + | Or.inl h_mod_eq_0 => + rw [h_mod_eq_0] + contradiction + | Or.inr h_mod_eq_1 => + rw [h_mod_eq_1] + norm_cast + rw [n_succ_mod_2_ne_1] + have neg_n_succ_mod_2_ne_0: (-((↑n: ℤ) + 1)) % 2 ≠ 0 := by + by_contra h_eq_0 + have neg_succ_mod_2_eq_0: ((↑n: ℤ) + 1) % 2 = 0 := + (neg_mod_2_eq_0_iff_mod_2_eq_0 (n:=((n: ℤ) + 1))).mp h_eq_0 + have neg_succ_mod_2_eq_0_nat: (n + 1) % 2 = 0 := by + have : ↑((n + 1) % 2) = ((↑n : ℤ) + 1) % 2 := by norm_cast + rw [neg_succ_mod_2_eq_0] at this + apply Int.ofNat_eq_zero.mp -- simp [Int.ofNat_emod] + exact this + rw [neg_succ_mod_2_eq_0_nat] at h + contradiction + -- ⊢ (if -(↑n + 1) % 2 = 0 then zero else a) = neg (if 1 = 0 then zero else a) + rw [if_neg one_ne_zero, neg] + rw [if_neg neg_n_succ_mod_2_ne_0] + } + +instance (k : ℕ) : AddCommGroup (ConcreteBinaryTower k) where + toZero := inferInstance + neg := neg + sub := fun x y => add x y + add_assoc := add_assoc + add_comm := add_comm + zero_add := zero_add + add_zero := add_zero + nsmul := fun n x => if n % 2 = (0:ℕ) then zero else x + zsmul := fun (n:ℤ) x => if n % 2 = 0 then zero else x -- Changed to n : ℤ + neg_add_cancel := neg_add_cancel + nsmul_succ := nsmul_succ + zsmul_succ' := fun n a => zsmul_succ n a 
+ add := add + zsmul_neg' := zsmul_neg' (k := k) + +lemma zero_is_0 {k: ℕ}: (zero (k:=k)) = (0: ConcreteBinaryTower k) := by rfl +lemma one_is_1 {k: ℕ}: (one (k:=k)) = 1 := by rfl +lemma concrete_one_ne_zero {k : ℕ} : (one (k:=k)) ≠ (zero (k:=k)) := by + intro h_eq + have h_toNat_eq : (one (k:=k)).toNat = (zero (k:=k)).toNat := congrArg BitVec.toNat h_eq + simp [one, zero, BitVec.toNat_ofNat] at h_toNat_eq + +instance {k: ℕ}: NeZero (1 : ConcreteBinaryTower k) := by + unfold ConcreteBinaryTower + exact {out := concrete_one_ne_zero (k:=k) } + +section NumericLemmas + +lemma one_le_sub_middle_of_pow2 {k: ℕ} (h_k: 1 ≤ k): 1 ≤ 2 ^ k - 2 ^ (k - 1) := by + have l1: 2 ^ (k - 1 + 1) = 2^k := by congr 1; omega + have res := one_le_sub_consecutive_two_pow (n:=k-1) + rw [l1] at res + exact res + +lemma sub_middle_of_pow2_with_one_canceled {k: ℕ} (h_k: 1 ≤ k): 2 ^ k - 1 - 2 ^ (k - 1) + 1 + = 2 ^ (k - 1) := by + calc 2 ^ k - 1 - 2 ^ (k - 1) + 1 = 2 ^ k - 2 ^ (k - 1) - 1 + 1 := by omega + _ = 2 ^ k - 2 ^ (k - 1) := by + rw [Nat.sub_add_cancel];exact one_le_sub_middle_of_pow2 (h_k:=h_k) + _ = 2^(k-1) * 2 - 2 ^ (k - 1):= by + congr 1 + rw [← Nat.pow_succ] + congr + simp only [Nat.succ_eq_add_one] + rw [Nat.sub_add_cancel] + omega + _ = 2^(k-1) + 2^(k-1) - 2^(k-1) := by rw [Nat.mul_two] + _ = 2^(k-1) := Nat.add_sub_self_left (2 ^ (k - 1)) (2 ^ (k - 1)) + +lemma h_sub_middle {k: ℕ} (h_pos: k > 0): 2 ^ k - 1 - 2 ^ (k - 1) + 1 = 2 ^ (k - 1) := by + exact sub_middle_of_pow2_with_one_canceled (k:=k) (h_k:=h_pos) + +lemma h_middle_sub {k: ℕ}: 2 ^ (k - 1) - 1 - 0 + 1 = 2 ^ (k - 1) := by + rw [Nat.sub_zero, Nat.sub_add_cancel (h:=one_le_two_pow_n (n:=k-1))] + +lemma h_sum_two_same_pow2 {k: ℕ} (h_pos: k > 0): 2 ^ (k - 1) + 2 ^ (k - 1) = 2 ^ k := by + rw [← Nat.mul_two, Nat.mul_comm, Nat.mul_comm, Nat.two_pow_pred_mul_two h_pos] + +end NumericLemmas + +-- Split a field element into high and low parts +def split {k : ℕ} (h : k > 0) (x : ConcreteBinaryTower k) : + ConcreteBinaryTower (k - 1) × 
ConcreteBinaryTower (k - 1) := + let lo_bits: BitVec (2 ^ (k - 1) - 1 - 0 + 1) := + BitVec.extractLsb (hi := 2 ^ (k - 1) - 1) (lo := 0) x + let hi_bits: BitVec (2 ^ k - 1 - 2 ^ (k - 1) + 1) := + BitVec.extractLsb (hi := 2 ^ k - 1) (lo := 2 ^ (k - 1)) x + have h_lo: 2 ^ (k - 1) - 1 - 0 + 1 = 2 ^ (k - 1) := by + calc 2 ^ (k - 1) - 1 - 0 + 1 = 2 ^ (k - 1) - 1 + 1 := by norm_num + _ = 2^(k-1) := by rw [Nat.sub_add_cancel]; exact one_le_two_pow_n (n:=k-1) + have h_hi := sub_middle_of_pow2_with_one_canceled (k:=k) (h_k:=by omega) + -- Use cast to avoid overuse of fromNat & toNat + let lo: BitVec (2^(k-1)) := dcast h_lo lo_bits + let hi: BitVec (2^(k-1)) := dcast h_hi hi_bits + (hi, lo) + +def join {k : ℕ} (h_pos : k > 0) (hi lo : ConcreteBinaryTower (k-1)) : ConcreteBinaryTower k := by + let res := BitVec.append (msbs:=hi) (lsbs:=lo) + rw [h_sum_two_same_pow2 (h_pos:=h_pos)] at res + exact res + +theorem BitVec.extractLsb_eq_shift_ofNat {n : Nat} (x : BitVec n) (l r : Nat) : + BitVec.extractLsb r l x = BitVec.ofNat (r - l + 1) (x.toNat >>> l) := by + unfold BitVec.extractLsb BitVec.extractLsb' + rfl + +theorem setWidth_eq_ofNat_mod {n num_bits : Nat} (x : BitVec n) : + BitVec.setWidth num_bits x = BitVec.ofNat num_bits (x.toNat % 2 ^ num_bits) := by + simp only [BitVec.ofNat_toNat, ← BitVec.toNat_setWidth, BitVec.setWidth_eq] + +theorem BitVec.extractLsb_eq_and_pow_2_minus_1_ofNat {n num_bits : Nat} + (h_num_bits: num_bits > 0) (x : BitVec n) : + BitVec.extractLsb (hi:= num_bits-1) (lo := 0) x = + BitVec.ofNat (num_bits - 1 - 0 + 1) (x.toNat &&& (2^num_bits-1)) := by + unfold BitVec.extractLsb BitVec.extractLsb' + simp only [Nat.sub_zero, Nat.shiftRight_zero] + have eq: num_bits - 1 + 1 = num_bits := Nat.sub_add_cancel (h:=h_num_bits) + rw [eq] + have lhs: BitVec.ofNat num_bits x.toNat = BitVec.ofNat num_bits (x.toNat % 2^num_bits) := by + simp only [BitVec.ofNat_toNat, setWidth_eq_ofNat_mod] + have rhs: BitVec.ofNat num_bits (x.toNat &&& 2 ^ num_bits - 1) = + 
BitVec.ofNat num_bits (x.toNat % 2^num_bits) := by + congr + exact Nat.and_two_pow_sub_one_eq_mod x.toNat num_bits + rw [lhs, ←rhs] + +theorem split_bitvec_eq_iff_fromNat {k : ℕ} (h_pos : k > 0) (x : ConcreteBinaryTower k) + (hi_btf lo_btf : ConcreteBinaryTower (k - 1)): + split h_pos x = (hi_btf, lo_btf) ↔ + (hi_btf = fromNat (k:=k-1) (x.toNat >>> 2^(k-1)) ∧ + lo_btf = fromNat (k:=k-1) (x.toNat &&& (2^(2^(k-1)) - 1))) := by + have lhs_lo_case := BitVec.extractLsb_eq_and_pow_2_minus_1_ofNat (num_bits:=2^(k-1)) + (n:=2^k) (Nat.two_pow_pos (k - 1)) (x:=x) + have rhs_hi_case_bitvec_eq := BitVec.extractLsb_eq_shift_ofNat (n:=2^k) (r:=2^k - 1) + (l:=2^(k-1)) (x:=x) + constructor + · -- Forward direction: split x = (hi_btf, lo_btf) → bitwise operations + intro h_split + unfold split at h_split + have ⟨h_hi, h_lo⟩ := Prod.ext_iff.mp h_split + simp only at h_hi h_lo + have hi_eq: hi_btf = fromNat (k:=k-1) (x.toNat >>> 2^(k-1)) := by + unfold fromNat + rw [←h_hi] + rw [dcast_symm (h_sub_middle h_pos).symm] + rw [rhs_hi_case_bitvec_eq] + rw [BitVec.dcast_bitvec_eq] + have lo_eq: lo_btf = fromNat (k:=k-1) (x.toNat &&& ((2^(2^(k-1)) - 1))) := by + unfold fromNat + rw [←h_lo] + have rhs_lo_case_bitvec_eq := + BitVec.extractLsb_eq_shift_ofNat (n:=2^k) (r:=2^(k-1)-1) (l:=0) (x:=x) + rw [dcast_symm (h_middle_sub).symm] + rw [rhs_lo_case_bitvec_eq] + rw [BitVec.dcast_bitvec_eq] -- remove dcast + rw [←lhs_lo_case] + exact rhs_lo_case_bitvec_eq + exact ⟨hi_eq, lo_eq⟩ + · -- Backward direction: bitwise operations → split x = (hi_btf, lo_btf) + intro h_bits + unfold split + have ⟨h_hi, h_lo⟩ := h_bits + have hi_extract_eq: dcast (h_sub_middle h_pos) (BitVec.extractLsb (hi := 2 ^ k - 1) + (lo := 2 ^ (k - 1)) x) = fromNat (k:=k-1) (x.toNat >>> 2^(k-1)) := by + unfold fromNat + rw [dcast_symm (h_sub_middle h_pos).symm] + rw [rhs_hi_case_bitvec_eq] + rw [BitVec.dcast_bitvec_eq] + + have lo_extract_eq: dcast (h_middle_sub) (BitVec.extractLsb (hi := 2 ^ (k - 1) - 1) + (lo := 0) x) = fromNat 
(k:=k-1) (x.toNat &&& ((2^(2^(k-1)) - 1))) := by + unfold fromNat + rw [lhs_lo_case] + rw [BitVec.dcast_bitvec_eq] + + simp only [hi_extract_eq, Nat.sub_zero, lo_extract_eq, Nat.and_two_pow_sub_one_eq_mod, h_hi, + h_lo] + +theorem join_eq_iff_dcast_extractLsb {k : ℕ} (h_pos : k > 0) (x : ConcreteBinaryTower k) + (hi_btf lo_btf : ConcreteBinaryTower (k - 1)) : + x = join h_pos hi_btf lo_btf ↔ + (hi_btf = dcast (h_sub_middle h_pos) (BitVec.extractLsb (hi := 2 ^ k - 1) (lo := 2 ^ (k - 1)) x) ∧ + lo_btf = dcast (h_middle_sub) (BitVec.extractLsb (hi := 2 ^ (k - 1) - 1) (lo := 0) x)) := by + constructor + · intro h_join + unfold join at h_join + have h_two_pow_k_minus_1_gt_0: 2 ^ (k - 1) > 0 := by exact Nat.two_pow_pos (k - 1) + have h_x: x = dcast (h_sum_two_same_pow2 h_pos) + (BitVec.append (msbs:=hi_btf) (lsbs:=lo_btf)) := by + rw [h_join] + simp only + rw [BitVec.eq_mp_eq_dcast] + have h_x_symm := dcast_symm (hb:=h_x.symm) + have h_hi : hi_btf = dcast (h_sub_middle h_pos) + (BitVec.extractLsb (hi := 2 ^ k - 1) (lo := 2 ^ (k - 1)) x) := by + have hi_btf_is_dcast_extract := (dcast_symm (hb:=(BitVec.extractLsb_concat_hi (hi:=hi_btf) + (lo:=lo_btf) (h_hi:=h_two_pow_k_minus_1_gt_0)).symm)).symm + rw [hi_btf_is_dcast_extract] + rw [dcast_eq_dcast_iff] -- undcast in lhs + rw [h_x] + rw [BitVec.dcast_bitvec_extractLsb_eq (h_lo_eq:=rfl)] + rfl + have h_lo : + lo_btf = dcast (h_middle_sub) (BitVec.extractLsb (hi := 2^(k-1) - 1) (lo :=0) x) := by + have lo_btf_is_dcast_extract := (dcast_symm (hb:=(BitVec.extractLsb_concat_lo (hi:=hi_btf) + (lo:=lo_btf) (h_lo:=h_two_pow_k_minus_1_gt_0)).symm)).symm + rw [lo_btf_is_dcast_extract] + rw [dcast_eq_dcast_iff] -- undcast in lhs + rw [h_x] + rw [BitVec.dcast_bitvec_extractLsb_eq (h_lo_eq:=rfl)] + rfl + exact ⟨h_hi, h_lo⟩ + -- Backward direction: bitwise operations → x = join h_pos hi_btf lo_btf + · intro h_bits + -- ⊢ x = join h_pos hi_btf lo_btf + have two_pow_k_minus_1_gt_0: 2 ^ (k - 1) > 0 := by + simp only [Nat.two_pow_pos] + have 
sum1:= h_sum_two_same_pow2 (k:=k) (h_pos:=by omega).symm + have res := BitVec.eq_append_iff_extract (lo:=lo_btf) (hi:=hi_btf) + (h_hi_gt_0:=two_pow_k_minus_1_gt_0) (h_lo_gt_0:=two_pow_k_minus_1_gt_0) + (x:=dcast (by exact sum1) x).mpr + have ⟨h_hi, h_lo⟩ := h_bits + have sum2: 2 ^ k - 1 = 2 ^ (k - 1) + 2 ^ (k - 1) - 1 := by omega + rw! [sum2] at h_hi + have h_x_eq := res ⟨h_hi, h_lo⟩ + rw [dcast_eq_dcast_iff] at h_x_eq + unfold join + rw [BitVec.eq_mp_eq_dcast (h_width_eq:=by exact sum1.symm) (h_bitvec_eq:=by rw [sum1.symm])] + rw [h_x_eq] + +theorem join_eq_join_iff {k: ℕ} (h_pos: k > 0) (hi₀ lo₀ hi₁ lo₁: ConcreteBinaryTower (k-1)): + join h_pos hi₀ lo₀ = join h_pos hi₁ lo₁ ↔ (hi₀ = hi₁ ∧ lo₀ = lo₁) := by + constructor + · intro h_join + let x₀ := join h_pos hi₀ lo₀ + let x₁ := join h_pos hi₁ lo₁ + have h_x₀: x₀ = join h_pos hi₀ lo₀ := by rfl + have h_x₁: x₁ = join h_pos hi₁ lo₁ := by rfl + have h₀ := join_eq_iff_dcast_extractLsb h_pos x₀ hi₀ lo₀ + have h₁ := join_eq_iff_dcast_extractLsb h_pos x₁ hi₁ lo₁ + have h_x₀_eq_x₁: x₀ = x₁ := by rw [h_x₀, h_x₁, h_join] + have ⟨h_hi₀, h_lo₀⟩ := h₀.mp h_x₀ + have ⟨h_hi₁, h_lo₁⟩ := h₁.mp h_x₁ + rw [h_hi₁, h_hi₀, h_lo₀, h_lo₁] + rw [h_x₀_eq_x₁] + exact ⟨rfl, rfl⟩ + · intro h_eq + rw [h_eq.1, h_eq.2] + +theorem join_eq_bitvec_iff_fromNat {k : ℕ} (h_pos : k > 0) (x : ConcreteBinaryTower k) + (hi_btf lo_btf : ConcreteBinaryTower (k - 1)) : + x = join h_pos hi_btf lo_btf ↔ + (hi_btf = fromNat (k:=k-1) (x.toNat >>> 2^(k-1)) ∧ + lo_btf = fromNat (k:=k-1) (x.toNat &&& (2^(2^(k-1)) - 1))) := by + -- Idea: derive from theorem join_eq_iff_dcast_extractLsb + constructor + · -- Forward direction + intro h_join + have h := join_eq_iff_dcast_extractLsb h_pos x hi_btf lo_btf + have ⟨h_hi, h_lo⟩ := h.mp h_join + have hi_eq : hi_btf = fromNat (k:=k-1) (x.toNat >>> 2^(k-1)) := by + rw [h_hi] + have := BitVec.extractLsb_eq_shift_ofNat (n:=2^k) (r:=2^k - 1) (l:=2^(k-1)) (x:=x) + rw [this] + unfold fromNat + rw [BitVec.dcast_bitvec_eq] + have 
lo_eq : lo_btf = fromNat (k:=k-1) (x.toNat &&& (2^(2^(k-1)) - 1)) := by + rw [h_lo] + have := BitVec.extractLsb_eq_and_pow_2_minus_1_ofNat (num_bits:=2^(k-1)) (n:=2^k) + (Nat.two_pow_pos (k - 1)) (x:=x) + rw [this] + unfold fromNat + rw [BitVec.dcast_bitvec_eq] + exact ⟨hi_eq, lo_eq⟩ + · -- Backward direction + intro h_bits + have ⟨h_hi, h_lo⟩ := h_bits + have h := join_eq_iff_dcast_extractLsb h_pos x hi_btf lo_btf + have hi_eq : hi_btf = dcast (h_sub_middle h_pos) + (BitVec.extractLsb (hi := 2^k - 1) (lo := 2^(k-1)) x) := by + rw [h_hi] + unfold fromNat + have := BitVec.extractLsb_eq_shift_ofNat (n:=2^k) (r:=2^k - 1) (l:=2^(k-1)) (x:=x) + rw [this] + rw [BitVec.dcast_bitvec_eq] + have lo_eq : lo_btf = dcast (h_middle_sub) + (BitVec.extractLsb (hi := 2^(k-1) - 1) (lo := 0) x) := by + rw [h_lo] + unfold fromNat + have := BitVec.extractLsb_eq_and_pow_2_minus_1_ofNat (num_bits:=2^(k-1)) (n:=2^k) + (Nat.two_pow_pos (k - 1)) (x:=x) + rw [this] + rw [BitVec.dcast_bitvec_eq] + exact h.mpr ⟨hi_eq, lo_eq⟩ + +theorem join_of_split {k : ℕ} (h_pos : k > 0) (x : ConcreteBinaryTower k) + (hi_btf lo_btf : ConcreteBinaryTower (k - 1)) + (h_split_eq : split h_pos x = (hi_btf, lo_btf)): + x = join h_pos hi_btf lo_btf := by + have h_split := (split_bitvec_eq_iff_fromNat (k:=k) (h_pos:=h_pos) x hi_btf lo_btf).mp h_split_eq + obtain ⟨h_hi, h_lo⟩ := h_split + exact (join_eq_bitvec_iff_fromNat h_pos x hi_btf lo_btf).mpr ⟨h_hi, h_lo⟩ + +theorem split_of_join {k : ℕ} (h_pos : k > 0) (x : ConcreteBinaryTower k) + (hi_btf lo_btf : ConcreteBinaryTower (k - 1)) + (h_join: x = join h_pos hi_btf lo_btf): + (hi_btf, lo_btf) = split h_pos x := by + have ⟨h_hi, h_lo⟩ := (join_eq_bitvec_iff_fromNat h_pos x hi_btf lo_btf).mp h_join + exact ((split_bitvec_eq_iff_fromNat h_pos x hi_btf lo_btf).mpr ⟨h_hi, h_lo⟩).symm + +theorem bitvec_eq_iff_split_eq {k: ℕ} (h_pos: k > 0) (x₀ x₁: ConcreteBinaryTower k): + x₀ = x₁ ↔ (split h_pos x₀ = split h_pos x₁) := by + constructor + · intro h_eq + rw [h_eq] + · 
intro h_split + let p₀ := split h_pos x₀ + let hi₀ := p₀.fst + let lo₀ := p₀.snd + have h_split_x₀: split h_pos x₀ = (hi₀, lo₀) := by rfl + have h_split_x₁: split h_pos x₁ = (hi₀, lo₀) := by + rw [h_split.symm] + have h_x₀ := (split_bitvec_eq_iff_fromNat h_pos x₀ hi₀ lo₀).mp h_split_x₀ + have h_x₁ := (split_bitvec_eq_iff_fromNat h_pos x₁ hi₀ lo₀).mp h_split_x₁ + -- now prove that x₀ is join of hi₀ and lo₀, similarly for x₁, so they are equal + have h_x₀_eq_join := join_of_split h_pos x₀ hi₀ lo₀ h_split_x₀ + have h_x₁_eq_join := join_of_split h_pos x₁ hi₀ lo₀ h_split_x₁ + rw [h_x₀_eq_join, h_x₁_eq_join] + +theorem split_sum_eq_sum_split {k: ℕ} (h_pos: k > 0) (x₀ x₁: ConcreteBinaryTower k) + (hi₀ lo₀ hi₁ lo₁: ConcreteBinaryTower (k-1)) + (h_split_x₀: split h_pos x₀ = (hi₀, lo₀)) + (h_split_x₁: split h_pos x₁ = (hi₁, lo₁)): + split h_pos (x₀ + x₁) = (hi₀ + hi₁, lo₀ + lo₁) := by + have h_x₀ := join_of_split h_pos x₀ hi₀ lo₀ h_split_x₀ + have h_x₁ := join_of_split h_pos x₁ hi₁ lo₁ h_split_x₁ + -- Approach: convert equation to Nat realm for simple proof + have h₀ := (split_bitvec_eq_iff_fromNat (k:=k) (h_pos:=h_pos) x₀ hi₀ lo₀).mp h_split_x₀ + have h₁ := (split_bitvec_eq_iff_fromNat (k:=k) (h_pos:=h_pos) x₁ hi₁ lo₁).mp h_split_x₁ + have h_sum_hi : (hi₀ + hi₁) = fromNat (BitVec.toNat (x₀ + x₁) >>> 2 ^ (k - 1)) := by + rw [h₀.1, h₁.1] + rw [←sum_fromNat_eq_from_xor_Nat] + have h_nat_eq: BitVec.toNat x₀ >>> 2 ^ (k - 1) ^^^ BitVec.toNat x₁ >>> 2 ^ (k - 1) + = BitVec.toNat (x₀ + x₁) >>> 2 ^ (k - 1) := by + -- unfold Concrete BTF addition into BitVec.xor + simp only [instHAddConcreteBinaryTower, add, BitVec.xor_eq] + rw [Nat.shiftRight_xor_distrib.symm] + rw [BitVec.toNat_xor] -- distribution of BitVec.xor over BitVec.toNat + rw [h_nat_eq] + have h_sum_lo : (lo₀ + lo₁) = fromNat (BitVec.toNat (x₀ + x₁) &&& 2 ^ 2 ^ (k - 1) - 1) := by + rw [h₀.2, h₁.2] + rw [←sum_fromNat_eq_from_xor_Nat] + have h_nat_eq: BitVec.toNat x₀ &&& 2 ^ 2 ^ (k - 1) - 1 ^^^ BitVec.toNat x₁ + &&& 2 ^ 2 ^ 
(k - 1) - 1 = BitVec.toNat (x₀ + x₁) &&& 2 ^ 2 ^ (k - 1) - 1 := by + simp only [instHAddConcreteBinaryTower, add, BitVec.xor_eq] + rw [BitVec.toNat_xor] + rw [Nat.and_xor_distrib_right.symm] + rw [h_nat_eq] + have h_sum_hi_lo : (hi₀ + hi₁, lo₀ + lo₁) = split h_pos (x₀ + x₁) := by + rw [(split_bitvec_eq_iff_fromNat (k:=k) (h_pos:=h_pos) (x₀ + x₁) + (hi₀ + hi₁) (lo₀ + lo₁)).mpr ⟨h_sum_hi, h_sum_lo⟩] + exact h_sum_hi_lo.symm + +theorem join_add_join {k: ℕ} (h_pos: k > 0) (hi₀ lo₀ hi₁ lo₁: ConcreteBinaryTower (k-1)): + join h_pos hi₀ lo₀ + join h_pos hi₁ lo₁ = join h_pos (hi₀ + hi₁) (lo₀ + lo₁) := by + set x₀ := join h_pos hi₀ lo₀ + set x₁ := join h_pos hi₁ lo₁ + set x₂ := join h_pos (hi₀ + hi₁) (lo₀ + lo₁) + have h_x₀: x₀ = join h_pos hi₀ lo₀ := by rfl + have h_x₁: x₁ = join h_pos hi₁ lo₁ := by rfl + have h_x₂: x₂ = join h_pos (hi₀ + hi₁) (lo₀ + lo₁) := by rfl + -- proof: x₀ + x₁ = x₂, utilize split_sum_eq_sum_split and bitvec_eq_iff_split_eq + have h_split_x₀ := split_of_join h_pos x₀ hi₀ lo₀ h_x₀ + have h_split_x₁ := split_of_join h_pos x₁ hi₁ lo₁ h_x₁ + have h_split_x₂ := split_of_join h_pos x₂ (hi₀ + hi₁) (lo₀ + lo₁) h_x₂ + have h_split_x₂_via_sum := split_sum_eq_sum_split h_pos x₀ x₁ hi₀ lo₀ hi₁ + lo₁ h_split_x₀.symm h_split_x₁.symm + rw [h_split_x₂_via_sum.symm] at h_split_x₂ + have h_eq := (bitvec_eq_iff_split_eq h_pos (x₀ + x₁) x₂).mpr h_split_x₂ + exact h_eq + +theorem split_zero {k: ℕ} (h_pos: k > 0): split h_pos zero = (zero, zero) := by + rw [split] + simp only [zero, BitVec.zero_eq, BitVec.extractLsb_ofNat, Nat.zero_mod, Nat.zero_shiftRight, + Nat.sub_zero, Nat.shiftRight_zero, BitVec.dcast_bitvec_eq_zero] + +lemma one_bitvec_toNat {width: ℕ} (h_width: width > 0): (1#width).toNat = 1 := by + simp only [BitVec.toNat_ofNat, Nat.one_mod_two_pow_eq_one, h_width] + +lemma one_bitvec_shiftRight {d: ℕ} (h_d: d > 0): 1 >>> d = 0 := by + apply Nat.shiftRight_eq_zero + rw [Nat.one_lt_two_pow_iff] + exact Nat.ne_zero_of_lt h_d + +lemma split_one {k : ℕ} (h_k : k > 
0) : split h_k (one (k:=k)) = (zero (k:=k-1), one (k:=k-1)) := by + rw [split] + let lo_bits := BitVec.extractLsb (hi := 2 ^ (k - 1) - 1) (lo := 0) (one (k:=k)) + let hi_bits := BitVec.extractLsb (hi := 2 ^ k - 1) (lo := 2 ^ (k - 1)) (one (k:=k)) + apply Prod.ext + · simp only + simp only [BitVec.extractLsb, BitVec.extractLsb'] + rw [one] + have one_toNat_eq := one_bitvec_toNat (width:=2^k) + (h_width:=zero_lt_pow_n (m:=2) (n:=k) (h_m:=Nat.zero_lt_two)) + rw [one_toNat_eq] + have one_shiftRight_eq: 1 >>> 2 ^ (k - 1) = 0 := + one_bitvec_shiftRight (d:=2^(k-1)) (h_d:=by exact Nat.two_pow_pos (k - 1)) + rw [one_shiftRight_eq] + rw [zero, BitVec.zero_eq] + have h_sub_middle := sub_middle_of_pow2_with_one_canceled (k:=k) (h_k:=h_k) + rw [BitVec.dcast_bitvec_eq_zero] + · simp only + simp only [BitVec.extractLsb, BitVec.extractLsb'] + simp only [Nat.sub_zero, one, BitVec.toNat_ofNat, Nat.ofNat_pos, pow_pos, Nat.one_mod_two_pow, + Nat.shiftRight_zero] -- converts BitVec.toNat one >>> 0 into 1#(2 ^ (k - 1)) + rw [BitVec.dcast_bitvec_eq] + +lemma join_zero_zero {k : ℕ} (h_k : k > 0) : + join h_k (zero (k:=k-1)) (zero (k:=k-1)) = zero (k:=k) := by + have h_1 := split_zero h_k + exact (join_of_split h_k (zero) (zero) (zero) (h_1)).symm + +theorem join_zero_one {k: ℕ} (h_k: k > 0): join h_k zero one = one := by + have h_1 := split_one h_k + have res := join_of_split (h_pos:=h_k) (x:=one) (hi_btf:=zero) (lo_btf:=one) (h_1) + exact res.symm + +def equivProd {k : ℕ} (h_k_pos : k > 0) : + ConcreteBinaryTower k ≃ ConcreteBinaryTower (k - 1) × ConcreteBinaryTower (k - 1) where + toFun := split h_k_pos + invFun := fun (hi, lo) => join h_k_pos hi lo + left_inv := fun x => Eq.symm (join_of_split h_k_pos x _ _ rfl) + right_inv := fun ⟨hi, lo⟩ => Eq.symm (split_of_join h_k_pos _ hi lo rfl) + +lemma mul_trans_inequality {k : ℕ} (x: ℕ) (h_k: k ≤ 2) (h_x: x ≤ 2^(2^k) - 1): x < 16 := by + have x_le_1: x ≤ 2^(2^k) - 1 := by omega + have x_le_2: x ≤ 2^(2^2) - 1 := by + apply le_trans x_le_1 -- 
2^(2^k) - 1 ≤ 2^(2^2) - 1 + rw [Nat.sub_le_sub_iff_right] + rw [Nat.pow_le_pow_iff_right, Nat.pow_le_pow_iff_right] + exact h_k + norm_num + norm_num + norm_num + have x_le_15: x ≤ 15 := by omega + have x_lt_16: x < 16 := by omega + exact x_lt_16 + +-- Helper: Convert coefficients back to BitVec +def coeffsToBitVec {n : ℕ} (coeffs : List (ZMod 2)) : BitVec n := + let val := List.foldr (fun c acc => acc * 2 + c.val) 0 (coeffs.take n) + BitVec.ofNat n val + +-- Karatsuba-like multiplication for binary tower fields +def concrete_mul {k : ℕ} (a b : ConcreteBinaryTower k) : ConcreteBinaryTower k := + if h_k_zero: k = 0 then + if a = zero then zero + else if b = zero then zero + else if a = one then b + else if b = one then a + else zero -- This case never happens in GF(2) + else + have h_k_gt_0 : k > 0 := by omega + let (a₁, a₀) := split h_k_gt_0 a -- (hi, lo) + let (b₁, b₀) := split h_k_gt_0 b + let a_sum := a₁ + a₀ + let b_sum := b₁ + b₀ + let sum_mul := concrete_mul a_sum b_sum + let prevX := Z (k - 1) + -- Karatsuba-like step + let mult_hi := concrete_mul a₁ b₁ -- Recursive call at k-1 + let mult_lo := concrete_mul a₀ b₀ -- Recursive call at k-1 + let lo_res := mult_lo + mult_hi -- a₀b₀ + a₁b₁ + let hi_res := sum_mul + lo_res + (concrete_mul mult_hi prevX) + have h_eq : k - 1 + 1 = k := by omega + -- Use the proof to cast the type + have res := join (k:=k) (by omega) hi_res lo_res + res +termination_by (k, a.toNat, b.toNat) + +-- Multiplication instance +instance (k : ℕ) : HMul (ConcreteBinaryTower k) (ConcreteBinaryTower k) (ConcreteBinaryTower k) + where hMul := concrete_mul + +instance (k : ℕ) : LT (ConcreteBinaryTower k) where + lt := fun x y => by + unfold ConcreteBinaryTower at x y + exact x < y + +instance (k : ℕ) : LE (ConcreteBinaryTower k) where + le := fun x y => by + unfold ConcreteBinaryTower at x y + exact x ≤ y + +instance (k: ℕ) : Preorder (ConcreteBinaryTower k) where + le_refl := fun x => BitVec.le_refl x + le_trans := fun x y z hxy hyz => 
BitVec.le_trans hxy hyz + lt := fun x y => x < y + lt_iff_le_not_ge := fun x y => by + unfold ConcreteBinaryTower at x y + have bitvec_statement := (BitVec.not_lt_iff_le : ¬x < y ↔ y ≤ x) + -- We need to prove: x < y ↔ x ≤ y ∧ ¬y ≤ x + constructor + · -- Forward direction: x < y → x ≤ y ∧ ¬y ≤ x + intro h_lt + constructor + · -- x < y → x ≤ y + exact BitVec.le_of_lt h_lt + · -- x < y → ¬y ≤ x + intro h_le_yx + have h_not_le := mt bitvec_statement.mpr + push_neg at h_not_le + have neg_y_le_x := h_not_le h_lt + contradiction + · -- Reverse direction: x ≤ y ∧ ¬y ≤ x → x < y + intro h + cases h with | intro h_le_xy h_not_le_yx => + have x_lt_y:= mt bitvec_statement.mp h_not_le_yx + push_neg at x_lt_y + exact x_lt_y + +theorem toNatInRange {k: ℕ} (b: ConcreteBinaryTower k) : BitVec.toNat b ≤ 2 ^ (2 ^ k) - 1 := by + unfold ConcreteBinaryTower at b + have le_symm: 2^k ≤ 2^k := by omega + have toNat_le_2pow:= BitVec.toNat_lt_twoPow_of_le (m:=2^k) (n:=(2^k)) + have b_le := toNat_le_2pow le_symm (x:=b) + omega + +theorem eq_zero_or_eq_one {a : ConcreteBinaryTower 0} : a = zero ∨ a = one := by + unfold ConcreteBinaryTower at a -- Now a is a BitVec (2^0) = BitVec 1 + have h := BitVec.eq_zero_or_eq_one a + cases h with + | inl h_zero => + left + unfold zero + exact h_zero + | inr h_one => + right + unfold one + exact h_one + +theorem concrete_eq_zero_or_eq_one {k: ℕ} {a : ConcreteBinaryTower k} (h_k_zero: k = 0) + : a = zero ∨ a = one := by + if h_k_zero: k = 0 then + have h_2_pow_k_eq_1: 2 ^ k = 1 := by rw [h_k_zero]; norm_num + let a0 : ConcreteBinaryTower 0 := Eq.mp (congrArg ConcreteBinaryTower h_k_zero) a + have a0_is_eq_mp_a: a0 = Eq.mp (congrArg ConcreteBinaryTower h_k_zero) a := by rfl + -- Approach: convert to BitVec.cast and derive equality of the cast for 0 and 1 + rcases eq_zero_or_eq_one (a := a0) with (ha0 | ha1) + · -- a0 = zero + left + -- Transport equality back to ConcreteBinaryTower k + have : a = Eq.mpr (congrArg ConcreteBinaryTower h_k_zero) a0 := by + simp 
only [a0_is_eq_mp_a, eq_mp_eq_cast, eq_mpr_eq_cast, cast_cast, cast_eq] + rw [this, ha0] + -- zero (k:=k) = Eq.mpr ... (zero (k:=0)) + have : zero = Eq.mpr (congrArg ConcreteBinaryTower h_k_zero) (zero (k:=0)) := by + simp only [zero, eq_mpr_eq_cast, BitVec.zero] + rw [←dcast_eq_root_cast] + simp + rw [BitVec.dcast_zero] -- ⊢ 1 = 2^k + exact h_2_pow_k_eq_1.symm + rw [this] + · -- a0 = one + right + have : a = Eq.mpr (congrArg ConcreteBinaryTower h_k_zero) a0 := by + simp only [a0_is_eq_mp_a, eq_mp_eq_cast, eq_mpr_eq_cast, cast_cast, cast_eq] + rw [this, ha1] + have : one = Eq.mpr (congrArg ConcreteBinaryTower h_k_zero) (one (k:=0)) := by + simp only [one, eq_mpr_eq_cast] + rw [←dcast_eq_root_cast] + simp + rw [BitVec.dcast_one] -- ⊢ 1 = 2^k + exact h_2_pow_k_eq_1.symm + rw [this] + else + contradiction + +lemma add_eq_one_iff (a b : ConcreteBinaryTower 0) : + a + b = 1 ↔ (a = 0 ∧ b = 1) ∨ (a = 1 ∧ b = 0) := by + rcases eq_zero_or_eq_one (a := a) with (ha | ha) + · simp [ha, zero_is_0] -- a = zero + · simp [ha, one_is_1] + +def concrete_pow_nat {k: ℕ} (x : ConcreteBinaryTower k) (n : ℕ) : ConcreteBinaryTower k := + if n = 0 then one + else if n % 2 = 0 then concrete_pow_nat (concrete_mul x x) (n / 2) + else concrete_mul x (concrete_pow_nat (concrete_mul x x) (n / 2)) + +-- Multiplicative inverse +def concrete_inv {k : ℕ} (a : ConcreteBinaryTower k) : ConcreteBinaryTower k := + if h_k_zero: k = 0 then + if a = 0 then 0 else 1 + else + if h_a_zero: a = 0 then 0 + else if h_a_one: a = 1 then 1 + else + let h_k_gt_0 : k > 0 := Nat.zero_lt_of_ne_zero h_k_zero + let (a_hi, a_lo) := split (k:=k) (h:=h_k_gt_0) a + let prevZ := Z (k-1) + let a_lo_next := a_lo + concrete_mul a_hi prevZ + let delta := concrete_mul a_lo a_lo_next + concrete_mul a_hi a_hi + let delta_inverse := concrete_inv delta + let out_hi := concrete_mul delta_inverse a_hi + let out_lo := concrete_mul delta_inverse a_lo_next + let res := join (k:=k) (by omega) out_hi out_lo + res + +lemma concrete_inv_zero 
{k: ℕ} : concrete_inv (k:=k) 0 = 0 := by + unfold concrete_inv + by_cases h_k_zero : k = 0 + · rw [dif_pos h_k_zero]; norm_num + · rw [dif_neg h_k_zero]; norm_num + +lemma concrete_exists_pair_ne {k:ℕ}: ∃ x y : ConcreteBinaryTower k, x ≠ y := + ⟨zero (k:=k), one (k:=k), (concrete_one_ne_zero (k:=k)).symm⟩ + +lemma concrete_zero_mul0 (b : ConcreteBinaryTower 0) : + concrete_mul (zero (k:=0)) b = zero (k:=0) := by + unfold concrete_mul + simp only [↓reduceDIte, zero, Nat.pow_zero, BitVec.zero_eq, ↓reduceIte] + +lemma concrete_mul_zero0 (a : ConcreteBinaryTower 0) : + concrete_mul a (zero (k:=0)) = zero (k:=0) := by + unfold concrete_mul + by_cases h : a = zero + · simp only [↓reduceDIte, h, ↓reduceIte] + · simp only [↓reduceDIte, zero, Nat.pow_zero, BitVec.zero_eq, ↓reduceIte, ite_self] + +lemma concrete_one_mul0 (a : ConcreteBinaryTower 0) : + concrete_mul (one (k:=0)) a = a := by + unfold concrete_mul + by_cases h : a = zero + · simp [h, zero] + · norm_num; simp only [concrete_one_ne_zero, ↓reduceIte, h] + +lemma concrete_mul_one0 (a : ConcreteBinaryTower 0) : + concrete_mul a (one (k:=0)) = a := by + unfold concrete_mul + by_cases h : a = zero + · simp [h] + · norm_num; simp [h, concrete_one_ne_zero]; intro h_eq; exact h_eq.symm + +lemma concrete_mul_assoc0 (a b c : ConcreteBinaryTower 0) : + concrete_mul (concrete_mul a b) c = concrete_mul a (concrete_mul b c) := by + rcases eq_zero_or_eq_one (a := a) with (ha | ha) + · simp [ha, concrete_mul] -- a = zero case + · rcases eq_zero_or_eq_one (a := b) with (hb | hb) + · simp [ha, hb, concrete_mul] -- a = one, b = zero + · rcases eq_zero_or_eq_one (a := c) with (hc | hc) + · simp [ha, hb, hc, concrete_mul] -- a = one, b = one, c = zero + · simp [ha, hb, hc, concrete_mul, concrete_one_ne_zero] -- a = one, b = one, c = one + +lemma concrete_mul_comm0 (a b : ConcreteBinaryTower 0) : + concrete_mul a b = concrete_mul b a := by + rcases eq_zero_or_eq_one (a := a) with (ha | ha) + · simp [ha, concrete_mul] -- a = zero + · 
rcases eq_zero_or_eq_one (a := b) with (hb | hb) + · simp [ha, hb, concrete_mul] -- a = one, b = zero + · simp [ha, hb, concrete_mul] -- a = one, b = one + +-- Helper lemma: For GF(2), `if x = 0 then 0 else x` is just `x`. +lemma if_zero_then_zero_else_self (x : ConcreteBinaryTower 0) : + (if x = zero then zero else x) = x := by + rcases eq_zero_or_eq_one (a := x) with (hx_zero | hx_one) + · simp only [hx_zero, ↓reduceIte] + · simp only [hx_one, concrete_one_ne_zero, ↓reduceIte] + +lemma concrete_mul_left_distrib0 (a b c : ConcreteBinaryTower 0) : + concrete_mul a (b + c) = concrete_mul a b + concrete_mul a c := by + rcases eq_zero_or_eq_one (a := a) with (ha | ha) + · simp [ha, concrete_mul, zero_is_0] -- a = zero + · simp [ha, concrete_mul, zero_is_0, one_is_1]; + rcases eq_zero_or_eq_one (a := b + c) with (hb_add_c | hb_add_c) + · simp [hb_add_c, zero_is_0]; + rw [zero_is_0] at hb_add_c + have b_eq_c: b = c := (add_eq_zero_iff_eq b c).mp hb_add_c + simp only [b_eq_c, add_self_cancel] + · simp [hb_add_c, one_is_1]; + have c_cases := (add_eq_one_iff b c).mp hb_add_c + rcases eq_zero_or_eq_one (a := b) with (hb | hb) + · simp [hb, zero_is_0]; + rw [one_is_1] at hb_add_c + rw [zero_is_0] at hb + simp [hb] at c_cases + have c_ne_0: c ≠ 0 := by + simp only [c_cases, ne_eq, one_ne_zero, not_false_eq_true] + rw [if_neg c_ne_0] + exact c_cases.symm + · rw [one_is_1] at hb; simp [hb]; + simp [hb] at c_cases + exact c_cases + +lemma concrete_mul_right_distrib0 (a b c : ConcreteBinaryTower 0) : + concrete_mul (a + b) c = concrete_mul a c + concrete_mul b c := by + rw [concrete_mul_comm0 (a:=(a+b)) (b:=c)] + rw [concrete_mul_comm0 (a:=a) (b:=c)] + rw [concrete_mul_comm0 (a:=b) (b:=c)] + exact concrete_mul_left_distrib0 (a:=c) (b:=a) (c:=b) + +-- Natural number casting +def natCast {k: ℕ} (n : ℕ) : ConcreteBinaryTower k := if n % 2 = 0 then zero else one +def natCast_zero {k: ℕ} : natCast (k:=k) 0 = zero := by rfl + +def natCast_succ {k: ℕ} (n : ℕ) : natCast (k:=k) (n + 1) = 
natCast (k:=k) n + 1 := by + by_cases h : n % 2 = 0 + · -- If n % 2 = 0, then (n+1) % 2 = 1 + have h_succ : (n + 1) % 2 = 1 := by omega + simp only [natCast, h, h_succ]; norm_num; rw [one_is_1, zero_is_0]; norm_num + · -- If n % 2 = 1, then (n+1) % 2 = 0 + have h_succ : (n + 1) % 2 = 0 := by omega + simp only [natCast, h, h_succ]; norm_num; rw [one_is_1, zero_is_0, add_self_cancel] + +instance {k: ℕ} : NatCast (ConcreteBinaryTower k) where + natCast n:= natCast n + +@[simp] +lemma concrete_natCast_zero_eq_zero {k:ℕ} : natCast 0 = (0: ConcreteBinaryTower k) := by + simp only [natCast, Nat.zero_mod, ↓reduceIte, zero_is_0] + +@[simp] +lemma concrete_natCast_one_eq_one {k:ℕ} : natCast 1 = (1: ConcreteBinaryTower k) := by + simp only [natCast, Nat.mod_succ, one_ne_zero, ↓reduceIte, one_is_1] + +-- help simp recognize the NatCast coercion +@[simp] lemma natCast_eq {k: ℕ} (n : ℕ) : (↑n : ConcreteBinaryTower k) = natCast n := rfl + +-- Integer casting +def intCast {k: ℕ} (n : ℤ) : ConcreteBinaryTower k := if n % 2 = 0 then zero else one + +instance {k:ℕ} : IntCast (ConcreteBinaryTower k) where + intCast n:= intCast n + +def intCast_ofNat {k:ℕ} (n : ℕ) : intCast (k:=k) (n : ℤ) = natCast n := by + have h_mod_eq : (n : ℤ) % 2 = 0 ↔ n % 2 = 0 := by omega + by_cases h : n % 2 = 0 + · -- Case: n % 2 = 0 + have h_int : (n : ℤ) % 2 = 0 := h_mod_eq.mpr h + simp only [intCast, natCast, h, h_int] + · -- Case: n % 2 ≠ 0 + have h' : n % 2 = 1 := by omega -- For n : ℕ, if not 0, then 1 + have h_int : (n : ℤ) % 2 = 1 := by omega -- Coercion preserves modulo + simp only [intCast, natCast, h_int, h'] + rfl + +@[simp] lemma my_neg_mod_two (m : ℤ) : (-m) % 2 = if m % 2 = 0 then 0 else 1 := by omega +@[simp] lemma mod_two_eq_zero (m: ℤ): m % 2 = (-m) % 2 := by omega + +def intCast_negSucc {k:ℕ} (n : ℕ) : intCast (k:=k) (Int.negSucc n) + = - (↑(n + 1) : ConcreteBinaryTower k) := by + by_cases h_mod : (n + 1) % 2 = 0 + · -- ⊢ intCast (Int.negSucc n) = -↑(n + 1) + have h_neg : (-(n + 1 : ℤ)) % 2 
= 0 := by omega + unfold intCast + have int_neg_succ: Int.negSucc n = -(n+1 : ℤ) := by rfl + rw [int_neg_succ] + simp only [h_neg] + have h_nat : (↑(n + 1) : ConcreteBinaryTower k) = (zero : ConcreteBinaryTower k) := by + simp only [natCast_eq, natCast, h_mod] + rfl + rw [h_nat] + rfl + · -- ⊢ intCast (Int.negSucc n) = -↑(n + 1) + have h_neg : (-(n + 1 : ℤ)) % 2 = 1 := by omega + unfold intCast + have int_neg_succ: Int.negSucc n = -(n+1 : ℤ) := by rfl + rw [int_neg_succ] + simp only [h_neg] + rw [if_neg (by simp)] + have h_nat : (↑(n + 1) : ConcreteBinaryTower k) = (one : ConcreteBinaryTower k) := by + simp only [natCast_eq, natCast, h_mod] + rfl + rw [h_nat] + rfl + +end BaseDefinitions + +------------------------------------------------------------------------------------------- +-- Structure to hold properties at a given level k +structure ConcreteBTFStepResult (k : ℕ) where + mul_eq : ∀ (a b : ConcreteBinaryTower k) (h_k : k > 0) + {a₁ a₀ b₁ b₀ : ConcreteBinaryTower (k-1)} + (_h_a : (a₁, a₀) = split h_k a) (_h_b : (b₁, b₀) = split h_k b), + concrete_mul a b = join h_k + (hi:=concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) + (lo:=concrete_mul a₀ b₀ + concrete_mul a₁ b₁) + zero_mul : ∀ a : ConcreteBinaryTower k, concrete_mul zero a = zero + zero_mul' : ∀ a : ConcreteBinaryTower k, concrete_mul 0 a = 0 + mul_zero : ∀ a : ConcreteBinaryTower k, concrete_mul a zero = zero + mul_zero' : ∀ a : ConcreteBinaryTower k, concrete_mul a 0 = 0 + one_mul : ∀ a : ConcreteBinaryTower k, concrete_mul one a = a + mul_one : ∀ a : ConcreteBinaryTower k, concrete_mul a one = a + mul_assoc : ∀ a b c : ConcreteBinaryTower k, concrete_mul (concrete_mul a b) c + = concrete_mul a (concrete_mul b c) + mul_comm : ∀ a b : ConcreteBinaryTower k, concrete_mul a b = concrete_mul b a + mul_left_distrib : ∀ a b c : ConcreteBinaryTower k, concrete_mul a (b + c) + = concrete_mul a b + concrete_mul a c + mul_right_distrib : ∀ a b c : ConcreteBinaryTower k, 
concrete_mul (a + b) c + = concrete_mul a c + concrete_mul b c + add_assoc : ∀ a b c : ConcreteBinaryTower k, (a + b) + c = a + (b + c) + add_comm : ∀ a b : ConcreteBinaryTower k, a + b = b + a + add_zero : ∀ a : ConcreteBinaryTower k, a + zero = a + zero_add : ∀ a : ConcreteBinaryTower k, zero + a = a + add_neg : ∀ a : ConcreteBinaryTower k, a + (neg a) = zero + mul_inv_cancel : ∀ a : ConcreteBinaryTower k, a ≠ zero → concrete_mul a (concrete_inv a) = one + +------------------------------------------------------------------------------------------- +section InductiveConcreteBTFPropertiesProofs -- for k > 0 +variable {k : ℕ} + +theorem concrete_mul_eq {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a b: ConcreteBinaryTower k) (h_k: k > 0) {a₁ a₀ b₁ b₀: ConcreteBinaryTower (k-1)} + (h_a: (a₁, a₀) = split h_k a) (h_b: (b₁, b₀) = split h_k b): + concrete_mul a b = join h_k + (hi:=concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) + (lo:=concrete_mul a₀ b₀ + concrete_mul a₁ b₁) := by + have h_a₁ : (split h_k a).1 = a₁ := by rw [h_a.symm] + have h_a₀ : (split h_k a).2 = a₀ := by rw [h_a.symm] + have h_b₁ : (split h_k b).1 = b₁ := by rw [h_b.symm] + have h_b₀ : (split h_k b).2 = b₀ := by rw [h_b.symm] + conv => + lhs + unfold concrete_mul + rw [dif_neg (Nat.ne_of_gt h_k)] + simp only [h_a₁, h_a₀, h_b₁, h_b₀] -- Do this to resolve the two nested matches (of the splits) + -- while still allowing substitution of a₀ a₁ b₀ b₁ (components of the splits) into the goal + rw [join_eq_join_iff] + split_ands + · -- ⊢ concrete_mul (a₁ + a₀) (b₁ + b₀) + (concrete_mul a₀ b₀ + concrete_mul a₁ b₁) + + -- concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)) + -- = concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)) + have h_add_left_inj := (add_left_inj (a:=concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) + (b:=concrete_mul (a₁ + a₀) (b₁ + b₀) + (concrete_mul a₀ b₀ + concrete_mul a₁ b₁) + + concrete_mul 
(concrete_mul a₁ b₁) (Z (k - 1))) (c:=concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)))).mp + rw [h_add_left_inj] + rw [add_assoc, add_self_cancel, add_zero, add_assoc, add_self_cancel, add_zero] + -- ⊢ concrete_mul (a₁ + a₀) (b₁ + b₀) + (concrete_mul a₀ b₀ + concrete_mul a₁ b₁) + -- = concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + have recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k) + rw [recArgPrevLevel.mul_left_distrib, recArgPrevLevel.mul_right_distrib, + recArgPrevLevel.mul_right_distrib] + have h_a₁_b₀: concrete_mul a₁ b₀ = concrete_mul b₀ a₁ := by + rw [recArgPrevLevel.mul_comm (a:=a₁) (b:=b₀)] + have h_a₀_b₁: concrete_mul a₀ b₁ = concrete_mul b₁ a₀ := by + rw [recArgPrevLevel.mul_comm (a:=a₀) (b:=b₁)] + rw [h_a₁_b₀, h_a₀_b₁] + -- ⊢ concrete_mul a₁ b₁ + concrete_mul b₁ a₀ + (concrete_mul b₀ a₁ + concrete_mul a₀ b₀) + + -- (concrete_mul a₀ b₀ + concrete_mul a₁ b₁) = concrete_mul b₁ a₀ + concrete_mul b₀ a₁ + conv => + lhs + rw [←add_assoc, ←add_assoc] + rw [add_assoc (b:=concrete_mul a₀ b₀) (c:=concrete_mul a₀ b₀), add_self_cancel, add_zero] + rw [add_comm (b:=concrete_mul a₁ b₁), ←add_assoc, ←add_assoc, add_self_cancel, zero_add] + · rfl + +lemma concrete_zero_mul {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a : ConcreteBinaryTower k) : concrete_mul (zero (k:=k)) a = zero (k:=k) := by + unfold concrete_mul + by_cases h_k_zero : k = 0 + · -- Base case: k = 0 + simp only [h_k_zero, ↓reduceDIte, zero, dif_pos, ite_true] + · -- Inductive case: k > 0 + simp only [dif_neg h_k_zero] + -- Obtain h_k_gt_0 from h_k_zero + have h_k_gt_0_proof : k > 0 := by omega + -- Split zero into (zero, zero) + have h_zero_split : split h_k_gt_0_proof (zero (k:=k)) = (zero, zero) := by rw [split_zero] + let (a₁, a₀) := (zero (k:=k-1), zero (k:=k-1)) + let (b₁, b₀) := split h_k_gt_0_proof a + rw [h_zero_split] + simp only + -- Apply the inductive hypothesis to a₁*b₁, a₀*b₀, a₁*b₀, a₀*b₁ + let recArgPrevLevel := recArg (k-1) 
(Nat.sub_one_lt_of_lt h_k_gt_0_proof) + simp only [zero_is_0, zero_add, recArgPrevLevel.zero_mul'] + simp only [← zero_is_0, join_zero_zero h_k_gt_0_proof] + +lemma concrete_mul_zero {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a : ConcreteBinaryTower k) : concrete_mul a (zero (k:=k)) = zero (k:=k) := by + unfold concrete_mul + by_cases h_k_zero : k = 0 + · -- Base case: k = 0 + simp only [h_k_zero, ↓reduceDIte, zero, BitVec.zero_eq, ↓reduceIte, ite_self] + · -- Inductive case: k > 0 + simp only [dif_neg h_k_zero] + -- Obtain h_k_gt_0 from h_k_zero + have h_k_gt_0_proof : k > 0 := by omega + -- Split zero into (zero, zero) + have h_zero_split : split h_k_gt_0_proof (zero (k:=k)) = (zero, zero) := by rw [split_zero] + -- Split a into (a₁, a₀) + let (a₁, a₀) := split h_k_gt_0_proof a + let (b₁, b₀) := (zero (k:=k-1), zero (k:=k-1)) + -- Rewrite with the zero split + rw [h_zero_split] + simp only + -- Apply the inductive hypothesis + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k_gt_0_proof) + simp only [zero_is_0, zero_add, recArgPrevLevel.mul_zero'] + -- Use join_zero to complete the proof + simp only [← zero_is_0, recArgPrevLevel.zero_mul, join_zero_zero h_k_gt_0_proof] + +lemma concrete_one_mul {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a : ConcreteBinaryTower k) : concrete_mul (one (k:=k)) a = a := by + unfold concrete_mul + by_cases h_k_zero : k = 0 + · -- Base case: k = 0 + simp [h_k_zero, ↓reduceDIte, one_is_1, zero_is_0]; intro h; exact h.symm + · -- Inductive case: k > 0 + have h_k_gt_0 : k > 0 := by omega + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k_gt_0) + simp only [dif_neg h_k_zero] + let p := split h_k_gt_0 a + let a₁ := p.fst + let a₀ := p.snd + have h_split_a : split h_k_gt_0 a = (a₁, a₀) := by rfl + rw [split_one] + simp only [zero_is_0, zero_add, recArgPrevLevel.one_mul, recArgPrevLevel.zero_mul', add_zero] + simp [add_assoc, add_self_cancel] + have join_result : join h_k_gt_0 a₁ a₀ = a := by 
+ have split_join := join_of_split h_k_gt_0 a a₁ a₀ + exact (split_join h_split_a).symm + exact join_result + +lemma concrete_mul_one {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a : ConcreteBinaryTower k) : concrete_mul a (one (k:=k)) = a := by + unfold concrete_mul + by_cases h_k_zero : k = 0 + · -- Base case: k = 0 + simp [h_k_zero, ↓reduceDIte, one_is_1, zero_is_0]; + conv => + lhs + simp only [if_self_rfl] + · -- Inductive case: k > 0 + have h_k_gt_0 : k > 0 := by omega + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k_gt_0) + simp only [dif_neg h_k_zero] + let p := split h_k_gt_0 a + let a₁ := p.fst + let a₀ := p.snd + have h_split_a : split h_k_gt_0 a = (a₁, a₀) := by rfl + rw [split_one] + simp only [zero_is_0, zero_add, recArgPrevLevel.mul_one, recArgPrevLevel.mul_zero', + recArgPrevLevel.zero_mul', add_zero] + simp [add_assoc, add_self_cancel] + have join_result : join h_k_gt_0 a₁ a₀ = a := by + have split_join := join_of_split h_k_gt_0 a a₁ a₀ + exact (split_join h_split_a).symm + exact join_result + +lemma concrete_pow_base_one {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} (n : ℕ) : + concrete_pow_nat (k:=k) (x:=1) n = 1 := by + induction n using Nat.strong_induction_on with + | h n ih => + -- ih: ∀ m, m < n → concrete_pow_nat (k:=k) (x:=1) m = 1 + unfold concrete_pow_nat + by_cases h_n_zero : n = 0 + · -- Base case: n = 0 + rw [if_pos h_n_zero] + exact one_is_1 -- one = 1 + · -- Inductive step: n ≠ 0 + rw [if_neg h_n_zero] + by_cases h_mod : n % 2 = 0 + · -- Even case: n % 2 = 0 + rw [if_pos h_mod] + have h_square : concrete_mul (1 : ConcreteBinaryTower k) 1 = 1 := by + rw [← one_is_1] + rw [concrete_one_mul (recArg:=recArg)] -- Assume concrete_mul 1 1 = 1 + rw [h_square] + have h_div_lt : n / 2 < n := by + apply Nat.div_lt_self + simp [Nat.ne_zero_iff_zero_lt.mp h_n_zero] + exact Nat.le_refl 2 + apply ih (n / 2) h_div_lt -- Use ih for n / 2 < n + · -- Odd case: n % 2 ≠ 0 + rw [if_neg h_mod] + have h_square : 
concrete_mul (1 : ConcreteBinaryTower k) 1 = 1 := by + rw [← one_is_1] + rw [concrete_one_mul (recArg:=recArg)] -- Assume concrete_mul 1 1 = 1 + rw [h_square] + have h_div_lt : n / 2 < n := by + apply Nat.div_lt_self + simp [Nat.ne_zero_iff_zero_lt.mp h_n_zero] + exact Nat.le_refl 2 + rw [ih (n / 2) h_div_lt] -- Use ih for n / 2 < n + rw [← one_is_1] + rw [concrete_one_mul (recArg:=recArg)] -- Assume concrete_mul 1 1 = 1 + +lemma concrete_mul_comm {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} {h_k: k > 0} + (a b : ConcreteBinaryTower k) : + concrete_mul a b = concrete_mul b a := by + by_cases h_k_zero : k = 0 + · linarith + · -- Inductive case: k > 0 + -- Approach: utilize concrete_mul_eq of level (k - 1) + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k) + let p1 := split h_k a + let p2 := split h_k b + let a₁ := p1.fst + let a₀ := p1.snd + let b₁ := p2.fst + let b₀ := p2.snd + have h_split_a : split h_k a = (a₁, a₀) := by rfl + have h_split_b : split h_k b = (b₁, b₀) := by rfl + have h_a₁_a₀ : a = join h_k a₁ a₀ := by exact (join_of_split h_k a a₁ a₀) h_split_a + have h_b₁_b₀ : b = join h_k b₁ b₀ := by exact (join_of_split h_k b b₁ b₀) h_split_b + have h_a₁ : (split h_k a).1 = a₁ := by rw [h_split_a] + have h_a₀ : (split h_k a).2 = a₀ := by rw [h_split_a] + have h_b₁ : (split h_k b).1 = b₁ := by rw [h_split_b] + have h_b₀ : (split h_k b).2 = b₀ := by rw [h_split_b] + have h_lhs := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=b) (a₁:=a₁) + (a₀:=a₀) (b₁:=b₁) (b₀:=b₀) (h_a:=h_split_a) (h_b:=h_split_b) + have h_rhs := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=b) (b:=a) (a₁:=b₁) + (a₀:=b₀) (b₁:=a₁) (b₀:=a₀) (h_a:=h_split_b) (h_b:=h_split_a) + rw [h_lhs, h_rhs] + rw [join_eq_join_iff] + rw [recArgPrevLevel.mul_comm (a:=b₀) (b:=a₀), recArgPrevLevel.mul_comm (a:=a₁) (b:=b₁)] + split_ands + · rw [recArgPrevLevel.add_comm (a:=concrete_mul a₀ b₁) (b:=concrete_mul b₀ a₁)] + · rfl + +lemma concrete_mul_assoc {recArg : (m : ℕ) → m < k → 
ConcreteBTFStepResult m} {h_k: k > 0} + (a b c : ConcreteBinaryTower k) : + concrete_mul (concrete_mul a b) c = concrete_mul a (concrete_mul b c) := by + by_cases h_k_zero : k = 0 + · linarith + · -- Inductive case: k > 0 + -- Approach: utilize concrete_mul_eq of level (k - 1) + -- ⊢ concrete_mul (concrete_mul a b) c = concrete_mul a (concrete_mul b c) + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k) + let p1 := split h_k a + let p2 := split h_k b + let p3 := split h_k c + let a₁ := p1.fst + let a₀ := p1.snd + let b₁ := p2.fst + let b₀ := p2.snd + let c₁ := p3.fst + let c₀ := p3.snd + have h_split_a : split h_k a = (a₁, a₀) := by rfl + have h_split_b : split h_k b = (b₁, b₀) := by rfl + have h_split_c : split h_k c = (c₁, c₀) := by rfl + have h_a₁_a₀ : a = join h_k a₁ a₀ := by exact (join_of_split h_k a a₁ a₀) h_split_a + have h_b₁_b₀ : b = join h_k b₁ b₀ := by exact (join_of_split h_k b b₁ b₀) h_split_b + have h_c₁_c₀ : c = join h_k c₁ c₀ := by exact (join_of_split h_k c c₁ c₀) h_split_c + -- ⊢ concrete_mul (concrete_mul a b) c = concrete_mul a (concrete_mul b c) + have a_mul_b_eq := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=b) (a₁:=a₁) + (a₀:=a₀) (b₁:=b₁) (b₀:=b₀) (h_a:=h_split_a) (h_b:=h_split_b) + have b_mul_c_eq := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=b) (b:=c) (a₁:=b₁) + (a₀:=b₀) (b₁:=c₁) (b₀:=c₀) (h_a:=h_split_b) (h_b:=h_split_c) + set ab₁ := concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)) + set ab₀ := concrete_mul a₀ b₀ + concrete_mul a₁ b₁ + have h_split_a_mul_b: split h_k (concrete_mul a b) = (ab₁, ab₀) := by + exact (split_of_join h_k (concrete_mul a b) ab₁ ab₀ a_mul_b_eq).symm + set bc₁ := concrete_mul b₀ c₁ + concrete_mul c₀ b₁ + + concrete_mul (concrete_mul b₁ c₁) (Z (k - 1)) + set bc₀ := concrete_mul b₀ c₀ + concrete_mul b₁ c₁ + have h_split_b_mul_c: split h_k (concrete_mul b c) = (bc₁, bc₀) := by + exact (split_of_join h_k (concrete_mul b c) bc₁ bc₀ b_mul_c_eq).symm + 
+ set ab := concrete_mul a b + set bc := concrete_mul b c + -- rw [a_mul_b_eq, b_mul_c_eq] + -- ⊢ concrete_mul ab c = concrete_mul a bc + have a_mul_bc_eq := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=bc) (a₁:=a₁) + (a₀:=a₀) (b₁:=bc₁) (b₀:=bc₀) (h_a:=h_split_a) (h_b:=h_split_b_mul_c.symm) + have ab_mul_c_eq := concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=ab) (b:=c) (a₁:=ab₁) + (a₀:=ab₀) (b₁:=c₁) (b₀:=c₀) (h_a:=h_split_a_mul_b.symm) (h_b:=h_split_c) + set a_bc₁ := concrete_mul a₀ bc₁ + concrete_mul bc₀ a₁ + + concrete_mul (concrete_mul a₁ bc₁) (Z (k - 1)) + set a_bc₀ := concrete_mul a₀ bc₀ + concrete_mul a₁ bc₁ + have h_split_a_bc: split h_k (concrete_mul a bc) = (a_bc₁, a_bc₀) := by + exact (split_of_join h_k (concrete_mul a bc) a_bc₁ a_bc₀ a_mul_bc_eq).symm + set ab_c₁ := concrete_mul ab₀ c₁ + concrete_mul c₀ ab₁ + + concrete_mul (concrete_mul ab₁ c₁) (Z (k - 1)) + set ab_c₀ := concrete_mul ab₀ c₀ + concrete_mul ab₁ c₁ + have h_split_ab_c: split h_k (concrete_mul ab c) = (ab_c₁, ab_c₀) := by + exact (split_of_join h_k (concrete_mul ab c) ab_c₁ ab_c₀ ab_mul_c_eq).symm + + rw [a_mul_bc_eq, ab_mul_c_eq] -- convert concrete mul to join + rw [join_eq_join_iff] + -- ⊢ ab_c₁ = a_bc₁ ∧ ab_c₀ = a_bc₀ + unfold a_bc₁ ab_c₁ ab_c₀ a_bc₀ ab₀ ab₁ bc₀ bc₁ -- unfold all + simp only [recArgPrevLevel.mul_right_distrib, + recArgPrevLevel.mul_left_distrib] -- distribute all + split_ands + · -- ⊢ ab_c₁ = a_bc₁ + have t0 : concrete_mul (concrete_mul a₀ b₀) c₁ = concrete_mul a₀ (concrete_mul b₀ c₁) := by + rw [recArgPrevLevel.mul_assoc (a:=a₀) (b:=b₀) (c:=c₁)] + have t1: concrete_mul (concrete_mul a₁ b₁) c₁ = concrete_mul (concrete_mul b₁ c₁) a₁ := by + rw [recArgPrevLevel.mul_assoc (a:=a₁) (b:=b₁) (c:=c₁)] + rw [recArgPrevLevel.mul_comm (a:=a₁) (b:=concrete_mul b₁ c₁)] + have t2: concrete_mul c₀ (concrete_mul a₀ b₁) = concrete_mul a₀ (concrete_mul c₀ b₁) := by + rw [← recArgPrevLevel.mul_assoc (a:=c₀) (b:=a₀) (c:=b₁), + ← recArgPrevLevel.mul_assoc (a:=a₀) (b:=c₀) (c:=b₁)] 
+ rw [recArgPrevLevel.mul_comm (a:=c₀) (b:=a₀)] + have t3: concrete_mul c₀ (concrete_mul b₀ a₁) = (concrete_mul (concrete_mul b₀ c₀) a₁) := by + rw [← recArgPrevLevel.mul_assoc (a:=c₀) (b:=b₀) (c:=a₁)] + rw [recArgPrevLevel.mul_comm (a:=c₀) (b:=b₀)] + have t4: concrete_mul (concrete_mul a₁ (concrete_mul c₀ b₁)) (Z (k - 1)) + = concrete_mul c₀ (concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) := by + rw [← recArgPrevLevel.mul_assoc (c:=Z (k-1))] + rw [← recArgPrevLevel.mul_assoc (a:=c₀) (b:=a₁) (c:=b₁)] + rw [recArgPrevLevel.mul_comm (a:=c₀) (b:=a₁), + recArgPrevLevel.mul_assoc (a:=a₁) (b:=c₀) (c:=b₁)] + have t5: concrete_mul a₀ (concrete_mul (concrete_mul b₁ c₁) (Z (k - 1))) + = concrete_mul (concrete_mul (concrete_mul a₀ b₁) c₁) (Z (k - 1)) := by + rw [← recArgPrevLevel.mul_assoc (c:=Z (k-1))] + rw [← recArgPrevLevel.mul_assoc (a:=a₀) (b:=b₁) (c:=c₁)] + have t6: concrete_mul (concrete_mul (concrete_mul b₀ a₁) c₁) (Z (k - 1)) + = concrete_mul (concrete_mul a₁ (concrete_mul b₀ c₁)) (Z (k - 1)) := by + rw [recArgPrevLevel.mul_comm (a:=b₀) (b:=a₁), + recArgPrevLevel.mul_assoc (a:=a₁) (b:=b₀) (c:=c₁)] + have t7: concrete_mul (concrete_mul (concrete_mul (concrete_mul a₁ b₁) + (Z (k - 1))) c₁) (Z (k - 1)) = concrete_mul (concrete_mul a₁ (concrete_mul + (concrete_mul b₁ c₁) (Z (k - 1)))) (Z (k - 1)) := by + rw [← recArgPrevLevel.mul_assoc (c:=Z (k-1))] + rw [recArgPrevLevel.mul_comm (a:=(concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)))) (b:=c₁)] + rw [recArgPrevLevel.mul_comm (a:=a₁) (b:=b₁)] + rw [← recArgPrevLevel.mul_assoc (c:=Z (k-1))] + -- c1 (b1 a1) vs a1 (b1 c1) => convert to (c1 b1) a1 vs (c1 b) a1 + rw [recArgPrevLevel.mul_comm (a:=a₁) (b:=concrete_mul b₁ c₁)] + rw [← recArgPrevLevel.mul_assoc (a:=c₁) (b:=b₁) (c:=a₁)] + rw [recArgPrevLevel.mul_comm (a:=c₁) (b:=b₁)] + rw [t0, t1, t2, t3, t4, t5, t6, t7] + abel_nf + · -- ⊢ ab_c₀ = a_bc₀ + have t_a₀_b₀_c₀ : concrete_mul (concrete_mul a₀ b₀) c₀ + = concrete_mul a₀ (concrete_mul b₀ c₀) := by + rw 
[recArgPrevLevel.mul_assoc (a:=a₀) (b:=b₀) (c:=c₀)] + have t_a₁_b₁_c₀ : concrete_mul (concrete_mul a₁ b₁) c₀ + = concrete_mul a₁ (concrete_mul c₀ b₁) := by + rw [recArgPrevLevel.mul_assoc (a:=a₁) (b:=b₁) (c:=c₀), + recArgPrevLevel.mul_comm (a:=c₀) (b:=b₁)] + have t_a₀_b₁_c₁ : concrete_mul (concrete_mul a₀ b₁) c₁ + = concrete_mul a₀ (concrete_mul b₁ c₁) := by + rw [recArgPrevLevel.mul_assoc (a:=a₀) (b:=b₁) (c:=c₁)] + have t_b₀_a₁_c₁ : concrete_mul (concrete_mul b₀ a₁) c₁ + = concrete_mul a₁ (concrete_mul b₀ c₁) := by + rw [recArgPrevLevel.mul_comm (a:=b₀) (b:=a₁), + recArgPrevLevel.mul_assoc (a:=a₁) (b:=b₀) (c:=c₁)] + have t_a₁_b₁_c₁_z : concrete_mul (concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) c₁ + = concrete_mul a₁ (concrete_mul (concrete_mul b₁ c₁) (Z (k - 1))) := by + rw [recArgPrevLevel.mul_comm (a:=concrete_mul (concrete_mul a₁ b₁) (Z (k - 1))) (b:=c₁)] + rw [←recArgPrevLevel.mul_assoc (c:=Z (k-1)), ←recArgPrevLevel.mul_assoc (c:=Z (k-1))] + -- rewrite both sides + rw [recArgPrevLevel.mul_comm (a:=b₁) (b:=c₁)] + rw [←recArgPrevLevel.mul_assoc (a:=c₁) (b:=a₁) + (c:=b₁), recArgPrevLevel.mul_comm (a:=c₁) (b:=a₁)] + rw [recArgPrevLevel.mul_assoc (a:=a₁) (b:=c₁) (c:=b₁)] + rw [t_a₀_b₀_c₀, t_a₁_b₁_c₀, t_a₀_b₁_c₁, t_b₀_a₁_c₁, t_a₁_b₁_c₁_z] + abel_nf + +lemma concrete_mul_left_distrib {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} {h_k: k > 0} + (a b c : ConcreteBinaryTower k) : + concrete_mul a (b + c) = concrete_mul a b + concrete_mul a c := by + by_cases h_k_zero : k = 0 + · linarith + · -- Inductive case: k > 0 + -- Approach: utilize concrete_mul_eq of level (k - 1) + -- ⊢ concrete_mul (concrete_mul a b) c = concrete_mul a (concrete_mul b c) + let recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k) + let p1 := split h_k a + let p2 := split h_k b + let p3 := split h_k c + let a₁ := p1.fst + let a₀ := p1.snd + let b₁ := p2.fst + let b₀ := p2.snd + let c₁ := p3.fst + let c₀ := p3.snd + have h_split_a : split h_k a = (a₁, a₀) := by rfl + have h_split_b 
: split h_k b = (b₁, b₀) := by rfl + have h_split_c : split h_k c = (c₁, c₀) := by rfl + have h_a₁_a₀ : a = join h_k a₁ a₀ := by exact (join_of_split h_k a a₁ a₀) h_split_a + have h_b₁_b₀ : b = join h_k b₁ b₀ := by exact (join_of_split h_k b b₁ b₀) h_split_b + have h_c₁_c₀ : c = join h_k c₁ c₀ := by exact (join_of_split h_k c c₁ c₀) h_split_c + have h_split_b_add_c : split h_k (b + c) = (b₁ + c₁, b₀ + c₀) := by + exact split_sum_eq_sum_split h_k (x₀:=b) (x₁:=c) (hi₀:=b₁) (lo₀:=b₀) + (hi₁:=c₁) (lo₁:=c₀) (h_split_x₀:=h_split_b) (h_split_x₁:=h_split_c) + -- ⊢ concrete_mul a (b + c) = concrete_mul a b + concrete_mul a c + conv => + lhs + -- rewrite a * (b + c) + rw [concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=b+c) (a₁:=a₁) + (a₀:=a₀) (b₁:=b₁ + c₁) (b₀:=b₀ + c₀) (h_a:=h_split_a) (h_b:=h_split_b_add_c.symm)] + conv => + rhs + rw [concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=b) (a₁:=a₁) + (a₀:=a₀) (b₁:=b₁) (b₀:=b₀) (h_a:=h_split_a) (h_b:=h_split_b)] + rw [concrete_mul_eq (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=c) (a₁:=a₁) + (a₀:=a₀) (b₁:=c₁) (b₀:=c₀) (h_a:=h_split_a) (h_b:=h_split_c)] + rw [join_add_join] + rw [join_eq_join_iff] + split_ands + · -- ⊢ concrete_mul a₀ (b₁ + c₁) + concrete_mul (b₀ + c₀) a₁ + -- + concrete_mul (concrete_mul a₁ (b₁ + c₁)) (Z (k - 1)) = + -- concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)) + + -- (concrete_mul a₀ c₁ + concrete_mul c₀ a₁ + concrete_mul (concrete_mul a₁ c₁) (Z (k - 1))) + set a := concrete_mul a₀ b₁ + concrete_mul b₀ a₁ + set b := concrete_mul (concrete_mul a₁ b₁) (Z (k - 1)) + set c := (concrete_mul a₀ c₁ + concrete_mul c₀ a₁ + + concrete_mul (concrete_mul a₁ c₁) (Z (k - 1))) + -- rhs is like a + b + c, but we have to bring it to a + (b + c) then a + (c + b) + conv => + rhs + rw [recArgPrevLevel.add_assoc (a:=a) (b:=b) (c:=c), recArgPrevLevel.add_comm (a:=b) (b:=c)] + unfold a b c + -- set t2 := + set t1 := concrete_mul a₁ c₁ + set t2 := concrete_mul a₁ b₁ + rw 
[add_assoc (b:=concrete_mul t1 (Z (k - 1))) (c:=concrete_mul t2 (Z (k - 1)))] + have de_distrib := recArgPrevLevel.mul_right_distrib (a:=t1) (b:=t2) (c:=Z (k-1)) + rw [←de_distrib] + unfold t1 t2 + have de_distrib_a₁_mul_sum_b₁_c₁ := recArgPrevLevel.mul_left_distrib (a:=a₁) (b:=b₁) (c:=c₁) + rw [add_comm (a:=concrete_mul a₁ b₁) (b:=concrete_mul a₁ c₁)] at de_distrib_a₁_mul_sum_b₁_c₁ + rw [←de_distrib_a₁_mul_sum_b₁_c₁] + rw [←recArgPrevLevel.add_assoc] + have de_distrib_left_a₀_mul_sum_b₁_c₁ := + recArgPrevLevel.mul_left_distrib (a:=a₀) (b:=b₁) (c:=c₁) + have de_distrib_right_a₁_mul_sum_b₀_c₀ := + recArgPrevLevel.mul_right_distrib (a:=b₀) (b:=c₀) (c:=a₁) + conv => + rhs + rw [←add_assoc, add_assoc (b:=concrete_mul b₀ a₁) (c:=concrete_mul a₀ c₁), + add_comm (a:=concrete_mul b₀ a₁) (b:=concrete_mul a₀ c₁)] + rw [←add_assoc, ←de_distrib_left_a₀_mul_sum_b₁_c₁] + rw [add_assoc (b:=concrete_mul b₀ a₁) (c:=concrete_mul c₀ a₁)] + rw [←de_distrib_right_a₁_mul_sum_b₀_c₀] + · -- ⊢ concrete_mul a₀ (b₀ + c₀) + concrete_mul a₁ (b₁ + c₁) = + -- concrete_mul a₀ b₀ + concrete_mul a₁ b₁ + (concrete_mul a₀ c₀ + concrete_mul a₁ c₁) + rw [recArgPrevLevel.mul_left_distrib, recArgPrevLevel.mul_left_distrib] + rw [←recArgPrevLevel.add_assoc, ←recArgPrevLevel.add_assoc] + conv => + rhs + rw [recArgPrevLevel.add_assoc (a:=concrete_mul a₀ b₀) (b:=concrete_mul a₁ b₁) + (c:=concrete_mul a₀ c₀), recArgPrevLevel.add_comm (a:=concrete_mul a₁ b₁) + (b:=concrete_mul a₀ c₀), ←recArgPrevLevel.add_assoc] + +lemma concrete_mul_right_distrib {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} {h_k: k > 0} + (a b c : ConcreteBinaryTower k) : + concrete_mul (a + b) c = concrete_mul a c + concrete_mul b c := by + rw [concrete_mul_comm (recArg:=recArg) (h_k:=h_k) (a:=(a+b)) (b:=c)] + rw [concrete_mul_comm (recArg:=recArg) (h_k:=h_k) (a:=a) (b:=c)] + rw [concrete_mul_comm (recArg:=recArg) (h_k:=h_k) (a:=b) (b:=c)] + exact concrete_mul_left_distrib (recArg:=recArg) (h_k:=h_k) (a:=c) (b:=a) (c:=b) + +instance 
instInvConcreteBTF: Inv (ConcreteBinaryTower k) where + inv := concrete_inv + +lemma concrete_mul_inv_cancel {recArg : (m : ℕ) → m < k → ConcreteBTFStepResult m} + (a : ConcreteBinaryTower k) (h : a ≠ 0) : + concrete_mul a (concrete_inv a) = one := by + unfold concrete_inv + by_cases h_k_zero : k = 0 + · rw [dif_pos h_k_zero] + have h_2_pow_k_eq_1: 2 ^ k = 1 := by rw [h_k_zero]; norm_num + let a0 : ConcreteBinaryTower 0 := Eq.mp (congrArg ConcreteBinaryTower h_k_zero) a + have a0_is_eq_mp_a: a0 = Eq.mp (congrArg ConcreteBinaryTower h_k_zero) a := by rfl + rcases concrete_eq_zero_or_eq_one (a := a) (h_k_zero:=h_k_zero) with (ha0 | ha1) + · -- ha0: a = zero (k:=k) + have a_is_zero: a = zero (k:=k) := ha0 + have a_is_0: a = 0 := by rw [←zero_is_0, a_is_zero] + contradiction + · -- ha1: a = 1 + have a_is_1: a = 1 := ha1 + have a_ne_0: a ≠ 0 := by rw [a_is_1]; exact one_ne_zero + rw [if_neg a_ne_0] + rw [←one_is_1] + rw [concrete_mul_one (a:=a) (recArg:=recArg)] + rw [ha1] + · by_cases h_a_zero : a = 0 + · contradiction + · rw [dif_neg h_k_zero] + rw [dif_neg h_a_zero] + + by_cases h_a_one : a = 1 + · rw [dif_pos h_a_one] + rw [←one_is_1] + rw [concrete_mul_one (a:=a) (recArg:=recArg)] + rw [h_a_one, one_is_1] + · rw [dif_neg h_a_one] + have h_k_gt_0 : k > 0 := Nat.zero_lt_of_ne_zero h_k_zero + have recArgPrevLevel := recArg (k-1) (Nat.sub_one_lt_of_lt h_k_gt_0) + let split_a := split h_k_gt_0 a + let a₁ := split_a.fst + let a₀ := split_a.snd + have h_a_split : split h_k_gt_0 a = (a₁, a₀) := by rfl + have h_a₁ : (split h_k_gt_0 a).1 = a₁ := by rfl + have h_a₀ : (split h_k_gt_0 a).2 = a₀ := by rfl + simp [h_a₁, h_a₀] -- resolve the match of split a + -- distribute all concrete_mul over the addition + simp only [recArgPrevLevel.mul_left_distrib] + -- NOTE: we have to exploit the special structure of concrete_inv for this + sorry + +lemma concrete_inv_one: + concrete_inv (k:=k) 1 = 1 := by + unfold concrete_inv + by_cases h_k_zero : k = 0 + · simp only [h_k_zero]; norm_num 
+ · simp only [h_k_zero]; norm_num + +instance instHDivConcreteBTF: HDiv (ConcreteBinaryTower k) (ConcreteBinaryTower k) + (ConcreteBinaryTower k) where hDiv a b := a * (concrete_inv b) + +lemma concrete_div_eq_mul_inv (a b : ConcreteBinaryTower k) : a / b = a * (concrete_inv b) := by + rfl + +instance instHPowConcreteBTF: HPow (ConcreteBinaryTower k) ℤ (ConcreteBinaryTower k) where + hPow a n := + match n with + | Int.ofNat m => concrete_pow_nat a m + | Int.negSucc m => + -- n = -(m+1) + if a = 0 then 0 + else concrete_pow_nat (concrete_inv a) (m + 1) -- a^(-(m+1)) = (a^(-1))^(m+1) + +instance : Div (ConcreteBinaryTower k) where + div a b := a * (concrete_inv b) + +end InductiveConcreteBTFPropertiesProofs +------------------------------------------------------------------------------------------- + +def InductiveConcreteBTFPropertiesAux (k : ℕ) (rec : ∀ m : ℕ, m < k → ConcreteBTFStepResult m): + ConcreteBTFStepResult k := + match k with + | 0 => + let result: ConcreteBTFStepResult 0 :={ + mul_eq := fun a b h_k _ _ _ _ _ _ => by + have h_l_ne_lt_0 := Nat.not_lt_zero 0 + exact absurd h_k h_l_ne_lt_0, + zero_mul := concrete_zero_mul0, + zero_mul' := concrete_zero_mul0, + mul_zero := concrete_mul_zero0, + mul_zero' := concrete_mul_zero0, + one_mul := concrete_one_mul0, + mul_one := concrete_mul_one0, + mul_assoc := concrete_mul_assoc0, + mul_comm := concrete_mul_comm0, + mul_left_distrib := concrete_mul_left_distrib0, + mul_right_distrib := concrete_mul_right_distrib0, + add_assoc := add_assoc, + add_comm := add_comm, + add_zero := add_zero, + zero_add := zero_add, + add_neg := neg_add_cancel, + mul_inv_cancel := concrete_mul_inv_cancel (k:=0) (recArg := rec) + } + result + | k + 1 => + -- rec : (m : ℕ) → m < k + 1 → ConcreteBTFStepResult m + let result: ConcreteBTFStepResult (k+1) :={ + zero_mul := concrete_zero_mul (recArg := rec), + zero_mul' := fun a => by rw [←zero_is_0]; exact concrete_zero_mul (recArg := rec) a + mul_zero := concrete_mul_zero (recArg := rec), + 
mul_zero' := fun a => by rw [←zero_is_0]; exact concrete_mul_zero (recArg := rec) a + one_mul := concrete_one_mul (recArg := rec), + mul_one := concrete_mul_one (recArg := rec), + mul_assoc := concrete_mul_assoc (recArg := rec) (h_k := Nat.succ_pos k), + mul_comm := concrete_mul_comm (recArg := rec) (h_k := Nat.succ_pos k), + mul_left_distrib := concrete_mul_left_distrib (recArg := rec) (h_k := Nat.succ_pos k), + mul_right_distrib := concrete_mul_right_distrib (recArg := rec) (h_k := Nat.succ_pos k), + add_assoc := add_assoc, + add_comm := add_comm, + add_zero := add_zero, + zero_add := zero_add, + add_neg := neg_add_cancel, + mul_inv_cancel := concrete_mul_inv_cancel (k:=k+1) (recArg := rec), + mul_eq := concrete_mul_eq (k:=k+1) (recArg := rec), + } + result + +def InductiveConcreteBTFProperties (k : ℕ) : ConcreteBTFStepResult k := + WellFounded.fix (measure id).wf (fun k rec => InductiveConcreteBTFPropertiesAux k rec) k + +instance instRingConcrete {k:ℕ}: Ring (ConcreteBinaryTower k) where + toAddCommGroup := inferInstance + toOne := inferInstance + mul := concrete_mul + mul_assoc := (InductiveConcreteBTFProperties (k:=k)).mul_assoc + one_mul := (InductiveConcreteBTFProperties (k:=k)).one_mul + mul_one := (InductiveConcreteBTFProperties (k:=k)).mul_one + left_distrib := (InductiveConcreteBTFProperties (k:=k)).mul_left_distrib + right_distrib := (InductiveConcreteBTFProperties (k:=k)).mul_right_distrib + zero_mul := (InductiveConcreteBTFProperties (k:=k)).zero_mul + mul_zero := (InductiveConcreteBTFProperties (k:=k)).mul_zero + + natCast n := natCast n + natCast_zero := natCast_zero + natCast_succ n := natCast_succ n + intCast n := intCast n + intCast_ofNat n := intCast_ofNat n + intCast_negSucc n := intCast_negSucc n + +instance instDivisionRingConcrete {k:ℕ}: DivisionRing (ConcreteBinaryTower k) where + toRing := instRingConcrete (k:=k) + inv := concrete_inv + exists_pair_ne := concrete_exists_pair_ne (k := k) + mul_inv_cancel := (InductiveConcreteBTFProperties 
(k:=k)).mul_inv_cancel + inv_zero := concrete_inv_zero + qsmul := (Rat.castRec · * ·) + nnqsmul := (NNRat.castRec · * ·) + +instance instFieldConcrete {k:ℕ}: Field (ConcreteBinaryTower k) where + toDivisionRing := instDivisionRingConcrete (k:=k) + mul_comm := (InductiveConcreteBTFProperties (k:=k)).mul_comm + +section Tests + +#check instFieldConcrete (k:=0) +#check instFieldConcrete (k:=5) + +#eval bitVecToString 5 (BitVec.ofNat 5 1) -- 5 in 4 bits is 0101 +#eval split (k:=5) (by omega) (fromNat (k:=5) 1) -- 1 => (0, 1) +#eval (Z 2).toBitString -- 01|00 +#eval (one (k:=2)).toBitString -- 0001 +#eval (zero (k:=2)).toBitString -- 0000 + +#eval (fromNat (k:=2) 3).toBitString +#eval (fromNat (k:=3) 3).toBitString +#eval (fromNat (k:=4) 3).toBitString + +#eval Z (0) +#eval Z (1) +#eval Z (2) +#eval Z (3) +#eval Z (4) +#eval Z (5) +#eval Z (6) + +#eval (fromNat (k:=1) 3) * (fromNat (k:=1) 3) -- 9#4 +#eval (fromNat (k:=4) 3) * (fromNat (k:=4) 3) -- 9#4 +#eval (fromNat (k:=2) 3) * (fromNat (k:=2) 3) -- 9#4 +#eval (fromNat (k:=5) 7) * (fromNat (k:=5) 20) -- 9#4 +#eval add (fromNat (k:=9) 1) (fromNat (k:=9) 7) -- -- 000001 xor 001101 = 001100 = 6#512 +#eval (fromNat (k:=9) 1) + (fromNat (k:=9) 7) + +-- Test function to bundle multiple evaluations +def runTests : IO Unit := do + -- Test k = 0 (field of order 2, like GF(2)) + let k0 : ℕ := 0 + let zero0 := zero (k := k0) + let one0 := one (k := k0) + let five0 := fromNat (k := k0) 5 -- 5 mod 2 = 1 in k=0 + IO.println s!"--- Tests for k = {k0} (width = {width k0}) ---" + IO.println s!"zero = {zero0.toBitString}" + IO.println s!"one = {one0.toBitString}" + IO.println s!"fromNat 5 = {five0.toBitString}" + IO.println s!"zero + one = {(add zero0 one0).toBitString}" + IO.println s!"one + one = {(add one0 one0).toBitString}" + IO.println s!"one * one = {(concrete_mul one0 one0).toBitString}" + IO.println s!"zero * one = {(concrete_mul zero0 one0).toBitString}" + + -- Test k = 1 (field of order 4, like GF(4)) + let k1 : ℕ := 1 + let 
zero1 := zero (k := k1) + let one1 := one (k := k1) + let two1 := fromNat (k := k1) 2 + let three1 := fromNat (k := k1) 3 + IO.println s!"--- Tests for k = {k1} (width = {width k1}) ---" + IO.println s!"zero = {zero1.toBitString}" + IO.println s!"one = {one1.toBitString}" + IO.println s!"fromNat 2 = {two1.toBitString}" + IO.println s!"fromNat 3 = {three1.toBitString}" + IO.println s!"one + two = {(add one1 two1).toBitString}" + IO.println s!"two + three = {(add two1 three1).toBitString}" + IO.println s!"one * two = {(concrete_mul one1 two1).toBitString}" + IO.println s!"two * three = {(concrete_mul two1 three1).toBitString}" + + -- Test k = 2 (field of order 16, like GF(16)) + let k2 : ℕ := 2 + let zero2 := zero (k := k2) + let one2 := one (k := k2) + let five2 := fromNat (k := k2) 5 + let seven2 := fromNat (k := k2) 7 + IO.println s!"--- Tests for k = {k2} (width = {width k2}) ---" + IO.println s!"zero = {zero2.toBitString}" + IO.println s!"one = {one2.toBitString}" + IO.println s!"fromNat 5 = {five2.toBitString}" + IO.println s!"fromNat 7 = {seven2.toBitString}" + IO.println s!"five + seven = {(add five2 seven2).toBitString}" + IO.println s!"five * one = {(concrete_mul five2 one2).toBitString}" + IO.println s!"five * seven = {(concrete_mul five2 seven2).toBitString}" + +-- Run the tests +#eval runTests + +end Tests + +end ConcreteDefinition +end BinaryTower + +#check BinaryTower.ConcreteDefinition.instFieldConcrete (k:=2) diff --git a/ArkLib/Data/FieldTheory/BinaryTowerField/Prelude.lean b/ArkLib/Data/FieldTheory/BinaryField/Tower/Prelude.lean similarity index 99% rename from ArkLib/Data/FieldTheory/BinaryTowerField/Prelude.lean rename to ArkLib/Data/FieldTheory/BinaryField/Tower/Prelude.lean index 54f654749..439059e69 100644 --- a/ArkLib/Data/FieldTheory/BinaryTowerField/Prelude.lean +++ b/ArkLib/Data/FieldTheory/BinaryField/Tower/Prelude.lean @@ -508,6 +508,17 @@ theorem one_le_two_pow_n (n : ℕ) : 1 ≤ 2 ^ n := by 1 = 2^0 := by rw [Nat.pow_zero] _ ≤ 2 ^ n := 
Nat.pow_le_pow_right (by decide) (by exact Nat.zero_le n) +theorem zero_lt_pow_n (m: ℕ) (n: ℕ) (h_m: m > 0): 0 < m^n := by + exact Nat.pow_pos h_m + +-- 1 ≤ 2 ^ k - 2 ^ (k - 1) +theorem one_le_sub_consecutive_two_pow (n: ℕ): 1 ≤ 2^(n+1) - 2^n := by + calc + 1 ≤ 2^n := Nat.one_le_pow _ _ (by decide) + _ = 2^(n+1) - 2^n := by + rw [Nat.pow_succ, Nat.mul_two] + rw [Nat.add_sub_cancel] + theorem two_pow_ne_zero (n : ℕ) : 2 ^ n ≠ 0 := by by_contra hn have h_1_le_0: 1 ≤ 0 := by diff --git a/ArkLib/Data/Fin/Basic.lean b/ArkLib/Data/Fin/Basic.lean index 6e8e2ba04..98e7ed893 100644 --- a/ArkLib/Data/Fin/Basic.lean +++ b/ArkLib/Data/Fin/Basic.lean @@ -9,7 +9,7 @@ import Mathlib.Algebra.Order.Sub.Basic import Mathlib.Algebra.Polynomial.Eval.Defs import Mathlib.Data.Fin.Tuple.Take import Batteries.Data.Fin.Fold -import ArkLib.Data.Math.DepCast +import ArkLib.Data.Classes.DCast /-! # Lemmas on `Fin` and `Fin`-indexed tuples @@ -154,7 +154,7 @@ theorem induction_append_left {m n : ℕ} {motive : Fin (m + n + 1) → Sort*} { induction (motive := motive) zero succ ⟨i, by omega⟩ = @induction m (fun j => motive ⟨j, by omega⟩) zero (fun j x => succ ⟨j, by omega⟩ x) i := by induction i using Fin.induction with - | zero => simp [induction_zero, Fin.cast] + | zero => simp [induction_zero]; rfl | succ i ih => simp at ih ⊢ have : (⟨i.1 + 1, by omega⟩ : Fin (m + n + 1)) = (⟨i, by omega⟩ : Fin (m + n)).succ := rfl @@ -171,7 +171,7 @@ theorem induction_append_right {m n : ℕ} {motive : Fin (m + n + 1) → Sort*} (fun i x => succ (i.natAdd m) x) i := by induction i using Fin.induction with | zero => - simp [castAdd, castLE, last, natAdd] + simp [castAdd, castLE, last, natAdd, HMod.hMod, Mod.mod, Nat.mod] rw [induction_append_left (i := ⟨m, by omega⟩)] rfl | succ i ih => @@ -279,7 +279,7 @@ theorem take_addCases'_left {n' : ℕ} {β : Fin n' → Sort u} (m : ℕ) (h : m take m (Nat.le_add_right_of_le h) (addCases' u v) i = (append_left α β (castLE h i)) ▸ (take m h u i) := by have : i < n := 
Nat.lt_of_lt_of_le i.isLt h - simp [take_apply, addCases', addCases, this, cast_eq_iff_heq, castLT, castLE] + simp [take_apply, addCases', addCases, this, cast_eq_iff_heq, castLE] -- theorem take_addCases'_right {n' : ℕ} {β : Fin n' → Sort u} (m : ℕ) (h : m ≤ n') -- (u : (i : Fin n) → α i) (v : (j : Fin n') → β j) (i : Fin (n + m)) : @@ -385,7 +385,7 @@ theorem drop_tail {α : Fin (n + 1) → Sort*} (m : ℕ) (h : m ≤ n) (v : (i : theorem drop_repeat {α : Type*} {n' : ℕ} (m : ℕ) (h : m ≤ n) (a : Fin n' → α) : HEq (drop (m * n') (Nat.mul_le_mul_right n' h) (Fin.repeat n a)) (Fin.repeat (n - m) a) := - (Fin.heq_fun_iff (Nat.sub_mul n m n').symm).mpr (fun i => by simp [cast, modNat]) + (Fin.heq_fun_iff (Nat.sub_mul n m n').symm).mpr (fun i => by simp [modNat]) end Drop @@ -453,7 +453,7 @@ theorem ranges_eq_ranges_list {a : Fin n → ℕ} : otherwise. This is the dependent version of `Fin.divNat`. - -/ +-/ def divSum? {m : ℕ} (n : Fin m → ℕ) (k : ℕ) : Option (Fin m) := find (fun i => k < ∑ j, n (castLE i.isLt j)) @@ -465,7 +465,7 @@ theorem divSum?_is_some_iff_lt_sum {m : ℕ} {n : Fin m → ℕ} {k : ℕ} : obtain ⟨i, hi⟩ := h have : i.val + 1 + (m - i.val - 1) = m := by omega rw [← Fin.sum_congr' _ this, Fin.sum_univ_add] - simp only [cast, coe_castAdd, coe_natAdd, gt_iff_lt] + simp only [gt_iff_lt] exact Nat.lt_add_right _ hi · intro isLt have : m ≠ 0 := fun h => by subst h; simp at isLt @@ -486,7 +486,7 @@ theorem sum_le_of_divSum?_eq_some {m : ℕ} {n : Fin m → ℕ} {k : Fin (∑ j, · have : (i.val - 1) + 1 = i.val := by omega rw [← Fin.sum_congr' _ this] have := Fin.find_min (Option.mem_def.mp hi) (j := ⟨i.val - 1, by omega⟩) <| Fin.lt_def.mpr - (by simp only [and_true]; omega) + (by simp only; omega) exact not_lt.mp this def modSum {m : ℕ} {n : Fin m → ℕ} (k : Fin (∑ j, n j)) : Fin (n (divSum k)) := @@ -494,7 +494,7 @@ def modSum {m : ℕ} {n : Fin m → ℕ} (k : Fin (∑ j, n j)) : Fin (n (divSum have divSum_mem : divSum k ∈ divSum? 
n k := by simp only [divSum, divSum?, Option.mem_def, Option.some_get] have hk : k < ∑ j, n (Fin.castLE (divSum k).isLt j) := Fin.find_spec _ divSum_mem - simp only [Fin.sum_univ_succAbove _ (Fin.last (divSum k)), val_last, succAbove_last] at hk + simp only [Fin.sum_univ_succAbove _ (Fin.last (divSum k)), succAbove_last] at hk rw [Nat.sub_lt_iff_lt_add' (sum_le_of_divSum?_eq_some divSum_mem)] rw [add_comm] exact hk⟩ @@ -532,7 +532,7 @@ theorem finSigmaFinEquiv'_apply {m : ℕ} {n : Fin m → ℕ} (k : (i : Fin m) theorem finSigmaFinEquiv'_pair {m : ℕ} {n : Fin m → ℕ} (i : Fin m) (k : Fin (n i)) : (finSigmaFinEquiv' ⟨i, k⟩ : ℕ) = ∑ j, n (Fin.castLE i.isLt.le j) + k := by - simp only [finSigmaFinEquiv', ↓reduceDIte, Equiv.ofRightInverseOfCardLE_apply] + simp only [finSigmaFinEquiv', Equiv.ofRightInverseOfCardLE_apply] end FinSigmaFinEquiv @@ -597,12 +597,12 @@ theorem dfoldl_congr {n : ℕ} subst hinit rfl -/-- Congruence for `dfoldl` whose type vectors are indexed by `ι` and have a `DepCast` instance +/-- Congruence for `dfoldl` whose type vectors are indexed by `ι` and have a `DCast` instance Note that we put `cast` (instead of `dcast`) in the theorem statement for easier matching, but `dcast` inside the hypotheses for downstream proving. -/ theorem dfoldl_congr_dcast {n : ℕ} - {ι : Type v} {α α' : Fin (n + 1) → ι} {β : ι → Type u} [DepCast ι β] + {ι : Type v} {α α' : Fin (n + 1) → ι} {β : ι → Type u} [DCast ι β] {f : (i : Fin n) → β (α i.castSucc) → β (α i.succ)} {f' : (i : Fin n) → β (α' i.castSucc) → β (α' i.succ)} {init : β (α 0)} {init' : β (α' 0)} @@ -613,7 +613,7 @@ theorem dfoldl_congr_dcast {n : ℕ} cast (by have := funext hα; subst this; simp) (dfoldl n (fun i => β (α' i)) f' init') := by have hα' : α = α' := funext hα cases hα' - simp_all [dcast_id, comp_apply] + simp_all only [dcast_eq, cast_eq] simp at hf have hf' : f = f' := funext₂ hf cases hf' @@ -621,7 +621,7 @@ theorem dfoldl_congr_dcast {n : ℕ} rfl /-- Distribute `dcast` inside `dfoldl`. 
Requires the minimal condition of `α = α'` -/ -theorem dfoldl_dcast {ι : Type v} {β : ι → Type u} [DepCast ι β] +theorem dfoldl_dcast {ι : Type v} {β : ι → Type u} [DCast ι β] {n : ℕ} {α α' : Fin (n + 1) → ι} {f : (i : Fin n) → β (α i.castSucc) → β (α i.succ)} {init : β (α 0)} (hα : ∀ i, α i = α' i) : @@ -630,7 +630,7 @@ theorem dfoldl_dcast {ι : Type v} {β : ι → Type u} [DepCast ι β] (fun i a => dcast (hα _) (f i (dcast (hα _).symm a))) (dcast (hα 0) init) := by have hα' : α = α' := funext hα subst hα' - simp_all [dcast_id, comp_apply] + simp_all only [dcast_eq] -- theorem dfoldl_dcast₂ {n : ℕ} -- {ι₁ : Type v} {ι₂ : ι₁ → Type w} {α α' : Fin (n + 1) → (i : ι₁) → ι₂ i} @@ -653,113 +653,4 @@ theorem dfoldl_dcast {ι : Type v} {β : ι → Type u} [DepCast ι β] end Fold -section Lift - -variable {α : Type*} - {m n : ℕ} - -/- - Basic ad-hoc lifting; - - `liftF : (Fin n → α) → ℕ → α` - - `liftF` : (ℕ → α) → Fin n → α - These invert each other assuming appropriately-bounded domains. - - These are specialised versions of true lifts that uses `Nonempty` / `Inhabited` - and take the complement of the finite set which is the domain of the function being lifted. --/ - -variable [Zero α] {f : ℕ → α} {f' : Fin n → α} - -/-- - `liftF` lifts functions over domains `Fin n` to functions over domains `ℕ` - by returning `0` on points `≥ n`. --/ -def liftF (f : Fin n → α) : ℕ → α := - fun m ↦ if h : m < n then f ⟨m, h⟩ else 0 - -/-- - `liftF'` lifts functions over domains `ℕ` to functions over domains `Fin n` - by taking the obvious injection. 
--/ -def liftF' (f : ℕ → α) : Fin n → α := - fun m ↦ f m.1 - -open Fin (liftF' liftF) - -@[simp] -lemma liftF_succ {f : Fin (n + 1) → α} : liftF f n = f ⟨n, Nat.lt_add_one _⟩ := by - aesop (add simp liftF) - -lemma liftF'_liftF_of_lt {k : Fin m} (h : k < n) : - liftF' (n := m) (liftF (n := n) f') k = f' ⟨k, by omega⟩ := by - aesop (add simp [liftF, liftF']) - -@[simp] -lemma liftF'_liftF_succ {f : Fin (n + 1) → α} {x : Fin n} : - liftF' (liftF (n := n + 1) f) x = f x.castSucc := by - aesop (add simp [liftF, liftF']) (add safe (by omega)) - -@[simp] -lemma liftF'_liftF : Function.LeftInverse liftF' (liftF (α := α) (n := n)) := by - aesop (add simp [Function.LeftInverse, liftF, liftF']) - -@[simp] -lemma liftF'_liftF_eq : liftF' (liftF f') = f' := by unfold liftF' liftF; simp - -lemma liftF_liftF'_of_lt (h : m < n) : liftF (liftF' (n := n) f) m = f m := by - aesop (add simp liftF) - -@[simp] -lemma liftF_liftF'_succ : liftF (liftF' (n := n + 1) f) n = f n := by - aesop (add simp liftF) - -lemma liftF_eval {f : Fin n → α} {i : Fin n} : - liftF f i.val = f i := by - aesop (add simp liftF) - -lemma lt_of_liftF_ne_zero {f : Fin n → α} {i : ℕ} - (h : liftF f i ≠ 0) - : i < n := by - aesop (add simp liftF) - -lemma liftF_ne_zero_of_lt {i : ℕ} (h : i < n) : liftF f' i ≠ 0 ↔ f' ⟨i, h⟩ ≠ 0 := by - aesop (add simp liftF) - -lemma liftF_eq_of_lt {i : ℕ} (h : i < n) : liftF f' i = f' ⟨i, h⟩ := by - aesop (add simp liftF) - -@[simp] -lemma liftF_zero_eq_zero - : liftF (fun (_ : Fin n) ↦ (0 : α)) = (fun _ ↦ (0 : α)) := by - aesop (add simp liftF) - -@[simp] -lemma liftF'_zero_eq_zero - : liftF' (fun _ ↦ (0 : α)) = (fun (_ : Fin n) ↦ (0 : α)) := by - aesop (add simp liftF') - -abbrev contract (m : ℕ) (f : Fin n → α) := liftF (liftF' (n := m) (liftF f)) - -open Fin (contract) - -lemma contract_eq_liftF_of_lt {k : ℕ} (h₁ : k < m) : - contract m f' k = liftF f' k := by - aesop (add simp [contract, liftF, liftF']) - -attribute [simp] contract.eq_def - -variable {F : Type*} [Semiring F] 
{p : Polynomial F} - -open Polynomial - -lemma eval_liftF_of_lt {f : Fin m → F} (h : n < m) : - eval (liftF f n) p = eval (f ⟨n, h⟩) p := by - aesop (add simp liftF) - -@[simp] -lemma liftF'_p_coeff {p : F[X]} {k : ℕ} {i : Fin k} : liftF' p.coeff i = p.coeff i := by - simp [liftF'] - -end Lift - end Fin diff --git a/ArkLib/Data/Fin/Lift.lean b/ArkLib/Data/Fin/Lift.lean new file mode 100644 index 000000000..d41179ffb --- /dev/null +++ b/ArkLib/Data/Fin/Lift.lean @@ -0,0 +1,122 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: František Silváši +-/ + +import ArkLib.Data.Fin.Basic + +/-! + # Lifting of `Fin`-indexed vectors +-/ + +namespace Fin + +section Lift + +variable {α : Type*} + {m n : ℕ} + +/- + Basic ad-hoc lifting; + - `liftF : (Fin n → α) → ℕ → α` + - `liftF` : (ℕ → α) → Fin n → α + These invert each other assuming appropriately-bounded domains. + + These are specialised versions of true lifts that uses `Nonempty` / `Inhabited` + and take the complement of the finite set which is the domain of the function being lifted. +-/ + +variable [Zero α] {f : ℕ → α} {f' : Fin n → α} + +/-- `liftF` lifts functions over domains `Fin n` to functions over domains `ℕ` + by returning `0` on points `≥ n`. +-/ +def liftF (f : Fin n → α) : ℕ → α := + fun m ↦ if h : m < n then f ⟨m, h⟩ else 0 + +/-- `liftF'` lifts functions over domains `ℕ` to functions over domains `Fin n` + by taking the obvious injection. 
+-/ +def liftF' (f : ℕ → α) : Fin n → α := + fun m ↦ f m.1 + +open Fin (liftF' liftF) + +@[simp] +lemma liftF_succ {f : Fin (n + 1) → α} : liftF f n = f ⟨n, Nat.lt_add_one _⟩ := by + aesop (add simp liftF) + +lemma liftF'_liftF_of_lt {k : Fin m} (h : k < n) : + liftF' (n := m) (liftF (n := n) f') k = f' ⟨k, by omega⟩ := by + aesop (add simp [liftF, liftF']) + +@[simp] +lemma liftF'_liftF_succ {f : Fin (n + 1) → α} {x : Fin n} : + liftF' (liftF (n := n + 1) f) x = f x.castSucc := by + aesop (add simp [liftF, liftF']) (add safe (by omega)) + +@[simp] +lemma liftF'_liftF : Function.LeftInverse liftF' (liftF (α := α) (n := n)) := by + aesop (add simp [Function.LeftInverse, liftF, liftF']) + +@[simp] +lemma liftF'_liftF_eq : liftF' (liftF f') = f' := by unfold liftF' liftF; simp + +lemma liftF_liftF'_of_lt (h : m < n) : liftF (liftF' (n := n) f) m = f m := by + aesop (add simp liftF) + +@[simp] +lemma liftF_liftF'_succ : liftF (liftF' (n := n + 1) f) n = f n := by + aesop (add simp liftF) + +lemma liftF_eval {f : Fin n → α} {i : Fin n} : + liftF f i.val = f i := by + aesop (add simp liftF) + +lemma lt_of_liftF_ne_zero {f : Fin n → α} {i : ℕ} + (h : liftF f i ≠ 0) + : i < n := by + aesop (add simp liftF) + +lemma liftF_ne_zero_of_lt {i : ℕ} (h : i < n) : liftF f' i ≠ 0 ↔ f' ⟨i, h⟩ ≠ 0 := by + aesop (add simp liftF) + +lemma liftF_eq_of_lt {i : ℕ} (h : i < n) : liftF f' i = f' ⟨i, h⟩ := by + aesop (add simp liftF) + +@[simp] +lemma liftF_zero_eq_zero + : liftF (fun (_ : Fin n) ↦ (0 : α)) = (fun _ ↦ (0 : α)) := by + aesop (add simp liftF) + +@[simp] +lemma liftF'_zero_eq_zero + : liftF' (fun _ ↦ (0 : α)) = (fun (_ : Fin n) ↦ (0 : α)) := by + aesop (add simp liftF') + +abbrev contract (m : ℕ) (f : Fin n → α) := liftF (liftF' (n := m) (liftF f)) + +open Fin (contract) + +lemma contract_eq_liftF_of_lt {k : ℕ} (h₁ : k < m) : + contract m f' k = liftF f' k := by + aesop (add simp [contract, liftF, liftF']) + +attribute [simp] contract.eq_def + +variable {F : Type*} [Semiring F] 
{p : Polynomial F} + +open Polynomial + +lemma eval_liftF_of_lt {f : Fin m → F} (h : n < m) : + eval (liftF f n) p = eval (f ⟨n, h⟩) p := by + aesop (add simp liftF) + +@[simp] +lemma liftF'_p_coeff {p : F[X]} {k : ℕ} {i : Fin k} : liftF' p.coeff i = p.coeff i := by + simp [liftF'] + +end Lift + +end Fin diff --git a/ArkLib/Data/Fin/Pad.lean b/ArkLib/Data/Fin/Pad.lean new file mode 100644 index 000000000..904f1bf06 --- /dev/null +++ b/ArkLib/Data/Fin/Pad.lean @@ -0,0 +1,31 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.Data.Fin.Basic + +/-! + # Padding of `Fin`-indexed vectors +-/ + +namespace Fin + +-- Is it worth it defining a dependent version of padding? + +variable {α : Sort*} + +/-- Pad a `Fin`-indexed vector on the right with an element `a`. + +This becomes truncation if `n < m`. -/ +def rightpad (n : ℕ) (a : α) {m : ℕ} (v : Fin m → α) : Fin n → α := + fun i => if h : i < m then v ⟨i, h⟩ else a + +/-- Pad a `Fin`-indexed vector on the left with an element `a`. + +This becomes truncation if `n < m`. -/ +def leftpad (n : ℕ) (a : α) {m : ℕ} (v : Fin m → α) : Fin n → α := + fun i => if h : n - m ≤ i then v ⟨i - (n - m), by omega⟩ else a + +end Fin diff --git a/ArkLib/Data/Hash/DomainSep.lean b/ArkLib/Data/Hash/DomainSep.lean new file mode 100644 index 000000000..fe2d0f3c2 --- /dev/null +++ b/ArkLib/Data/Hash/DomainSep.lean @@ -0,0 +1,269 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Init +import ArkLib.Data.Hash.DuplexSponge + +/-! + # Domain Separator + + This file contains the API for domain separators as defined in the + [spongefish](https://github.com/arkworks-rs/spongefish) Rust library. + + The API is designed to be as close as possible to the Rust implementation (as of June 22, 2025). 
+ + A domain separator is a string that specifies the protocol in a simple, + non-ambiguous, human-readable format. A typical example is the following: + + ```text + domain-separator A32generator A32public-key R A32commitment S32challenge A32response + ``` + The domain-separator is a user-specified string uniquely identifying the end-user application + (to avoid cross-protocol attacks). + The letter `A` indicates the absorption of a public input (an `ABSORB`), while the letter `S` + indicates the squeezing (a `SQUEEZE`) of a challenge. + The letter `R` indicates a ratcheting operation: ratcheting means invoking the hash function + even on an incomplete block. + It provides forward secrecy and allows it to start from a clean rate. + After the operation type, is the number of elements in base 10 that are being absorbed/squeezed. + Then, follows the label associated with the element being absorbed/squeezed. This often comes + from the underlying description of the protocol. The label cannot start with a digit or contain + the NULL byte. + + NOTE: this file is generated by Claude 4 Sonnet and has not been fully proof-read. +-/ + +/-- Errors that can occur during domain separator operations. + +Rust interface: +```rust +pub struct DomainSeparatorMismatch(String); +``` +-/ +structure DomainSeparatorMismatch where + message : String +deriving Inhabited, DecidableEq, Repr + +/-- The domain separator of an interactive protocol. + +Rust interface: +```rust +#[derive(Clone)] +pub struct DomainSeparator +where + U: Unit, + H: DuplexSpongeInterface, +{ + /// Encoded domain separator string representing the sequence of sponge operations. + io: String, + /// Marker for the sponge hash function and unit type used. + _hash: PhantomData<(H, U)>, +} +``` + +The struct [`DomainSeparator`] guarantees the creation of a valid domain separator string, +whose lengths are coherent with the types described in the protocol. 
No information about +the types themselves is stored in a domain separator. +This means that [`ProverState`] or [`VerifierState`] instances can generate successfully +a protocol transcript respecting the length constraint but not the types. +-/ +structure DomainSeparator (U : Type) [SpongeUnit U] (H : Type*) [DuplexSpongeInterface U H] where + /-- Encoded domain separator string representing the sequence of sponge operations. -/ + io : String +deriving Repr + +namespace DomainSeparator + +/-- The null byte separator between operations in the domain separator (unicode/ASCII value 0) + and as such is the only forbidden character in labels. -/ +abbrev SEP_BYTE : String := ⟨[Char.ofNat 0]⟩ + +/-- Sponge operations. + +Rust interface: +```rust +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Op { + /// Indicates absorption of `usize` lanes. + /// + /// In a tag, absorb is indicated with 'A'. + Absorb(usize), + /// Indicates processing of out-of-band message + /// from prover to verifier. + /// + /// This is useful for e.g. adding merkle proofs to the proof. + Hint, + /// Indicates squeezing of `usize` lanes. + /// + /// In a tag, squeeze is indicated with 'S'. + Squeeze(usize), + /// Indicates a ratchet operation. + /// + /// For sponge functions, we squeeze sizeof(capacity) lanes + /// and initialize a new state filling the capacity. + /// This allows for a more efficient preprocessing, and for removal of + /// private information stored in the rate. + Ratchet, +} +``` +-/ +inductive Op where + /-- Indicates absorption of `count` lanes. In a tag, absorb is indicated with 'A'. -/ + | Absorb (count : Nat) + /-- Indicates processing of out-of-band message from prover to verifier. + This is useful for e.g. adding merkle proofs to the proof. -/ + | Hint + /-- Indicates squeezing of `count` lanes. + In a tag, squeeze is indicated with 'S'. -/ + | Squeeze (count : Nat) + /-- Indicates a ratchet operation. 
+ For sponge functions, we squeeze sizeof(capacity) lanes + and initialize a new state filling the capacity. + This allows for a more efficient preprocessing, and for removal of + private information stored in the rate. -/ + | Ratchet +deriving DecidableEq, Repr + +variable {H : Type*} {U : Type} [SpongeUnit U] [DuplexSpongeInterface U H] + +/-- Create a new DomainSeparator with the domain separator. + +Rust interface: +```rust +pub fn new(session_identifier: &str) -> Self +``` +-/ +def new (sessionIdentifier : String) : DomainSeparator U H := + -- TODO: Add assertion that sessionIdentifier doesn't contain SEP_BYTE + { io := sessionIdentifier } + +/-- Create a DomainSeparator from a string directly. + +Rust interface: +```rust +pub const fn from_string(io: String) -> Self +``` +-/ +def fromString (io : String) : DomainSeparator U H := + { io := io } + +/-- Absorb `count` native elements. + +Rust interface: +```rust +pub fn absorb(self, count: usize, label: &str) -> Self +``` +-/ +def absorb (ds : DomainSeparator U H) (count : Nat) (label : String) : DomainSeparator U H := + -- TODO: Add assertions: + -- - count > 0 + -- - label doesn't contain SEP_BYTE + -- - label doesn't start with a digit + { io := ds.io ++ SEP_BYTE ++ s!"A{count}" ++ label } + +/-- Hint `count` native elements. + +Rust interface: +```rust +pub fn hint(self, label: &str) -> Self +``` +-/ +def hint (ds : DomainSeparator U H) (label : String) : DomainSeparator U H := + -- TODO: Add assertion that label doesn't contain SEP_BYTE + { io := ds.io ++ SEP_BYTE ++ "H" ++ label } + +/-- Squeeze `count` native elements. + +Rust interface: +```rust +pub fn squeeze(self, count: usize, label: &str) -> Self +``` +-/ +def squeeze (ds : DomainSeparator U H) (count : Nat) (label : String) : DomainSeparator U H := + -- TODO: Add assertions: + -- - count > 0 + -- - label doesn't contain SEP_BYTE + -- - label doesn't start with a digit + { io := ds.io ++ SEP_BYTE ++ s!"S{count}" ++ label } + +/-- Ratchet the state. 
+ +Rust interface: +```rust +pub fn ratchet(self) -> Self +``` +-/ +def ratchet (ds : DomainSeparator U H) : DomainSeparator U H := + { io := ds.io ++ SEP_BYTE ++ "R" } + +/-- Return the domain separator as bytes. + +Rust interface: +```rust +pub fn as_bytes(&self) -> &[u8] +``` +-/ +def asBytes (ds : DomainSeparator U H) : ByteArray := + ds.io.toUTF8 + +/-- Parse the given domain separator into a sequence of [`Op`]'s. + +Rust interface: +```rust +pub(crate) fn finalize(&self) -> VecDeque +``` +-/ +def finalize (ds : DomainSeparator U H) : Array Op := + -- TODO: Implement parsing logic + sorry + +end DomainSeparator + +/-- Type class for byte-oriented domain separators. + +Rust interface: +```rust +pub trait ByteDomainSeparator { + fn add_bytes(self, count: usize, label: &str) -> Self; + fn hint(self, label: &str) -> Self; + fn challenge_bytes(self, count: usize, label: &str) -> Self; +} +``` +-/ +class ByteDomainSeparator (α : Type*) where + /-- Add bytes to the domain separator (equivalent to absorb). -/ + addBytes : α → Nat → String → α + /-- Add a hint to the domain separator. -/ + hint : α → String → α + /-- Challenge bytes from the domain separator (equivalent to squeeze). -/ + challengeBytes : α → Nat → String → α + +/-- Implementation of ByteDomainSeparator for DomainSeparator. 
+ +Rust implementation: +```rust +impl ByteDomainSeparator for DomainSeparator { + #[inline] + fn add_bytes(self, count: usize, label: &str) -> Self { + self.absorb(count, label) + } + + fn hint(self, label: &str) -> Self { + self.hint(label) + } + + #[inline] + fn challenge_bytes(self, count: usize, label: &str) -> Self { + self.squeeze(count, label) + } +} +``` +-/ +instance {H : Type*} {U : Type} [SpongeUnit U] [DuplexSpongeInterface U H] : + ByteDomainSeparator (DomainSeparator U H) where + addBytes ds count label := ds.absorb count label + hint ds label := ds.hint label + challengeBytes ds count label := ds.squeeze count label diff --git a/ArkLib/Data/Hash/DuplexSponge.lean b/ArkLib/Data/Hash/DuplexSponge.lean new file mode 100644 index 000000000..0a8ede991 --- /dev/null +++ b/ArkLib/Data/Hash/DuplexSponge.lean @@ -0,0 +1,303 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.Data.Classes.HasSize +import ArkLib.Data.Classes.Initialize +import ArkLib.Data.Classes.Serde +import ArkLib.Data.Classes.Zeroize + +/-! + # Duplex Sponge API + + This file contains the API for the duplex sponge, which is a sponge that can be used to hash + data. + + The API is based on the [spongefish](https://github.com/arkworks-rs/spongefish) Rust library. + + The API is designed to be as close as possible to the Rust implementation (as of June 22, 2025). + + The API is subject to change as spongefish changes. +-/ + +/-- Type class for types that can be used as units in a cryptographic sponge. + +Following the [spongefish](https://github.com/arkworks-rs/spongefish) Rust library, we require the +following: + +- The type has zero (i.e. 
`zeroize` in Rust) +- The type can be serialized and deserialized to/from `ByteArray` +- The type has a fixed size in bytes +- The type implements `write` and `read` methods, which are used to write and read the unit to/from + an IO stream. They are implemented by default using the `serialize` and `deserialize` methods, and + the `IO.FS.Stream.{read/write}` functions. +-/ +class SpongeUnit (α : Type) extends Zeroize α, Serde α ByteArray, HasSize α UInt8 where + /-- + Rust interface: + ```rust + /// Write a bunch of units in the wire. + fn write(bunch: &[Self], w: &mut impl std::io::Write) -> Result<(), std::io::Error>; + ``` + Default implementation: serialize each unit of `α`, then write to stream + -/ + write (stream : IO.FS.Stream) : Array α → IO Unit := + Array.foldlM (fun _ a => IO.FS.Stream.write stream (serialize a)) () + /-- + Rust interface: + ```rust + /// Read a bunch of units from the wire + fn read(r: &mut impl std::io::Read, bunch: &mut [Self]) -> Result<(), std::io::Error>; + ``` + Default implementation: read `byteSize * array.length` bytes from stream, deserialize each + unit of `α`, then return the array if there is no error, otherwise throw an error + -/ + read (stream : IO.FS.Stream) : Array α → IO (Array α) := + fun arr => do + let bytes ← Array.mapM (fun _ => IO.FS.Stream.read stream (HasSize.size α UInt8)) arr + let units := Array.mapM deserialize bytes + if h : units.isSome + then return units.get h + else IO.throwServerError "Failed to read units" + +/-- Type class for types that can be used as a duplex sponge, with respect to the sponge unit type + `U`. + +Following the [spongefish](https://github.com/arkworks-rs/spongefish) Rust library, we require the +following: + +- The type is inhabited (`Default` in Rust), and can be zeroized (i.e. 
`Zeroize` in Rust) +- The type can be initialized from a 32-byte vector (seed) +- There are 3 methods: + - `absorbUnchecked` to absorb an array of units in the sponge + - `squeezeUnchecked` to squeeze out an array of units, changing the state of the sponge + - `ratchetUnchecked` to ratchet the state of the sponge +-/ +class DuplexSpongeInterface (U : Type) [SpongeUnit U] (α : Type*) + extends Inhabited α, Zeroize α, Initialize α (Vector UInt8 32) where + /-- Absorb new elements in the sponge. + ```rust + fn absorb_unchecked(&mut self, input: &[U]) -> &mut Self; + ``` + -/ + absorbUnchecked : α × Array U → α + + /-- Squeeze out new elements, changing the state of the sponge. + ```rust + fn squeeze_unchecked(&mut self, output: &mut [U]) -> &mut Self; + ``` + -/ + squeezeUnchecked : α × Array U → α × Array U + + /-- Ratcheting. + ```rust + fn ratchet_unchecked(&mut self) -> &mut Self; + ``` + + More notes from the Rust implementation: + ```rust + /// This operations makes sure that different elements are processed in different blocks. + /// Right now, this is done by: + /// - permuting the state. + /// - zero rate elements. + /// This has the effect that state holds no information about the elements absorbed so far. + /// The resulting state is compressed. + ``` + -/ + ratchetUnchecked : α → α + + +/-- Type class for storing the length & rate of a sponge permutation. -/ +class SpongePermutationSize where + /-- The width of the sponge, equal to rate R plus capacity. -/ + N : Nat + /-- The rate of the sponge. -/ + R : Nat + /-- The rate is less than the width. -/ + R_lt_N : R < N := by omega + /-- The rate is non-zero (i.e. positive). -/ + [neZero_R : NeZero R] + +attribute [instance] SpongePermutationSize.neZero_R + +namespace SpongePermutationSize + +variable [sz : SpongePermutationSize] + +/-- The capacity of the sponge, defined as `N - R`, the width minus the rate. 
-/ +def C : Nat := sz.N - sz.R + +instance [sz : SpongePermutationSize] : NeZero sz.C where + out := by + have := sz.R_lt_N + simp [C]; omega + +end SpongePermutationSize + +/-- Type class for cryptographic permutations used in sponge constructions. + +Rust interface: +```rust +pub trait Permutation: Zeroize + Default + Clone + AsRef<[Self::U]> + AsMut<[Self::U]> { + type U: Unit; + const N: usize; // The width of the sponge + const R: usize; // The rate of the sponge + fn new(iv: [u8; 32]) -> Self; + fn permute(&mut self); +} +``` +Note that we do not quite know how to handle `AsRef` and `AsMut`. My interpretation is that they +basically provide a way to get to the underlying state of the sponge, which is `Vector U N`. +Because of this, I give a tentative API for `AsRef` and `AsMut` basically as a lens between `α` and +`Vector U N` (i.e. `view` and `update`). + +TODO: figure out the needed properties here +-/ +class SpongePermutation (U : Type) [SpongeUnit U] (α : Type*) extends + Inhabited α, + Zeroize α, + SpongePermutationSize, + Initialize α (Vector UInt8 32) where + -- TENTATIVE: a lens between `α` and `Vector U N` + view : α → Vector U N + update : α → Vector U N → α + + /-- Permute the **state** of the sponge. Note that this does _not_ imply a permutation (i.e. + bijection) for the entire type. + + TODO: figure out the needed properties here (that it is a permutation on the state?) -/ + permute : α → α + + +-- need to "descend" from `permute : α → α` to `permuteState : Vector U N → Vector U N`, and then +-- for the reverse direction + +class LawfulSpongePermutation (U : Type) [SpongeUnit U] (α : Type*) [SpongePermutation U α] where + /- TODO: the permutation is a bijection on the state. -/ + +/-- A cryptographic duplex sponge. 
+ +Rust interface: +```rust +#[derive(Clone, PartialEq, Eq, Default, Zeroize, ZeroizeOnDrop)] +pub struct DuplexSponge { + permutation: C, + absorb_pos: usize, + squeeze_pos: usize, +} +``` +-/ +structure DuplexSponge (U : Type) [SpongeUnit U] (C : Type*) [SpongePermutation U C] where + permutation : C + /-- Current position in the rate portion for absorbing data (0 ≤ absorbPos < R) -/ + absorbPos : Fin (SpongePermutationSize.R) + /-- Current position in the rate portion for squeezing data (0 ≤ squeezePos ≤ R) -/ + squeezePos : Fin (SpongePermutationSize.R) +deriving Inhabited + +namespace DuplexSponge + +variable {U : Type} {C : Type*} [SpongeUnit U] [SpongePermutation U C] + +-- Make DuplexSponge zeroizable +instance : Zeroize (DuplexSponge U C) where + zeroize sponge := { + permutation := Zeroize.zeroize sponge.permutation, + absorbPos := 0, + squeezePos := 0 + } + +-- Make DuplexSponge initializable from 32-byte vector +instance : Initialize (DuplexSponge U C) (Vector UInt8 32) where + new iv := { + permutation := Initialize.new iv, + absorbPos := 0, + squeezePos := 0 + } + +/-- +## Algorithm Documentation for DuplexSponge Implementation + +(Generated by Claude 4 Sonnet based on the Rust implementation) + +### absorbUnchecked: Absorb an array of units into the sponge +Algorithm (from Rust implementation): +1. Process input array in chunks that fit within the remaining rate space +2. While there's still input data: + - If absorb_pos == R (rate is full): permute the state, reset absorb_pos to 0 + - Otherwise: + * Calculate chunk_size = min(remaining_input.length, R - absorb_pos) + * Copy chunk into state[absorb_pos..absorb_pos + chunk_size] + * Update absorb_pos += chunk_size + * Continue with remaining input +3. 
After processing all input, set squeeze_pos = R (force permutation on next squeeze) + +Key insight: The sponge state (Vector U N) has the first R elements as the "rate" portion +where data is absorbed/squeezed, and the last (N-R) elements as the "capacity" portion +that provides security but is never directly touched. + +### squeezeUnchecked: Squeeze out an array of units from the sponge +Algorithm (from Rust implementation): +1. If output array is empty, return immediately +2. Process output in chunks: + - If squeeze_pos == R: + * Permute the state first + * Reset squeeze_pos = 0, absorb_pos = 0 + - Calculate chunk_size = min(remaining_output.length, R - squeeze_pos) + - Copy state[squeeze_pos..squeeze_pos + chunk_size] into output + - Update squeeze_pos += chunk_size + - Recursively handle remaining output +3. Return updated sponge and filled output array + +Note: Unlike absorb which processes input sequentially, squeeze may need to permute +multiple times if the requested output is larger than what's available in the rate. + +### ratchetUnchecked: Ratchet the sponge state for domain separation +Algorithm (from Rust implementation): +1. Permute the state (ensures domain separation from previous operations) +2. Zero out the rate portion: state[0..R] = all zeros + (This erases any information about previously absorbed elements) +3. Set squeeze_pos = R (forces permutation on next squeeze operation) + +Purpose: This operation ensures that different elements are processed in different +blocks, providing domain separation. After ratcheting, the state holds no information +about elements absorbed so far - the state is "compressed" and secure. +-/ + +-- Implement DuplexSpongeInterface for DuplexSponge. 
TODO: port from the Rust implementation +instance : DuplexSpongeInterface U (DuplexSponge U C) where + absorbUnchecked := sorry + squeezeUnchecked := sorry + ratchetUnchecked := sorry + +end DuplexSponge + +namespace UInt8 + +-- Implement SpongeUnit for UInt8 + +instance : Zeroize UInt8 where + zeroize _ := 0 + +instance : Serialize UInt8 ByteArray where + serialize byte := ByteArray.mk #[byte] + +/-- Deserialize a single byte from a byte array. Gives `none` if the array is not of size 1. -/ +instance : DeserializeOption UInt8 ByteArray where + deserialize bytes := + if h : bytes.size = 1 then + some bytes[0] + else + none + +instance : Serde UInt8 ByteArray where + +instance : HasSize UInt8 UInt8 where + size := 1 + toFun := ⟨fun byte => #v[byte], by intro x y; simp⟩ + +instance : SpongeUnit UInt8 where + +end UInt8 diff --git a/ArkLib/Data/Math/Basic.lean b/ArkLib/Data/Math/Basic.lean index 9d76b84f1..8b65b16fa 100644 --- a/ArkLib/Data/Math/Basic.lean +++ b/ArkLib/Data/Math/Basic.lean @@ -74,7 +74,7 @@ lemma iterateRec_lt_base {α : Type*} {b k : ℕ} (op : α → α) {h : b ≥ 2} theorem iterateRec_eq_iterate {α : Type*} {b : ℕ} {h : b ≥ 2} (op : α → α) (n : Nat) : Nat.iterateRec op n b h = op^[n] := by induction n using Nat.caseStrongRecOn with - | zero => simp [Nat.iterateRec] + | zero => simp | ind k ih => unfold Nat.iterateRec have : (k + 1) / b ≤ k := by @@ -145,7 +145,7 @@ theorem rightpad_eq_if_rightpad_eq_of_ge (l l' : List α) (m n n' : Nat) (h : n max n l.length = (rightpad n unit l).length := Eq.symm (rightpad_length n unit l) _ = (rightpad n' unit l').length := congrArg length hEq _ = max n' l'.length := rightpad_length n' unit l' - simp [hEq, hLen] + simp [hLen] sorry @[simp] theorem rightpad_twice_eq_rightpad_max (m n : Nat) (unit : α) (l : List α) : @@ -173,17 +173,17 @@ theorem rightpad_eq_if_rightpad_eq_of_ge (l l' : List α) (m n n' : Nat) (h : n rcases (Nat.lt_or_ge i l.length) with h_lt | h_ge · have h_lt': i < (rightpad n unit l).length := by rw 
[rightpad_length]; omega simp only [h_lt, h_lt', getD_eq_getElem] -- eliminate `getD` - simp [h_lt, getElem_append] + simp [h_lt] rw [getD_eq_default _ _ h_ge] -- eliminate second `getD` for `unit` rcases (Nat.lt_or_ge i n) with h_lt₂ | h_ge₂ · have h_lt' : i < (rightpad n unit l).length := by rw [rightpad_length]; omega rw [getD_eq_getElem _ _ h_lt'] -- eliminate first `getD` - simp [h_ge, getElem_append] + simp [h_ge] · have h_ge' : i ≥ (rightpad n unit l).length := by rw [rightpad_length]; omega rw [getD_eq_default _ _ h_ge'] -- eliminate first `getD` theorem rightpad_getElem_eq_getD {a b : List α} {unit : α} {i : Nat} - (h: i < (a.rightpad b.length unit).length) : + (h : i < (a.rightpad b.length unit).length) : (a.rightpad b.length unit)[i] = a.getD i unit := by rw [← rightpad_getD_eq_getD a b.length, getD_eq_getElem _ _ h] @@ -233,14 +233,14 @@ def toNum {R : Type _} [Zero R] [DecidableEq R] (a : Array R) : ℕ := theorem leftpad_toList {a : Array α} {n : Nat} {unit : α} : a.leftpad n unit = mk (a.toList.leftpad n unit) := by induction h : a.toList with - | nil => simp_all [h] - | cons hd tl ih => simp_all [ih, h, size_eq_length_toList] + | nil => simp_all + | cons hd tl ih => simp_all [size_eq_length_toList] theorem rightpad_toList {a : Array α} {n : Nat} {unit : α} : a.rightpad n unit = mk (a.toList.rightpad n unit) := by induction h : a.toList with - | nil => simp_all [h] - | cons hd tl ih => simp_all [ih, h, size_eq_length_toList] + | nil => simp_all + | cons hd tl ih => simp_all [size_eq_length_toList] theorem rightpad_getElem_eq_getD {a : Array α} {n : Nat} {unit : α} {i : Nat} (h : i < (a.rightpad n unit).size) : (a.rightpad n unit)[i] = a.getD i unit := by @@ -281,7 +281,7 @@ where find ⟨ i, Nat.lt_of_succ_lt h ⟩ /-- if findIdxRev? finds an index, the condition is satisfied on that element -/ -def findIdxRev?_def {cond} {as: Array α} {k : Fin as.size} : +def findIdxRev?_def {cond} {as : Array α} {k : Fin as.size} : findIdxRev? 
cond as = some k → cond as[k] := by suffices aux : ∀ i, findIdxRev?.find cond as i = some k → cond as[k] by apply aux intro i @@ -292,7 +292,7 @@ def findIdxRev?_def {cond} {as: Array α} {k : Fin as.size} : | case3 => unfold findIdxRev?.find; simp [*]; assumption /-- if findIdxRev? finds an index, then for every greater index the condition doesn't hold -/ -def findIdxRev?_maximal {cond} {as: Array α} {k : Fin as.size} : +def findIdxRev?_maximal {cond} {as : Array α} {k : Fin as.size} : findIdxRev? cond as = some k → ∀ j : Fin as.size, j > k → ¬ cond as[j] := by suffices aux : ∀ i, findIdxRev?.find cond as i = some k → ∀ j : Fin as.size, j > k → j.val < i → ¬ cond as[j] by @@ -343,7 +343,7 @@ theorem findIdxRev?_emtpy_none {cond} {as : Array α} (h : as = #[]) : simp /-- if the condition is true on some element, then findIdxRev? finds something -/ -theorem findIdxRev?_eq_some {cond} {as : Array α} (h: ∃ i, ∃ hi : i < as.size, cond as[i]) : +theorem findIdxRev?_eq_some {cond} {as : Array α} (h : ∃ i, ∃ hi : i < as.size, cond as[i]) : ∃ k : Fin as.size, findIdxRev? cond as = some k := by obtain ⟨ i, hi, hcond ⟩ := h diff --git a/ArkLib/Data/Math/HList.lean b/ArkLib/Data/Math/HList.lean index 5563b9f6e..25537c642 100644 --- a/ArkLib/Data/Math/HList.lean +++ b/ArkLib/Data/Math/HList.lean @@ -92,8 +92,7 @@ variable {α : Type u} {n : ℕ} @[simp] lemma List.get_nil (i : Fin 0) (a : α) : [].get i = a := by exact isEmptyElim i -/-- - Dependent vectors +/-- Dependent vectors -/ def DVec {m : Type v} (α : m → Type u) : Type (max u v) := ∀ i, α i diff --git a/ArkLib/Data/Matrix/Basic.lean b/ArkLib/Data/Matrix/Basic.lean new file mode 100644 index 000000000..94ec39cbe --- /dev/null +++ b/ArkLib/Data/Matrix/Basic.lean @@ -0,0 +1,41 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Quang Dao +-/ + +import Mathlib.Data.Matrix.Hadamard +import ArkLib.Data.Fin.Pad +import ArkLib.Data.MvPolynomial.Multilinear + +/-! + # Auxiliary definitions and lemmas for matrices +-/ + + +namespace Matrix + +variable {α : Type*} + +def rightpad (m₂ n₂ : ℕ) (a : α) {m₁ n₁ : ℕ} (M : Matrix (Fin m₁) (Fin n₁) α) : + Matrix (Fin m₂) (Fin n₂) α := + Fin.rightpad m₂ (fun _ => a) (Fin.rightpad n₂ a ∘ M) + +def leftpad (m₂ n₂ : ℕ) (a : α) {m₁ n₁ : ℕ} (M : Matrix (Fin m₁) (Fin n₁) α) : + Matrix (Fin m₂) (Fin n₂) α := + Fin.leftpad m₂ (fun _ => a) (Fin.leftpad n₂ a ∘ M) + +end Matrix + +namespace Matrix + +open MvPolynomial + +variable {R : Type*} [CommRing R] {m n : ℕ} + +/-- Convert a matrix of dimensions `2^m × 2^n` to a nested multilinear polynomial + `R[X Fin n][X Fin m]`. Note the order of nesting (i.e. `m` is on the outside). -/ +noncomputable def toMLE (A : Matrix (Fin (2 ^ m)) (Fin (2 ^ n)) R) : R[X Fin n][X Fin m] := + MLE' (MLE' ∘ A) + +end Matrix diff --git a/ArkLib/Data/Matrix/Sparse.lean b/ArkLib/Data/Matrix/Sparse.lean new file mode 100644 index 000000000..ed7c53b20 --- /dev/null +++ b/ArkLib/Data/Matrix/Sparse.lean @@ -0,0 +1,34 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.Data.Matrix.Basic + +/-! + # Sparse representation of matrices +-/ + +/-- The sparse representation of a matrix `m → n → α` consists of: +- The number of non-zero entries `k : ℕ` +- The row indices `row : Fin k → m` +- The column indices `col : Fin k → n` +- The values `val : Fin k → α` + +This representation is **not** unique. In particular, we may have duplicate `(row, col)` pairs, and +some `val` may be zero. 
+-/ +structure SparseMatrix (m n α : Type*) where + numEntries : ℕ + row : Fin numEntries → m + col : Fin numEntries → n + val : Fin numEntries → α +deriving Inhabited, DecidableEq + +/-- Convert a sparse matrix to a regular (dense) matrix. For each entry `(i, j)` of the matrix, we + simply sum over all `k` such that `(row k, col k) = (i, j)`. +-/ +def SparseMatrix.toMatrix {m n α : Type*} [DecidableEq m] [DecidableEq n] [AddCommMonoid α] + (A : SparseMatrix m n α) : Matrix m n α := + fun i j => ∑ k : Fin A.numEntries, if A.row k = i ∧ A.col k = j then A.val k else 0 diff --git a/ArkLib/Data/MlPoly/Basic.lean b/ArkLib/Data/MlPoly/Basic.lean index 8cb32edb2..0acc2f2b2 100644 --- a/ArkLib/Data/MlPoly/Basic.lean +++ b/ArkLib/Data/MlPoly/Basic.lean @@ -41,7 +41,7 @@ scoped notation:80 a " *ᵥ " b => dotProduct a b def dotProduct_cons (a : R) (b : Vector R n) (c : R) (d : Vector R n) : dotProduct (cons a b) (cons c d) = a * c + dotProduct b d := by - simp [dotProduct, cons, get, foldl] + simp [dotProduct, cons, foldl] -- rw [← Array.foldl_toList] sorry @@ -122,47 +122,47 @@ def zsmul [SMul ℤ R] (m : ℤ) (p : MlPoly R n) : MlPoly R n := p.map (fun a = instance [AddCommMonoid R] : AddCommMonoid (MlPoly R n) where add := add add_assoc a b c := by - show Vector.zipWith (· + ·) (Vector.zipWith (· + ·) a b) c = + change Vector.zipWith (· + ·) (Vector.zipWith (· + ·) a b) c = Vector.zipWith (· + ·) a (Vector.zipWith (· + ·) b c) ext; simp [add_assoc] add_comm a b := by - show Vector.zipWith (· + ·) a b = Vector.zipWith (· + ·) b a + change Vector.zipWith (· + ·) a b = Vector.zipWith (· + ·) b a ext; simp [add_comm] zero := zero zero_add a := by - show Vector.zipWith (· + ·) (Vector.replicate (2 ^ n) 0) a = a + change Vector.zipWith (· + ·) (Vector.replicate (2 ^ n) 0) a = a ext; simp add_zero a := by - show Vector.zipWith (· + ·) a (Vector.replicate (2 ^ n) 0) = a + change Vector.zipWith (· + ·) a (Vector.replicate (2 ^ n) 0) = a ext; simp nsmul := nsmul nsmul_zero a := by - 
show Vector.map (fun a ↦ 0 • a) a = Vector.replicate (2 ^ n) 0 + change Vector.map (fun a ↦ 0 • a) a = Vector.replicate (2 ^ n) 0 ext; simp nsmul_succ n a := by - show a.map (fun a ↦ (n + 1) • a) = Vector.zipWith (· + ·) (Vector.map (fun a ↦ n • a) a) a + change a.map (fun a ↦ (n + 1) • a) = Vector.zipWith (· + ·) (Vector.map (fun a ↦ n • a) a) a ext i; simp; exact AddMonoid.nsmul_succ n a[i] instance [Semiring R] : Module R (MlPoly R n) where smul := smul one_smul a := by - show Vector.map (fun a ↦ 1 * a) a = a + change Vector.map (fun a ↦ 1 * a) a = a ext; simp mul_smul r s a := by simp [HSMul.hSMul, smul] smul_zero a := by - show Vector.map (fun a_1 ↦ a * a_1) (Vector.replicate (2 ^ n) 0) = Vector.replicate (2 ^ n) 0 + change Vector.map (fun a_1 ↦ a * a_1) (Vector.replicate (2 ^ n) 0) = Vector.replicate (2 ^ n) 0 ext; simp smul_add r a b := by - show Vector.map (fun a ↦ r * a) (Vector.zipWith (· + ·) a b) = + change Vector.map (fun a ↦ r * a) (Vector.zipWith (· + ·) a b) = Vector.zipWith (· + ·) (Vector.map (fun a ↦ r * a) a) (Vector.map (fun a ↦ r * a) b) ext; simp [left_distrib] add_smul r s a := by - show Vector.map (fun a ↦ (r + s) * a) a = + change Vector.map (fun a ↦ (r + s) * a) a = Vector.zipWith (· + ·) (Vector.map (fun a ↦ r * a) a) (Vector.map (fun a ↦ s * a) a) ext; simp [right_distrib] zero_smul a := by - show Vector.map (fun a ↦ 0 * a) a = Vector.replicate (2 ^ n) 0 + change Vector.map (fun a ↦ 0 * a) a = Vector.replicate (2 ^ n) 0 ext; simp variable [CommRing R] @@ -228,12 +228,12 @@ Example: `lagrangeBasis #v[(1 : ℤ), 2, 3]` should return `[0, 0, 0, 0, 2, -3, `eq([1,2,3], [1,1,1]) = (1·1 + 0·0)·(2·1 + (-1)·0)·(3·1 + (-2)·0) = 1·2·3 = 6` -/ example : lagrangeBasis #v[(1 : ℤ), 2, 3] = #v[0, 0, 0, 0, 2, -3, -4, 6] := by - simp [lagrangeBasis, lagrangeBasisAux, Array.ofFn, Array.ofFn.go] + simp [lagrangeBasis, lagrangeBasisAux] /-- The `i`-th element of `lagrangeBasis w` is the product of `w[j]` if the `j`-th bit of `i` is 1, and `1 - w[j]` if the 
`j`-th bit of `i` is 0. -/ theorem lagrangeBasis_getElem {w : Vector R n} (i : Fin (2 ^ n)) : - (lagrangeBasis w)[i] = ∏ j : Fin n, if (BitVec.ofFin i).getLsb' j then w[j] else 1 - w[j] := by + (lagrangeBasis w)[i] = ∏ j : Fin n, if (BitVec.ofFin i).getLsb j then w[j] else 1 - w[j] := by sorry variable {S : Type*} [CommRing S] @@ -272,8 +272,8 @@ If the `j`‑th least significant bit of the index `i` is `1`, we replace `v[i]` fun v => letI stride : ℕ := 2 ^ j.val -- distance to the "partner" index Vector.ofFn (fun i : Fin (2 ^ n) => - if (BitVec.ofFin i).getLsb' j then - v[i] + v[i - stride] + if (BitVec.ofFin i).getLsb j then + v[i] + v[i - stride]'(Nat.sub_lt_of_lt i.isLt) else v[i]) @@ -298,8 +298,8 @@ If the `j`‑th least significant bit of the index `i` is `1`, we replace `v[i]` fun v => letI stride : ℕ := 2 ^ j.val -- distance to the "partner" index Vector.ofFn (fun i : Fin (2 ^ n) => - if (BitVec.ofFin i).getLsb' j then - v[i] - v[i - stride] + if (BitVec.ofFin i).getLsb j then + v[i] - v[i - stride]'(Nat.sub_lt_of_lt i.isLt) else v[i]) diff --git a/ArkLib/Data/MvPolynomial/LinearMvExtension.lean b/ArkLib/Data/MvPolynomial/LinearMvExtension.lean new file mode 100644 index 000000000..8abe23c0f --- /dev/null +++ b/ArkLib/Data/MvPolynomial/LinearMvExtension.lean @@ -0,0 +1,85 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.Basic +import Mathlib.Algebra.MvPolynomial.Eval +import Mathlib.Algebra.Polynomial.Eval.Defs + +/-! 
+ # Conversion of Univariate polynomials to Multilinear polynomials + + Univariate polynomials of degree < 2ᵐ can be writen as degree wise linear + m-variate polynomials by `∑ aᵢ Xⁱ → ∑ aᵢ ∏ⱼ Xⱼ^(bitⱼ(i))` -/ + +namespace LinearMvExtension + +noncomputable section + +open MvPolynomial + +variable {F : Type*} [CommSemiring F] {m : ℕ} + +/-- Given integers m and i this computes monomial exponents + `( σ(0), ..., σ(m-1) ) = ( bit₀(i), ..., bitₘ₋₁(i) )` + such that we have `X_0^σ(0)⬝ ⋯ ⬝ X_(m-1)^σ(m-1)`. + For `i ≥ 2ᵐ` this is the bit reprsentation of `(i mod 2ᵐ)` -/ +def bitExpo (i : ℕ) : (Fin m) →₀ ℕ := + Finsupp.onFinset Finset.univ + (fun j => if Nat.testBit i j.1 then 1 else 0) + (by intro j hj; simp) + +/-- The linear map that maps univariate polynomials of degree < 2ᵐ onto + degree wise linear m-variate polynomials, sending + `aᵢ Xⁱ ↦ aᵢ ∏ⱼ Xⱼ^(bitⱼ(i))`, where `bitⱼ(i)` is the j-th binary digit of `(i mod 2ᵐ)`. -/ +def linearMvExtension : + Polynomial.degreeLT F (2^m) →ₗ[F] MvPolynomial (Fin m) F where + -- p(X) = aᵢ Xᶦ ↦ aᵢ ∏ⱼ Xⱼ^(bitⱼ(i)) + toFun p := (p : Polynomial F).sum fun i a => + MvPolynomial.monomial (bitExpo i) a + map_add' := by + rintro p q + simp [Polynomial.sum_add_index] + map_smul' := by + rintro c p + simp [Polynomial.sum_smul_index] + sorry + +/-- `partialEval` takes a m-variate polynomial f and a k-vector α as input, + partially evaluates f(X_0, X_1,..X_(m-1)) at {X_0 = α_0, X_1 = α_1,.., X_{k-1} = α_{k-1}} + and returns a (m-k)-variate polynomial. -/ +def partialEval {k : ℕ} (f : MvPolynomial (Fin m) F) (α : Fin k → F) (h : k ≤ m) : + MvPolynomial (Fin (m - k)) F := + let φ : Fin m → MvPolynomial (Fin (m - k)) F := fun i => + if h' : i.val < k then + C (α ⟨i.val, h'⟩) + else + let j := i.val - k + let j' : Fin (m - k) := ⟨j, sorry⟩ + X j' + eval₂ C φ f + +/-- The Semiring morphism that maps m-variate polynomials onto univariate + polynomials by evaluating them at `(X^(2⁰), ... , X^(2ᵐ⁻¹))`, i.e. 
sending + `aₑ X₀^σ(0) ⬝ ⋯ ⬝ Xₘ₋₁^σ(m-1) → aₑ (X^(2⁰))^σ(0) ⬝ ⋯ ⬝ (X^(2ᵐ⁻¹))^σ(m-1)` + for all `σ : Fin m → ℕ` -/ +def powAlgHom : + MvPolynomial (Fin m) F →ₐ[F] Polynomial F := + aeval fun j => Polynomial.X ^ (2 ^ (j : ℕ)) + +/- The linear map optained by forgetting the multiplicative structure-/ +def powContraction : + MvPolynomial (Fin m) F →ₗ[F] Polynomial F := + powAlgHom.toLinearMap + +/- Evaluating m-variate polynomials on (X^(2⁰), ... , X^(2ᵐ⁻¹) ) is + right inverse to linear multivariate extensions on F^(< 2ᵐ)[X] -/ +lemma powContraction_is_right_inverse_to_linearMvExtension + (p : Polynomial.degreeLT F (2^m)) : + powContraction.comp linearMvExtension p = p := by sorry + +end + +end LinearMvExtension diff --git a/ArkLib/Data/MvPolynomial/Multilinear.lean b/ArkLib/Data/MvPolynomial/Multilinear.lean index 893f45e55..af1caacad 100644 --- a/ArkLib/Data/MvPolynomial/Multilinear.lean +++ b/ArkLib/Data/MvPolynomial/Multilinear.lean @@ -108,10 +108,16 @@ theorem eqPolynomial_eval_zeroOne (r x : σ → Fin 2) : variable [DecidableEq σ] -/-- Multilinear extension of evaluations on the hypercube -/ +/-- Multilinear extension of evaluations on the `σ`-indexed hypercube, where the evaluations are + represented as `(σ → Fin 2) → R` -/ def MLE (evals : (σ → Fin 2) → R) : MvPolynomial σ R := ∑ x : σ → Fin 2, (eqPolynomial (x : σ → R)) * C (evals x) +/-- Multilinear extension of evaluations on the `n`-dimensional hypercube, where the evaluations are + represented as `Fin (2 ^ n) → R` -/ +def MLE' {n : ℕ} (evals : Fin (2 ^ n) → R) : MvPolynomial (Fin n) R := + MLE (evals ∘ finFunctionFinEquiv) + theorem MLE_expanded (evals : (σ → Fin 2) → R) : MLE evals = ∑ x : σ → Fin 2, (∏ i : σ, ((1 - C (x i : R)) * (1 - X i) + C (x i : R) * X i)) * C (evals x) := by diff --git a/ArkLib/Data/MvPolynomial/Sumcheck.lean b/ArkLib/Data/MvPolynomial/Sumcheck.lean index bdd711366..709f56a00 100644 --- a/ArkLib/Data/MvPolynomial/Sumcheck.lean +++ b/ArkLib/Data/MvPolynomial/Sumcheck.lean @@ -4,10 
+4,8 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import Mathlib.Algebra.MvPolynomial.Equiv -import Mathlib.Data.Multiset.Bind import ArkLib.Data.MvPolynomial.Degrees -import Mathlib +import Mathlib.Algebra.MvPolynomial.Monad /-! # Auxiliary functions for sum-check over multivariate polynomials @@ -76,8 +74,7 @@ theorem degrees_peval {x : σ₁ → R} {f : σ → σ₁ ⊕ σ₂} {p : MvPoly end PartialEval -/-- - A `R`-linear mapping that sends +/-- A `R`-linear mapping that sends `p(X_0,\dots,X_{m-1},X_m,\dots,X_{m+n-1})` to diff --git a/ArkLib/Data/CodingTheory/BivariatePoly.lean b/ArkLib/Data/Polynomial/Bivariate.lean similarity index 97% rename from ArkLib/Data/CodingTheory/BivariatePoly.lean rename to ArkLib/Data/Polynomial/Bivariate.lean index 8121ce6cd..dced25ce7 100644 --- a/ArkLib/Data/CodingTheory/BivariatePoly.lean +++ b/ArkLib/Data/Polynomial/Bivariate.lean @@ -8,8 +8,6 @@ import Mathlib.Algebra.Polynomial.Eval.Defs import Mathlib.Algebra.Polynomial.Bivariate import Mathlib.Data.Fintype.Defs - -open Classical open Polynomial open Polynomial.Bivariate @@ -22,12 +20,11 @@ variable {F : Type} [Semiring F] (f g : F[X][Y]) /-- -The set of coeffiecients of a bivatiate polynomial. +The set of coefficients of a bivariate polynomial. -/ -def coeffs : Finset F[X] := f.support.image (fun n => f.coeff n) +def coeffs [DecidableEq F] : Finset F[X] := f.support.image (fun n => f.coeff n) -/--- -The coeffiecient of `Y^n` is a polynomial in `X`. +/-- The coefficient of `Y^n` is a polynomial in `X`. 
-/ def coeff_Y_n (n : ℕ) : F[X] := f.coeff n @@ -254,6 +251,7 @@ def totalDegree (f : F[X][Y]) : ℕ := lemma monomial_xy_degree (n m : ℕ) (a : F) (ha : a ≠ 0) : totalDegree (monomial_xy n m a) = n + m := by + classical unfold totalDegree rw [ diff --git a/ArkLib/Data/Polynomial/Interface.lean b/ArkLib/Data/Polynomial/Interface.lean index 766b37870..749ee561d 100644 --- a/ArkLib/Data/Polynomial/Interface.lean +++ b/ArkLib/Data/Polynomial/Interface.lean @@ -1,5 +1,5 @@ -import Mathlib -import ArkLib.Data.Fin.Basic +import ArkLib.Data.Fin.Lift +import Mathlib.RingTheory.Polynomial.Basic section PolynomialInterface @@ -9,7 +9,7 @@ variable {F : Type*} [Semiring F] {deg : ℕ} {coeffs : Fin deg → F} lemma natDegree_lt_of_lbounded_zero_coeff {p : F[X]} [NeZero deg] (h : ∀ i, deg ≤ i → p.coeff i = 0) : p.natDegree < deg := by - aesop (add unsafe [(by by_contra), (by specialize h p.natDegree)]) + aesop (add unsafe [(by by_contra), (by specialize h p.natDegree)]) def coeffsOfPolynomial (p : F[X]) : Fin deg → F := fun ⟨x, _⟩ ↦ p.coeff x diff --git a/ArkLib/Data/Tree/Binary.lean b/ArkLib/Data/Tree/Binary.lean new file mode 100644 index 000000000..8ecff888e --- /dev/null +++ b/ArkLib/Data/Tree/Binary.lean @@ -0,0 +1,167 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Data.Tree.Basic + +/-! + # Binary Trees + + This file defines binary tree data structures with different value storage patterns. + + We provide two main variants: + - `BinaryTree`: Trees where both leaf and internal nodes can hold values + - `BinaryLeafTree`: Trees where only leaf nodes hold values, internal nodes are structural + + These complement Mathlib's `Tree` type (which stores values only in internal nodes) + and are particularly useful for Merkle tree implementations and other cryptographic + applications where different node types serve different purposes. 
+ + ## Comparison with Mathlib's Tree + + - **Mathlib's `Tree α`**: Values only in internal nodes (traditional binary tree) + - **`BinaryTree α`**: Values in both leaf and internal nodes (maximum flexibility) + - **`BinaryLeafTree α`**: Values only in leaf nodes (common in Merkle trees) +-/ + +/-- A binary tree with (possibly null) values stored at both leaf and internal nodes. + + This provides maximum flexibility where both leaf and internal nodes can carry data. + Useful for complex tree structures where different types of nodes serve different purposes. -/ +inductive BinaryTree (α : Type*) where + | leaf : α → BinaryTree α + | node : α → BinaryTree α → BinaryTree α → BinaryTree α + | nil : BinaryTree α + deriving Inhabited, Repr + +/-- A binary tree where only leaf nodes can have values. + Internal nodes are purely structural and do not store values. + + This is commonly used as input to Merkle tree construction where + only the leaf data matters for the cryptographic commitments. -/ +inductive BinaryLeafTree (α : Type*) where + | leaf : α → BinaryLeafTree α + | node : BinaryLeafTree α → BinaryLeafTree α → BinaryLeafTree α + | nil : BinaryLeafTree α + deriving Inhabited, Repr + +namespace BinaryTree + +variable {α : Type*} + +/-- Get the root value of a binary tree, if it exists. -/ +def getRoot : BinaryTree α → Option α + | BinaryTree.nil => none + | BinaryTree.leaf a => some a + | BinaryTree.node a _ _ => some a + +/-- Check if the tree is a leaf. -/ +def isLeaf : BinaryTree α → Bool + | BinaryTree.leaf _ => true + | _ => false + +/-- Check if the tree is empty. -/ +def isEmpty : BinaryTree α → Bool + | BinaryTree.nil => true + | _ => false + +/-- Get the left child of the tree. -/ +def left : BinaryTree α → BinaryTree α + | BinaryTree.nil => BinaryTree.nil + | BinaryTree.leaf _ => BinaryTree.nil + | BinaryTree.node _ l _ => l + +/-- Get the right child of the tree. 
-/ +def right : BinaryTree α → BinaryTree α + | BinaryTree.nil => BinaryTree.nil + | BinaryTree.leaf _ => BinaryTree.nil + | BinaryTree.node _ _ r => r + +/-- Find the path from root to a leaf with the given value. + Returns a list of boolean directions (false = left, true = right). -/ +def findPath [DecidableEq α] (a : α) : BinaryTree α → Option (List Bool) + | BinaryTree.nil => none + | BinaryTree.leaf b => if a = b then some [] else none + | BinaryTree.node _ left right => + match findPath a left with + | some path => some (false :: path) + | none => + match findPath a right with + | some path => some (true :: path) + | none => none + + + +end BinaryTree + +namespace BinaryLeafTree + +variable {α : Type*} + +/-- Check if the tree is a leaf. -/ +def isLeaf : BinaryLeafTree α → Bool + | BinaryLeafTree.leaf _ => true + | _ => false + +/-- Check if the tree is empty. -/ +def isEmpty : BinaryLeafTree α → Bool + | BinaryLeafTree.nil => true + | _ => false + +/-- Get the left child of the tree. -/ +def left : BinaryLeafTree α → BinaryLeafTree α + | BinaryLeafTree.nil => BinaryLeafTree.nil + | BinaryLeafTree.leaf _ => BinaryLeafTree.nil + | BinaryLeafTree.node l _ => l + +/-- Get the right child of the tree. -/ +def right : BinaryLeafTree α → BinaryLeafTree α + | BinaryLeafTree.nil => BinaryLeafTree.nil + | BinaryLeafTree.leaf _ => BinaryLeafTree.nil + | BinaryLeafTree.node _ r => r + +/-- Find the path from root to a leaf with the given value. + Returns a list of boolean directions (false = left, true = right). 
-/ +def findPath [DecidableEq α] (a : α) : BinaryLeafTree α → Option (List Bool) + | BinaryLeafTree.nil => none + | BinaryLeafTree.leaf b => if a = b then some [] else none + | BinaryLeafTree.node left right => + match findPath a left with + | some path => some (false :: path) + | none => + match findPath a right with + | some path => some (true :: path) + | none => none + +end BinaryLeafTree + +-- Example usage: +section Examples + +-- Example: +-- 1 +-- / \ +-- 2 3 +-- / \ \ +-- 4 5 6 +-- / +-- 7 +-- A tree with values at both leaf and internal nodes +def BinaryTree.example : BinaryTree (Fin 10) := + .node 1 + (.node 2 + (.leaf 4) + (.node 5 (.leaf 7) .nil)) + (.node 3 + .nil + (.leaf 6)) + +-- A leaf tree where only leaves hold values +def BinaryLeafTree.example : BinaryLeafTree (Fin 10) := + .node + (.node (.leaf 1) (.leaf 2)) + (.node (.leaf 3) .nil) + +end Examples diff --git a/ArkLib/Data/Tree/General.lean b/ArkLib/Data/Tree/General.lean new file mode 100644 index 000000000..71c3f6074 --- /dev/null +++ b/ArkLib/Data/Tree/General.lean @@ -0,0 +1,154 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import Mathlib.Data.Tree.Basic + +/-! + # General Trees with Arbitrary Arities + + This file defines general tree data structures where nodes can have an arbitrary number + of children (represented as lists). + + We provide two main variants: + - `GeneralTree`: Trees where every node (including internal nodes) holds a value + - `GeneralLeafTree`: Trees where only leaf nodes hold values, internal nodes are structural + + Both support arbitrary arities and include helper functions for common operations like + finding paths, checking properties, and accessing structure information. +-/ + +/-- A general tree with arbitrary arity where every node (including internal nodes) holds a value. 
+ The children are represented as a list, allowing for any number of child nodes. -/ +inductive GeneralTree (α : Type*) where + | leaf : α → GeneralTree α + | node : α → List (GeneralTree α) → GeneralTree α + | nil : GeneralTree α + deriving Inhabited, Repr + +/-- A general tree with arbitrary arity where only leaf nodes hold values. + Internal nodes are purely structural and do not store values. + Used as input to general Merkle tree construction. -/ +inductive GeneralLeafTree (α : Type*) where + | leaf : α → GeneralLeafTree α + | node : List (GeneralLeafTree α) → GeneralLeafTree α + | nil : GeneralLeafTree α + deriving Inhabited, Repr + +namespace GeneralTree + +variable {α : Type*} + +/-- Get the root value of a general tree, if it exists. -/ +def getRoot : GeneralTree α → Option α + | GeneralTree.nil => none + | GeneralTree.leaf a => some a + | GeneralTree.node a _ => some a + +/-- Get the number of children of a node. -/ +def arity : GeneralTree α → Nat + | GeneralTree.nil => 0 + | GeneralTree.leaf _ => 0 + | GeneralTree.node _ children => children.length + +/-- Get the children of a node. -/ +def children : GeneralTree α → List (GeneralTree α) + | GeneralTree.nil => [] + | GeneralTree.leaf _ => [] + | GeneralTree.node _ children => children + +/-- Check if the tree is a leaf. -/ +def isLeaf : GeneralTree α → Bool + | GeneralTree.leaf _ => true + | _ => false + +/-- Check if the tree is empty. -/ +def isEmpty : GeneralTree α → Bool + | GeneralTree.nil => true + | _ => false + +/-- Find the path from root to a leaf with the given value. + The path is represented as a list of child indices. 
-/ +def findPath [DecidableEq α] (a : α) : GeneralTree α → Option (List Nat) + | GeneralTree.nil => none + | GeneralTree.leaf b => if a = b then some [] else none + | GeneralTree.node _ children => findPathInChildren a children 0 +where + findPathInChildren (a : α) (children : List (GeneralTree α)) (idx : Nat) : Option (List Nat) := + match children with + | [] => none + | child :: rest => + match findPath a child with + | some path => some (idx :: path) + | none => findPathInChildren a rest (idx + 1) + +end GeneralTree + +namespace GeneralLeafTree + +variable {α : Type*} + +/-- Get the number of children of a node. -/ +def arity : GeneralLeafTree α → Nat + | GeneralLeafTree.nil => 0 + | GeneralLeafTree.leaf _ => 0 + | GeneralLeafTree.node children => children.length + +/-- Get the children of a node. -/ +def children : GeneralLeafTree α → List (GeneralLeafTree α) + | GeneralLeafTree.nil => [] + | GeneralLeafTree.leaf _ => [] + | GeneralLeafTree.node children => children + +/-- Check if the tree is a leaf. -/ +def isLeaf : GeneralLeafTree α → Bool + | GeneralLeafTree.leaf _ => true + | _ => false + +/-- Check if the tree is empty. -/ +def isEmpty : GeneralLeafTree α → Bool + | GeneralLeafTree.nil => true + | _ => false + +/-- Find the path from root to a leaf with the given value. + The path is represented as a list of child indices. 
-/ +def findPath [DecidableEq α] (a : α) : GeneralLeafTree α → Option (List Nat) + | GeneralLeafTree.nil => none + | GeneralLeafTree.leaf b => if a = b then some [] else none + | GeneralLeafTree.node children => findPathInChildren a children 0 +where + findPathInChildren (a : α) (children : List (GeneralLeafTree α)) (idx : Nat) : + Option (List Nat) := + match children with + | [] => none + | child :: rest => + match findPath a child with + | some path => some (idx :: path) + | none => findPathInChildren a rest (idx + 1) + +end GeneralLeafTree + +-- Example usage: +section Examples + +-- A general tree where every node holds a value +def GeneralTree.example : GeneralTree (Fin 10) := + .node 1 [ + .leaf 2, + .node 3 [.leaf 4, .leaf 5, .leaf 6], + .leaf 7, + .node 8 [.leaf 9, .nil] + ] + +-- A general leaf tree where only leaves hold values +def GeneralLeafTree.example : GeneralLeafTree (Fin 10) := + .node [ + .leaf 1, + .node [.leaf 2, .leaf 3, .leaf 4], + .leaf 5, + .node [.leaf 6, .nil] + ] + +end Examples diff --git a/ArkLib/Data/UniPoly/Basic.lean b/ArkLib/Data/UniPoly/Basic.lean index 9a163adc2..eab7c14ee 100644 --- a/ArkLib/Data/UniPoly/Basic.lean +++ b/ArkLib/Data/UniPoly/Basic.lean @@ -22,7 +22,7 @@ open Polynomial For example the Array `#[1,2,3]` represents the polynomial `1 + 2x + 3x^2`. Two arrays may represent the same polynomial via zero-padding, for example `#[1,2,3] = #[1,2,3,0,0,0,...]`. - -/ +-/ @[reducible, inline, specialize] def UniPoly (R : Type*) := Array R @@ -53,7 +53,7 @@ def C (r : R) : UniPoly R := #[r] def X : UniPoly R := #[0, 1] /-- Return the index of the last non-zero coefficient of a `UniPoly` -/ -def last_nonzero (p: UniPoly R) : Option (Fin p.size) := +def last_nonzero (p : UniPoly R) : Option (Fin p.size) := p.findIdxRev? (· != 0) /-- Remove leading zeroes from a `UniPoly`. Requires `BEq` to check if the coefficients are zero. 
-/ @@ -84,7 +84,7 @@ theorem last_nonzero_none [LawfulBEq R] {p : UniPoly R} : rw [bne_iff_ne, ne_eq, not_not] apply_assumption -theorem last_nonzero_some [LawfulBEq R] {p : UniPoly R} {i} (hi: i < p.size) (h: p[i] ≠ 0) : +theorem last_nonzero_some [LawfulBEq R] {p : UniPoly R} {i} (hi : i < p.size) (h : p[i] ≠ 0) : ∃ k, p.last_nonzero = some k := Array.findIdxRev?_eq_some ⟨i, hi, bne_iff_ne.mpr h⟩ @@ -103,7 +103,7 @@ theorem last_nonzero_spec [LawfulBEq R] {p : UniPoly R} {k} : -- the property of `last_nonzero_spec` uniquely identifies an element, -- and that allows us to prove the reverse as well -def last_nonzero_prop {p : UniPoly R} (k: Fin p.size) : Prop := +def last_nonzero_prop {p : UniPoly R} (k : Fin p.size) : Prop := p[k] ≠ 0 ∧ (∀ j, (hj : j < p.size) → j > k → p[j] = 0) lemma last_nonzero_unique {p : UniPoly Q} {k k' : Fin p.size} : @@ -117,7 +117,7 @@ lemma last_nonzero_unique {p : UniPoly Q} {k k' : Fin p.size} : have : p[k] = 0 := h' k k.is_lt (Nat.lt_of_not_ge k_not_le) contradiction -theorem last_nonzero_some_iff [LawfulBEq R] {p : UniPoly R} {k} : +theorem last_nonzero_some_iff [LawfulBEq R] {p : UniPoly R} {k} : p.last_nonzero = some k ↔ (p[k] ≠ 0 ∧ (∀ j, (hj : j < p.size) → j > k → p[j] = 0)) := by constructor @@ -196,7 +196,7 @@ theorem size_le_size (p : UniPoly R) : p.trim.size ≤ p.size := by attribute [simp] Array.getElem?_eq_none -theorem coeff_eq_getElem_of_lt [LawfulBEq R] {p : UniPoly R} {i} (hi: i < p.size) : +theorem coeff_eq_getElem_of_lt [LawfulBEq R] {p : UniPoly R} {i} (hi : i < p.size) : p.trim.coeff i = p[i] := by induction p using induct with | case1 p h_empty h_all_zero => @@ -236,7 +236,7 @@ lemma coeff_eq_zero {p : UniPoly Q} : (∀ i, (hi : i < p.size) → p[i] = 0) ↔ ∀ i, p.coeff i = 0 := by constructor <;> intro h i - · cases Nat.lt_or_ge i p.size <;> simp [h, *] + · cases Nat.lt_or_ge i p.size <;> simp [*] · intro hi; specialize h i; simp [hi] at h; assumption lemma eq_degree_of_equiv [LawfulBEq R] {p q : UniPoly R} : equiv p q 
→ p.degree = q.degree := by @@ -297,7 +297,7 @@ theorem canonical_of_size_zero {p : UniPoly R} : p.size = 0 → p.trim = p := by suffices h_empty : p = #[] by rw [h_empty]; exact canonical_empty exact Array.eq_empty_of_size_eq_zero h -theorem canonical_nonempty_iff [LawfulBEq R] {p : UniPoly R} (hp: p.size > 0) : +theorem canonical_nonempty_iff [LawfulBEq R] {p : UniPoly R} (hp : p.size > 0) : p.trim = p ↔ p.last_nonzero = some ⟨ p.size - 1, Nat.pred_lt_self hp ⟩ := by unfold trim @@ -323,7 +323,7 @@ theorem canonical_nonempty_iff [LawfulBEq R] {p : UniPoly R} (hp: p.size > 0) : right exact le_refl _ -theorem last_nonzero_last_iff [LawfulBEq R] {p : UniPoly R} (hp: p.size > 0) : +theorem last_nonzero_last_iff [LawfulBEq R] {p : UniPoly R} (hp : p.size > 0) : p.last_nonzero = some ⟨ p.size - 1, Nat.pred_lt_self hp ⟩ ↔ p.getLast hp ≠ 0 := by induction p using last_nonzero_induct with @@ -363,14 +363,14 @@ theorem non_zero_map [LawfulBEq R] (f : R → R) (hf : ∀ r, f r = 0 → r = 0) intro fp p_canon by_cases hp : p.size > 0 -- positive case - apply canonical_iff.mpr - intro hfp - have h_nonzero := canonical_iff.mp p_canon hp - have : fp.getLast hfp = f (p.getLast hp) := by simp [Array.getLast, fp] - rw [this] - by_contra h_zero - specialize hf (p.getLast hp) h_zero - contradiction + · apply canonical_iff.mpr + intro hfp + have h_nonzero := canonical_iff.mp p_canon hp + have : fp.getLast hfp = f (p.getLast hp) := by simp [Array.getLast, fp] + rw [this] + by_contra h_zero + specialize hf (p.getLast hp) h_zero + contradiction -- zero case have : p.size = 0 := by linarith have : fp.size = 0 := by simp [this, fp] @@ -379,7 +379,7 @@ theorem non_zero_map [LawfulBEq R] (f : R → R) (hf : ∀ r, f r = 0 → r = 0) /-- Canonical polynomials enjoy a stronger extensionality theorem: they just need to agree at all coefficients (without assuming equal sizes) -/ -theorem canonical_ext [LawfulBEq R] {p q : UniPoly R} (hp: p.trim = p) (hq: q.trim = q) : +theorem canonical_ext [LawfulBEq R] {p 
q : UniPoly R} (hp : p.trim = p) (hq : q.trim = q) : equiv p q → p = q := by intro h_equiv rw [← hp, ← hq] @@ -543,14 +543,14 @@ variable (p q r : UniPoly R) lemma matchSize_size_eq {p q : UniPoly Q} : let (p', q') := Array.matchSize p q 0 p'.size = q'.size := by - show (Array.rightpad _ _ _).size = (Array.rightpad _ _ _).size + change (Array.rightpad _ _ _).size = (Array.rightpad _ _ _).size rw [Array.size_rightpad, Array.size_rightpad] omega lemma matchSize_size {p q : UniPoly Q} : let (p', _) := Array.matchSize p q 0 p'.size = max p.size q.size := by - show (Array.rightpad _ _ _).size = max (Array.size _) (Array.size _) + change (Array.rightpad _ _ _).size = max (Array.size _) (Array.size _) rw [Array.size_rightpad] omega @@ -561,10 +561,10 @@ lemma zipWith_size {R} {f : R → R → R} {a b : Array R} (h : a.size = b.size) -- TODO we could generalize the next few lemmas to matchSize + zipWith f for any f theorem add_size {p q : UniPoly Q} : (add_raw p q).size = max p.size q.size := by - show (Array.zipWith _ _ _ ).size = max p.size q.size + change (Array.zipWith _ _ _ ).size = max p.size q.size rw [zipWith_size matchSize_size_eq, matchSize_size] -theorem add_coeff {p q : UniPoly Q} {i: ℕ} (hi: i < (add_raw p q).size) : +theorem add_coeff {p q : UniPoly Q} {i : ℕ} (hi : i < (add_raw p q).size) : (add_raw p q)[i] = p.coeff i + q.coeff i := by simp [add_raw] @@ -573,7 +573,7 @@ theorem add_coeff {p q : UniPoly Q} {i: ℕ} (hi: i < (add_raw p q).size) : -- repeat rw [List.rightpad_getElem_eq_getD] -- simp only [List.getD_eq_getElem?_getD, Array.getElem?_eq_toList] -theorem add_coeff? (p q : UniPoly Q) (i: ℕ) : +theorem add_coeff? 
(p q : UniPoly Q) (i : ℕ) : (add_raw p q).coeff i = p.coeff i + q.coeff i := by rcases (Nat.lt_or_ge i (add_raw p q).size) with h_lt | h_ge @@ -613,7 +613,7 @@ theorem add_zero (hp : p.canonical) : p + 0 = p := by rw [add_comm, zero_add p hp] theorem add_assoc [LawfulBEq R] : p + q + r = p + (q + r) := by - show (add_raw p q).trim + r = p + (add_raw q r).trim + change (add_raw p q).trim + r = p + (add_raw q r).trim rw [trim_add_trim, add_comm p, trim_add_trim, add_comm _ p] apply congrArg trim ext i @@ -637,7 +637,7 @@ theorem nsmul_raw_succ (n : ℕ) (p : UniPoly Q) : simp [add_coeff, hi] rw [_root_.add_mul (R:=Q) n 1 p[i], one_mul] -theorem nsmul_succ [LawfulBEq R] (n : ℕ) {p: UniPoly R} : nsmul (n + 1) p = nsmul n p + p := by +theorem nsmul_succ [LawfulBEq R] (n : ℕ) {p : UniPoly R} : nsmul (n + 1) p = nsmul n p + p := by unfold nsmul rw [trim_add_trim] apply congrArg trim @@ -739,8 +739,7 @@ theorem eval_toPoly_eq_eval (x : Q) (p : UniPoly Q) : p.toPoly.eval x = p.eval x rw [← Array.foldl_hom (Polynomial.eval x) (g₁ := fun acc (t : Q × ℕ) ↦ acc + Polynomial.C t.1 * Polynomial.X ^ t.2) (g₂ := fun acc (a, i) ↦ acc + a * x ^ i) ] - congr - exact Polynomial.eval_zero + · congr; exact Polynomial.eval_zero simp /-- characterize `p.toPoly` by showing that its coefficients are exactly the coefficients of `p` -/ @@ -748,7 +747,7 @@ lemma coeff_toPoly {p : UniPoly Q} {n : ℕ} : p.toPoly.coeff n = p.coeff n := b unfold toPoly eval₂ let f := fun (acc: Q[X]) ((a,i): Q × ℕ) ↦ acc + Polynomial.C a * Polynomial.X ^ i - show (Array.foldl f 0 p.zipIdx).coeff n = p.coeff n + change (Array.foldl f 0 p.zipIdx).coeff n = p.coeff n -- we slightly weaken the goal, to use `Array.foldl_induction` let motive (size: ℕ) (acc: Q[X]) := acc.coeff n = if (n < size) then p.coeff n else 0 @@ -762,10 +761,10 @@ lemma coeff_toPoly {p : UniPoly Q} {n : ℕ} : p.toPoly.coeff n = p.coeff n := b rw [coeff, Array.getD_eq_getD_getElem?, Array.getElem?_eq_none hn, Option.getD_none] apply 
Array.foldl_induction motive - · show motive 0 0 + · change motive 0 0 simp [motive] - show ∀ (i : Fin p.zipIdx.size) acc, motive i acc → motive (i + 1) (f acc p.zipIdx[i]) + change ∀ (i : Fin p.zipIdx.size) acc, motive i acc → motive (i + 1) (f acc p.zipIdx[i]) unfold motive f intros i acc h have i_lt_p : i < p.size := by linarith [i.is_lt] @@ -849,7 +848,7 @@ theorem trim_toImpl [LawfulBEq R] (p : R[X]) : p.toImpl.trim = p.toImpl := by /-- on canonical `UniPoly`s, `toImpl` is also a left-inverse of `toPoly`. in particular, `toPoly` is a bijection from `UniPolyC` to `Polynomial`. -/ lemma toImpl_toPoly_of_canonical [LawfulBEq R] (p: UniPolyC R) : p.toPoly.toImpl = p := by - -- we will show something slightly more general: `toPoly` is injective on canonical polynomials + -- we will show something slightly more general: `toPoly` is injective on canonical polynomials suffices h_inj : ∀ q : UniPolyC R, p.toPoly = q.toPoly → p = q by have : p.toPoly = p.toPoly.toImpl.toPoly := by rw [toPoly_toImpl] exact h_inj ⟨ p.toPoly.toImpl, trim_toImpl p.toPoly ⟩ this |> congrArg Subtype.val |>.symm @@ -861,7 +860,7 @@ lemma toImpl_toPoly_of_canonical [LawfulBEq R] (p: UniPolyC R) : p.toPoly.toImpl exact hpq |> congrArg (fun p => p.coeff i) /-- the roundtrip to and from mathlib maps a `UniPoly` to its trimmed/canonical representative -/ -theorem toImpl_toPoly [LawfulBEq R] (p: UniPoly R) : p.toPoly.toImpl = p.trim := by +theorem toImpl_toPoly [LawfulBEq R] (p : UniPoly R) : p.toPoly.toImpl = p.trim := by rw [← toPoly_trim] exact toImpl_toPoly_of_canonical ⟨ p.trim, Trim.trim_twice p⟩ @@ -885,7 +884,7 @@ def equiv (p q : UniPoly R) : Prop := /-- Reflexivity of the equivalence relation. -/ @[simp] theorem equiv_refl (p : UniPoly Q) : equiv p p := - by simp [equiv, List.matchSize] + by simp [equiv] /-- Symmetry of the equivalence relation.
-/ @[simp] theorem equiv_symm {p q : UniPoly Q} : equiv p q → equiv q p := @@ -895,7 +894,7 @@ open List in /-- Transitivity of the equivalence relation. -/ @[simp] theorem equiv_trans {p q r : UniPoly Q} : equiv p q → equiv q r → equiv p r := fun hpq hqr => by - simp_all [equiv, Array.matchSize] + simp_all [equiv] sorry -- have hpq' := (List.matchSize_eq_iff_forall_eq p.toList q.toList 0).mp hpq -- have hqr' := (List.matchSize_eq_iff_forall_eq q.toList r.toList 0).mp hqr @@ -914,11 +913,11 @@ instance instSetoidUniPoly: Setoid (UniPoly R) where r := equiv iseqv := instEquivalenceEquiv -/-- The quotient of `UniPoly R` by `UniPoly.equiv`. This will be shown to be equivalent to +/-- The quotient of `UniPoly R` by `UniPoly.equiv`. This will be shown to be equivalent to `Polynomial R`. -/ def QuotientUniPoly := Quotient (@instSetoidUniPoly R _) --- TODO: show that operations on `UniPoly` descend to `QuotientUniPoly` +-- TODO: show that operations on `UniPoly` descend to `QuotientUniPoly` diff --git a/ArkLib/OracleReduction/Transform/BCS.lean b/ArkLib/OracleReduction/BCS/Basic.lean similarity index 100% rename from ArkLib/OracleReduction/Transform/BCS.lean rename to ArkLib/OracleReduction/BCS/Basic.lean diff --git a/ArkLib/OracleReduction/Basic.lean b/ArkLib/OracleReduction/Basic.lean index 809511af6..c19855123 100644 --- a/ArkLib/OracleReduction/Basic.lean +++ b/ArkLib/OracleReduction/Basic.lean @@ -1,11 +1,10 @@ /- -Copyright (c) 2024 ArkLib Contributors. All rights reserved. +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.OracleReduction.Prelude -import ArkLib.OracleReduction.OracleInterface +import ArkLib.OracleReduction.ProtocolSpec /-! # Interactive (Oracle) Reductions @@ -16,7 +15,7 @@ format: - The protocol proceeds over a number of steps. In each step, either the prover or the verifier sends a message to the other.
We assume that this sequence of interactions is fixed in advance, - and is described by a specification `ProtocolSpec` (see below). + and is described by a protocol specification (see `ProtocolSpec.lean`). Note that we do _not_ require interleaving prover's messages with verifier's challenges, for maximum flexibility in defining reductions. @@ -59,188 +58,27 @@ We can then specialize our definition to obtain specific instantiations in the l We note that this file only defines the type signature of IORs. The semantics of executing an IOR can be found in `Execution.lean`, while the security notions are found in the `Security` folder. +Note the appearance of the various dependencies in the type signatures: +- `oSpec : OracleSpec ι` comes first, as we expect this to be the ambient (fixed) shared oracle + specification for the protocol +- `StmtIn` comes next, as the type of the input statement to the protocol +- Then we have `OStmtIn` for the type of the oracle input statements (for oracle reductions), + followed by `WitIn`, the type of the input witness +- Then we have `StmtOut` for the type of the output statement, followed by `OStmtOut` for the type + of the output oracle statements, and finally `WitOut` for the type of the output witness +- Finally, we have `pSpec : ProtocolSpec n`, which is the protocol specification for the (oracle) + reduction + +We arrange things in this way for potential future extensions, where later types may depend on +earlier types (i.e. `WitIn`, `StmtOut`, or `pSpec` may depend on `StmtIn`; though we do not expect, +say, `StmtOut` or `pSpec` to depend on the witness types, as that is not available to the (oracle) +verifier). -/ -open OracleComp OracleSpec SubSpec - -section Format - -/-- Type signature for an interactive protocol, with `n` messages exchanged. 
-/ -@[reducible] -def ProtocolSpec (n : ℕ) := Fin n → Direction × Type - -variable {n : ℕ} - -namespace ProtocolSpec - -@[simp] -abbrev getDir (pSpec : ProtocolSpec n) (i : Fin n) := pSpec i |>.1 - -@[simp] -abbrev getType (pSpec : ProtocolSpec n) (i : Fin n) := pSpec i |>.2 - -/-- Subtype of `Fin n` for the indices corresponding to messages in a protocol specification -/ -@[reducible, simp] -def MessageIdx (pSpec : ProtocolSpec n) := - {i : Fin n // (pSpec i).1 = Direction.P_to_V} - -/-- Subtype of `Fin n` for the indices corresponding to challenges in a protocol specification -/ -@[reducible, simp] -def ChallengeIdx (pSpec : ProtocolSpec n) := - {i : Fin n // (pSpec i).1 = Direction.V_to_P} - -instance {pSpec : ProtocolSpec n} : CoeHead (MessageIdx pSpec) (Fin n) where - coe := fun i => i.1 -instance {pSpec : ProtocolSpec n} : CoeHead (ChallengeIdx pSpec) (Fin n) where - coe := fun i => i.1 - -/-- The type of the `i`-th message in a protocol specification. - -This does not distinguish between messages received in full or as an oracle. -/ -@[reducible, inline, specialize, simp] -def Message (pSpec : ProtocolSpec n) (i : MessageIdx pSpec) := (pSpec i.val).2 - -/-- The type of the `i`-th challenge in a protocol specification -/ -@[reducible, inline, specialize, simp] -def Challenge (pSpec : ProtocolSpec n) (i : ChallengeIdx pSpec) := (pSpec i.val).2 - -/-- The type of all messages in a protocol specification -/ -@[reducible, inline, specialize] -def Messages (pSpec : ProtocolSpec n) : Type := ∀ i, pSpec.Message i - -/-- The type of all challenges in a protocol specification -/ -@[reducible, inline, specialize] -def Challenges (pSpec : ProtocolSpec n) : Type := ∀ i, pSpec.Challenge i - -/-- The specification of whether each message in a protocol specification is available in full - (`None`) or received as an oracle (`Some (instOracleInterface (pSpec.Message i))`). - - This is defined as a type class for notational convenience. 
-/ -class OracleInterfaces (pSpec : ProtocolSpec n) where - oracleInterfaces : ∀ i, Option (OracleInterface (pSpec.Message i)) - -section OracleInterfaces - -variable (pSpec : ProtocolSpec n) [inst : OracleInterfaces pSpec] - -/-- Subtype of `pSpec.MessageIdx` for messages that are received as oracles -/ -@[reducible, inline, specialize] -def OracleMessageIdx := {i : pSpec.MessageIdx // (inst.oracleInterfaces i).isSome } - -/-- The oracle interface instances for messages that are received as oracles -/ -instance {i : OracleMessageIdx pSpec} : OracleInterface (pSpec.Message i) := - (inst.oracleInterfaces i).get i.2 - -/-- Subtype of `pSpec.MessageIdx` for messages that are received in full -/ -@[reducible, inline, specialize] -def PlainMessageIdx := {i : pSpec.MessageIdx // (inst.oracleInterfaces i).isNone } - -/-- The type of messages that are received in full -/ -@[reducible, inline, specialize] -def PlainMessage (i : pSpec.PlainMessageIdx) := pSpec.Message i.1 - -/-- The type of messages that are received as oracles -/ -@[reducible, inline, specialize] -def OracleMessage (i : pSpec.OracleMessageIdx) := pSpec.Message i.1 - -def PlainMessages (pSpec : ProtocolSpec n) [OracleInterfaces pSpec] : Type := - ∀ i, pSpec.PlainMessage i - -def OracleMessages (pSpec : ProtocolSpec n) [OracleInterfaces pSpec] : Type := - ∀ i, pSpec.OracleMessage i - --- TODO: re-define `OracleReduction` to depend on these oracle interfaces --- Currently, we assume that _all_ messages are available as oracles in an oracle reduction - -end OracleInterfaces - -/-- There is only one protocol specification with 0 messages (the empty one) -/ -instance : Unique (ProtocolSpec 0) := inferInstance - -/-- A (partial) transcript of a protocol specification, indexed by some `k : Fin (n + 1)`, is a - list of messages from the protocol for all indices `i` less than `k`. 
-/ -@[reducible, inline, specialize] -def Transcript (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := - (i : Fin k) → pSpec.getType (Fin.castLE (by omega) i) - -/-- There is only one transcript for the empty protocol -/ -instance {k : Fin 1} : Unique (Transcript k (default : ProtocolSpec 0)) where - default := fun i => () - uniq := by solve_by_elim - -/-- There is only one transcript for any protocol with cutoff index 0 -/ -instance {pSpec : ProtocolSpec n} : Unique (Transcript 0 pSpec) where - default := fun i => Fin.elim0 i - uniq := fun T => by ext i; exact Fin.elim0 i - -/-- The full transcript of an interactive protocol, which is a list of messages and challenges. - -Note that this is definitionally equal to `Transcript (Fin.last n) pSpec`. -/ -@[reducible, inline, specialize] -def FullTranscript (pSpec : ProtocolSpec n) := (i : Fin n) → pSpec.getType i - -/-- There is only one full transcript (the empty one) for an empty protocol -/ -instance : Unique (FullTranscript (default : ProtocolSpec 0)) := inferInstance - -variable {pSpec : ProtocolSpec n} - --- Potential natural re-indexing of messages and challenges. --- Not needed for now, but could be useful. - --- instance instFinEnumMessageIdx : FinEnum pSpec.MessageIdx := --- FinEnum.Subtype.finEnum fun x ↦ pSpec.getDir x = Direction.P_to_V --- instance instFinEnumChallengeIdx : FinEnum pSpec.ChallengeIdx := --- FinEnum.Subtype.finEnum fun x ↦ pSpec.getDir x = Direction.V_to_P - -/-- Nicely, a transcript up to the last round of the protocol is definitionally equivalent to a full - transcript. -/ -@[inline] -abbrev Transcript.toFull (T : Transcript (Fin.last n) pSpec) : FullTranscript pSpec := T - -/-- Add a message to the end of a partial transcript. This is definitionally equivalent to - `Fin.snoc`. 
-/ -@[inline] -abbrev Transcript.snoc {m : Fin n} (msg : pSpec.getType m) - (T : Transcript m.castSucc pSpec) : Transcript m.succ pSpec := Fin.snoc T msg - -@[reducible, inline, specialize] -def FullTranscript.messages (transcript : FullTranscript pSpec) (i : MessageIdx pSpec) := - transcript i.val - -@[reducible, inline, specialize] -def FullTranscript.challenges (transcript : FullTranscript pSpec) (i : ChallengeIdx pSpec) := - transcript i.val - -/-- Turn each verifier's challenge into an oracle, where querying a unit type gives back the - challenge -/ -@[reducible, inline, specialize] -instance instChallengeOracleInterface {pSpec : ProtocolSpec n} {i : pSpec.ChallengeIdx} : - OracleInterface (pSpec.Challenge i) where - Query := Unit - Response := pSpec.Challenge i - oracle := fun c _ => c - -/-- Query a verifier's challenge for a given challenge round `i` -/ -@[reducible, inline, specialize] -def getChallenge (pSpec : ProtocolSpec n) (i : pSpec.ChallengeIdx) : - OracleComp [pSpec.Challenge]ₒ (pSpec.Challenge i) := - (query i () : OracleQuery [pSpec.Challenge]ₒ (pSpec.Challenge i)) - -end ProtocolSpec - -open ProtocolSpec - --- Notation for the type signature of an interactive protocol -notation "𝒫——⟦" term "⟧⟶𝒱" => (Direction.P_to_V, term) -notation "𝒫⟵⟦" term "⟧——𝒱" => (Direction.V_to_P, term) - --- Test notation -def pSpecNotationTest : ProtocolSpec 2 := - ![ 𝒫——⟦ Polynomial (ZMod 101) ⟧⟶𝒱, - 𝒫⟵⟦ ZMod 101 ⟧——𝒱] +open OracleComp OracleSpec SubSpec ProtocolSpec -- Add an indexer? -structure Indexer (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) (Index : Type) +structure Indexer {ι : Type} (oSpec : OracleSpec ι) {n : ℕ} (pSpec : ProtocolSpec n) (Index : Type) (Encoding : Type) where encode : Index → OracleComp oSpec Encoding [OracleInterface : OracleInterface Encoding] @@ -256,7 +94,7 @@ structure ProverState (n : ℕ) where /-- Initialization of prover's state via inputting the statement and witness. 
-/ @[ext] structure ProverIn (StmtIn WitIn PrvState : Type) where - input : StmtIn → WitIn → PrvState + input : StmtIn × WitIn → PrvState -- initState : PrvState -- if honest prover, then expect that PrvState 0 = WitIn @@ -269,7 +107,7 @@ For maximum simplicity, we only define the `sendMessage` function as an oracle c other functions are pure. We may revisit this decision in the future. -/ @[ext] -structure ProverRound (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) +structure ProverRound {ι : Type} (oSpec : OracleSpec ι) {n : ℕ} (pSpec : ProtocolSpec n) extends ProverState n where /-- Send a message and update the prover's state -/ sendMessage (i : MessageIdx pSpec) : @@ -296,28 +134,30 @@ together. For completeness, we will require that the prover's output statement i the verifier's output statement. For soundness, we will only use the output statement generated by the verifier. -/ @[ext] -structure Prover (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) extends +structure Prover {ι : Type} (oSpec : OracleSpec ι) + (StmtIn WitIn StmtOut WitOut : Type) + {n : ℕ} (pSpec : ProtocolSpec n) extends ProverState n, ProverIn StmtIn WitIn (PrvState 0), - ProverRound pSpec oSpec, + ProverRound oSpec pSpec, ProverOut StmtOut WitOut (PrvState (Fin.last n)) /-- A verifier of an interactive protocol is a function that takes in the input statement and the transcript, and performs an oracle computation that outputs a new statement -/ @[ext] -structure Verifier (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (StmtIn StmtOut : Type) where +structure Verifier {ι : Type} (oSpec : OracleSpec ι) + (StmtIn StmtOut : Type) {n : ℕ} (pSpec : ProtocolSpec n) where verify : StmtIn → FullTranscript pSpec → OracleComp oSpec StmtOut /-- An **(oracle) prover** in an interactive **oracle** reduction is a prover in the non-oracle reduction whose input statement also consists of the underlying messages for the oracle 
statements -/ @[reducible, inline] -def OracleProver (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) - {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) := - Prover pSpec oSpec (StmtIn × (∀ i, OStmtIn i)) WitIn (StmtOut × (∀ i, OStmtOut i)) WitOut +def OracleProver {ι : Type} (oSpec : OracleSpec ι) + (StmtIn : Type) {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) (WitIn : Type) + (StmtOut : Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) (WitOut : Type) + {n : ℕ} (pSpec : ProtocolSpec n) := + Prover oSpec (StmtIn × (∀ i, OStmtIn i)) WitIn (StmtOut × (∀ i, OStmtOut i)) WitOut pSpec /-- An **(oracle) verifier** of an interactive **oracle** reduction consists of: @@ -333,10 +173,15 @@ def OracleProver (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) Intuitively, the oracle verifier cannot do anything more in returning the output oracle statements, other than specifying a subset of the ones it has received (and dropping the rest). -/ @[ext] -structure OracleVerifier (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] (StmtIn StmtOut : Type) - {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) where +structure OracleVerifier {ι : Type} (oSpec : OracleSpec ι) + (StmtIn : Type) {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) + (StmtOut : Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) + {n : ℕ} (pSpec : ProtocolSpec n) + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] + [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + where + -- This will be needed after the switch to `simOStmt` + -- [Oₛₒ : ∀ i, OracleInterface (OStmtOut i)] /-- The core verification logic. 
Takes the input statement `stmtIn` and all verifier challenges `challenges` (which are determined outside this function, typically by sampling for @@ -346,6 +191,12 @@ structure OracleVerifier (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpe verify : StmtIn → pSpec.Challenges → OracleComp (oSpec ++ₒ ([OStmtIn]ₒ ++ₒ [pSpec.Message]ₒ)) StmtOut + -- TODO: this seems like the right way for compositionality + -- Makes it potentially more difficult for compilation with commitment schemes + -- Can recover the old version (with `embed` and `hEq`) via a constructor `QueryImpl.ofEmbed` + + -- simOStmt : QueryImpl [OStmtOut]ₒ (OracleComp ([OStmtIn]ₒ ++ₒ [pSpec.Message]ₒ)) + /-- An embedding that specifies how each output oracle statement (indexed by `ιₛₒ`) is derived. It maps an index `i : ιₛₒ` to either an index `j : ιₛᵢ` (meaning `OStmtOut i` comes from `OStmtIn j`) or an index `k : pSpec.MessageIdx` (meaning `OStmtOut i` comes from the @@ -368,15 +219,17 @@ structure OracleVerifier (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpe namespace OracleVerifier -variable {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] {StmtIn StmtOut : Type} - {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) +variable {ι : Type} {oSpec : OracleSpec ι} + {StmtIn : Type} {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} + {StmtOut : Type} {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] + [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) /-- An oracle verifier can be seen as a (non-oracle) verifier by providing the oracle interface using its knowledge of the oracle statements and the transcript messages in the clear -/ -def toVerifier : Verifier pSpec oSpec (StmtIn × ∀ 
i, OStmtIn i) (StmtOut × (∀ i, OStmtOut i)) where +def toVerifier : Verifier oSpec (StmtIn × ∀ i, OStmtIn i) (StmtOut × (∀ i, OStmtOut i)) pSpec where verify := fun ⟨stmt, oStmt⟩ transcript => do let stmtOut ← simulateQ (OracleInterface.simOracle2 oSpec oStmt transcript.messages) (verifier.verify stmt transcript.challenges) @@ -393,7 +246,7 @@ def toVerifier : Verifier pSpec oSpec (StmtIn × ∀ i, OStmtIn i) (StmtOut × ( TODO: define once `numQueries` is defined in `OracleComp` -/ def numQueries (stmt : StmtIn) (challenges : ∀ i, pSpec.Challenge i) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : OracleComp (oSpec ++ₒ ([OStmtIn]ₒ ++ₒ [pSpec.Message]ₒ)) ℕ := sorry /-- A **non-adaptive** oracle verifier is an oracle verifier that makes a **fixed** list of queries @@ -413,10 +266,13 @@ def numQueries (stmt : StmtIn) (challenges : ∀ i, pSpec.Challenge i) Finally, we also allow for choosing a subset of the input oracle statements + the prover's messages to retain for the output oracle statements. -/ -structure NonAdaptive (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) - [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] (StmtIn StmtOut : Type) - {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) where +structure NonAdaptive {ι : Type} (oSpec : OracleSpec ι) + (StmtIn : Type) {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) + (StmtOut : Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) + {n : ℕ} (pSpec : ProtocolSpec n) + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] + [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + where /-- Makes a list of queries to each of the oracle statements, given the input statement and the challenges -/ @@ -444,8 +300,8 @@ namespace NonAdaptive This essentially performs the queries via `List.mapM`, then runs `verify` on the query-response pairs. 
-/ def toOracleVerifier - (naVerifier : OracleVerifier.NonAdaptive pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : - OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut where + (naVerifier : OracleVerifier.NonAdaptive oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : + OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec where verify := fun stmt challenges => do let queryResponsesOStmt : List ((i : ιₛᵢ) × ((Oₛᵢ i).Query × (Oₛᵢ i).Response)) ← (naVerifier.queryOStmt stmt challenges).mapM @@ -468,20 +324,20 @@ def toOracleVerifier challenges. -/ def numOStmtQueries [DecidableEq ιₛᵢ] (i : ιₛᵢ) (stmt : StmtIn) (challenges : ∀ i, pSpec.Challenge i) - (naVerifier : OracleVerifier.NonAdaptive pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : ℕ := + (naVerifier : OracleVerifier.NonAdaptive oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : ℕ := (naVerifier.queryOStmt stmt challenges).filter (fun q => q.1 = i) |>.length /-- The number of queries made to the `i`-th prover's message, for a given input statement and challenges. -/ def numOMsgQueries (i : pSpec.MessageIdx) (stmt : StmtIn) (challenges : ∀ i, pSpec.Challenge i) - (naVerifier : OracleVerifier.NonAdaptive pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : ℕ := + (naVerifier : OracleVerifier.NonAdaptive oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : ℕ := (naVerifier.queryMsg stmt challenges).filter (fun q => q.1 = i) |>.length /-- The total number of queries made to the oracle statements and the prover's messages, for a given input statement and challenges. 
-/ def totalNumQueries (stmt : StmtIn) (challenges : ∀ i, pSpec.Challenge i) - (naVerifier : OracleVerifier.NonAdaptive pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : ℕ := + (naVerifier : OracleVerifier.NonAdaptive oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : ℕ := (naVerifier.queryOStmt stmt challenges).length + (naVerifier.queryMsg stmt challenges).length end NonAdaptive @@ -491,38 +347,41 @@ end OracleVerifier /-- An **interactive reduction** for a given protocol specification `pSpec`, and relative to oracles defined by `oSpec`, consists of a prover and a verifier. -/ @[ext] -structure Reduction (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) where - prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut - verifier : Verifier pSpec oSpec StmtIn StmtOut +structure Reduction {ι : Type} (oSpec : OracleSpec ι) + (StmtIn WitIn StmtOut WitOut : Type) {n : ℕ} (pSpec : ProtocolSpec n) where + prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec + verifier : Verifier oSpec StmtIn StmtOut pSpec /-- An **interactive oracle reduction** for a given protocol specification `pSpec`, and relative to oracles defined by `oSpec`, consists of a prover and an **oracle** verifier. 
-/ @[ext] -structure OracleReduction (pSpec : ProtocolSpec n) [∀ i, OracleInterface (pSpec.Message i)] - {ι : Type} (oSpec : OracleSpec ι) (StmtIn WitIn StmtOut WitOut : Type) - {ιₛ : Type} (OStmtIn : ιₛ → Type) [Oₛ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) where - prover : OracleProver pSpec oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut - verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut +structure OracleReduction {ι : Type} (oSpec : OracleSpec ι) + (StmtIn : Type) {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) (WitIn : Type) + (StmtOut : Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) (WitOut : Type) + {n : ℕ} (pSpec : ProtocolSpec n) + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + where + prover : OracleProver oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut pSpec + verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec /-- An interactive oracle reduction can be seen as an interactive reduction, via coercing the oracle verifier to a (normal) verifier -/ -def OracleReduction.toReduction {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - {StmtIn WitIn StmtOut WitOut : Type} [∀ i, OracleInterface (pSpec.Message i)] - {ιₛ : Type} {OStmtIn : ιₛ → Type} [Oₛ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} - (oracleReduction : OracleReduction pSpec oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut) : - Reduction pSpec oSpec (StmtIn × (∀ i, OStmtIn i)) WitIn - (StmtOut × (∀ i, OStmtOut i)) WitOut := +def OracleReduction.toReduction {ι : Type} {oSpec : OracleSpec ι} + {StmtIn : Type} {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} {WitIn : Type} + {StmtOut : Type} {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} {WitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + (oracleReduction : OracleReduction oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut pSpec) : + Reduction 
oSpec (StmtIn × (∀ i, OStmtIn i)) WitIn + (StmtOut × (∀ i, OStmtOut i)) WitOut pSpec := ⟨oracleReduction.prover, oracleReduction.verifier.toVerifier⟩ /-- An **interactive proof (IP)** is an interactive reduction where the output statement is a boolean, the output witness is trivial (a `Unit`), and the relation checks whether the output statement is true. -/ -abbrev Proof (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (Statement Witness : Type) := - Reduction pSpec oSpec Statement Witness Bool Unit +@[reducible] def Proof {ι : Type} (oSpec : OracleSpec ι) + (Statement Witness : Type) {n : ℕ} (pSpec : ProtocolSpec n) := + Reduction oSpec Statement Witness Bool Unit pSpec /-- An **interactive oracle proof (IOP)** is an interactive oracle reduction where the output statement is a boolean, while both the output oracle statement & the output witness are @@ -530,26 +389,83 @@ abbrev Proof (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) As a consequence, the output relation in an IOP is effectively a function `Bool → Prop`, which we can again assume to be the trivial one (sending `true` to `True`). 
-/ -abbrev OracleProof (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] (Statement Witness : Type) - {ιₛ : Type} (OStatement : ιₛ → Type) [Oₛ : ∀ i, OracleInterface (OStatement i)] := - OracleReduction pSpec oSpec Statement Witness Bool Unit OStatement (fun _ : Empty => Unit) - -abbrev NonInteractiveProver {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) (Message : Type) := - Prover ![(.P_to_V, Message)] oSpec StmtIn WitIn StmtOut WitOut - -abbrev NonInteractiveVerifier {ι : Type} (oSpec : OracleSpec ι) - (StmtIn StmtOut : Type) (Message : Type) := - Verifier ![(.P_to_V, Message)] oSpec StmtIn StmtOut +@[reducible] def OracleProof {ι : Type} (oSpec : OracleSpec ι) + (Statement : Type) {ιₛᵢ : Type} (OStatement : ιₛᵢ → Type) (Witness : Type) + {n : ℕ} (pSpec : ProtocolSpec n) + [Oₛᵢ : ∀ i, OracleInterface (OStatement i)] + [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] := + OracleReduction oSpec Statement OStatement Witness Bool (fun _ : Empty => Unit) Unit pSpec + +/-- A **non-interactive prover** is a prover that only sends a single message to the verifier. -/ +@[reducible] def NonInteractiveProver (Message : Type) {ι : Type} (oSpec : OracleSpec ι) + (StmtIn WitIn StmtOut WitOut : Type) := + Prover oSpec StmtIn WitIn StmtOut WitOut ![(.P_to_V, Message)] + +/-- A **non-interactive verifier** is a verifier that only receives a single message from the + prover. -/ +@[reducible] def NonInteractiveVerifier (Message : Type) {ι : Type} (oSpec : OracleSpec ι) + (StmtIn StmtOut : Type) := + Verifier oSpec StmtIn StmtOut ![(.P_to_V, Message)] /-- A **non-interactive reduction** is an interactive reduction with only a single message from the prover to the verifier (and none in the other direction). 
-/ -abbrev NonInteractiveReduction {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) (Message : Type) := - Reduction ![(.P_to_V, Message)] oSpec StmtIn WitIn StmtOut WitOut - -end Format +@[reducible] def NonInteractiveReduction (Message : Type) {ι : Type} (oSpec : OracleSpec ι) + (StmtIn WitIn StmtOut WitOut : Type) := + Reduction oSpec StmtIn WitIn StmtOut WitOut ![(.P_to_V, Message)] + +section Trivial + +variable {ι : Type} {oSpec : OracleSpec ι} + {Statement : Type} {ιₛ : Type} {OStatement : ιₛ → Type} {Witness : Type} + [Oₛ : ∀ i, OracleInterface (OStatement i)] + +/-- The trivial / identity prover, which does not send any messages to the verifier, and returns its + input context (statement & witness) as output. -/ +protected def Prover.id : Prover oSpec Statement Witness Statement Witness ![] where + PrvState := fun _ => Statement × Witness + input := _root_.id + sendMessage := fun i => Fin.elim0 i + receiveChallenge := fun i => Fin.elim0 i + output := _root_.id + +/-- The trivial / identity verifier, which does not receive any messages from the prover, and + returns its input statement as output. -/ +protected def Verifier.id : Verifier oSpec Statement Statement ![] where + verify := fun stmt _ => pure stmt + +/-- The trivial / identity reduction, which consists of the trivial prover and verifier. -/ +protected def Reduction.id : Reduction oSpec Statement Witness Statement Witness ![] where + prover := Prover.id + verifier := Verifier.id + +/-- The trivial / identity prover in an oracle reduction, which unfolds to the trivial prover for + the associated non-oracle reduction. -/ +protected def OracleProver.id : + OracleProver oSpec Statement OStatement Witness Statement OStatement Witness ![] := + Prover.id + +/-- The trivial / identity verifier in an oracle reduction, which receives no messages from the + prover, and returns its input statement as output. 
-/ +protected def OracleVerifier.id : + OracleVerifier oSpec Statement OStatement Statement OStatement ![] where + verify := fun stmt _ => pure stmt + embed := Function.Embedding.inl + hEq := fun _ => rfl + +/-- The trivial / identity oracle reduction, which consists of the trivial oracle prover and + verifier. -/ +protected def OracleReduction.id : + OracleReduction oSpec Statement OStatement Witness Statement OStatement Witness ![] := + ⟨OracleProver.id, OracleVerifier.id⟩ + +alias Prover.trivial := Prover.id +alias Verifier.trivial := Verifier.id +alias Reduction.trivial := Reduction.id +alias OracleProver.trivial := OracleProver.id +alias OracleVerifier.trivial := OracleVerifier.id +alias OracleReduction.trivial := OracleReduction.id + +end Trivial section Classes @@ -564,16 +480,17 @@ class ProverFirst (pSpec : ProtocolSpec n) [NeZero n] where class VerifierFirst (pSpec : ProtocolSpec n) [NeZero n] where verifier_first' : (pSpec 0).1 = .V_to_P -class ProverLast (pSpec : ProtocolSpec n) [NeZero n] where - prover_last' : (pSpec (n - 1)).1 = .P_to_V +class ProverLast (pSpec : ProtocolSpec n) [inst : NeZero n] where + prover_last' : (pSpec ⟨n - 1, by simp [Nat.pos_of_neZero]⟩).1 = .P_to_V /-- A protocol specification with the verifier speaking last -/ class VerifierLast (pSpec : ProtocolSpec n) [NeZero n] where - verifier_last' : (pSpec (n - 1)).1 = .V_to_P + verifier_last' : (pSpec ⟨n - 1, by simp [Nat.pos_of_neZero]⟩).1 = .V_to_P class ProverOnly (pSpec : ProtocolSpec 1) extends ProverFirst pSpec -/-- A non-interactive protocol specification with a single message from the prover to the verifier-/ +/-- A non-interactive protocol specification with a single message from the prover to the verifier +-/ alias NonInteractive := ProverOnly class VerifierOnly (pSpec : ProtocolSpec 1) extends VerifierFirst pSpec @@ -588,11 +505,11 @@ theorem verifier_first (pSpec : ProtocolSpec n) [NeZero n] [h : VerifierFirst pS @[simp] theorem prover_last (pSpec : ProtocolSpec n) 
[NeZero n] [h : ProverLast pSpec] : - (pSpec (n - 1)).1 = .P_to_V := h.prover_last' + (pSpec ⟨n - 1, by simp [Nat.pos_of_neZero]⟩).1 = .P_to_V := h.prover_last' @[simp] theorem verifier_last (pSpec : ProtocolSpec n) [NeZero n] [h : VerifierLast pSpec] : - (pSpec (n - 1)).1 = .V_to_P := h.verifier_last' + (pSpec ⟨n - 1, by simp [Nat.pos_of_neZero]⟩).1 = .V_to_P := h.verifier_last' section SingleMessage @@ -696,11 +613,9 @@ def FullTranscript.mk2 {pSpec : ProtocolSpec 2} (msg0 : pSpec.getType 0) (msg1 : theorem FullTranscript.mk2_eq_snoc_snoc {pSpec : ProtocolSpec 2} (msg0 : pSpec.getType 0) (msg1 : pSpec.getType 1) : - FullTranscript.mk2 msg0 msg1 = ((default : pSpec.Transcript 0).snoc msg0).snoc msg1 := by - unfold FullTranscript.mk2 Transcript.snoc - simp only [default, Nat.mod_succ, Nat.lt_one_iff, - not_lt_zero', ↓reduceDIte, Fin.zero_eta, Fin.isValue, Nat.reduceMod, Nat.succ_eq_add_one, - Nat.reduceAdd, Fin.mk_one] + FullTranscript.mk2 msg0 msg1 = ((default : pSpec.Transcript 0).concat msg0).concat msg1 := by + unfold FullTranscript.mk2 Transcript.concat + simp only [default, Fin.isValue] funext i by_cases hi : i = 0 · subst hi; simp [Fin.snoc] @@ -711,159 +626,21 @@ end ProtocolSpec section IsPure -variable {n : ℕ} {ι : Type} {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} - {StmtIn WitIn StmtOut WitOut : Type} +variable {ι : Type} {oSpec : OracleSpec ι} + {StmtIn WitIn StmtOut WitOut : Type} {n : ℕ} {pSpec : ProtocolSpec n} -class Prover.IsPure (P : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) where +class Prover.IsPure (P : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) where is_pure : ∃ sendMessage : ∀ _, _ → _, ∀ i st, P.sendMessage i st = pure (sendMessage i st) -class Verifier.IsPure (V : Verifier pSpec oSpec StmtIn StmtOut) where +class Verifier.IsPure (V : Verifier oSpec StmtIn StmtOut pSpec) where is_pure : ∃ verify : _ → _ → _, ∀ stmtIn transcript, V.verify stmtIn transcript = pure (verify stmtIn transcript) -class Reduction.IsPure (R : Reduction 
pSpec oSpec StmtIn WitIn StmtOut WitOut) where +class Reduction.IsPure (R : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) where prover_is_pure : R.prover.IsPure verifier_is_pure : R.verifier.IsPure end IsPure end Classes - --- The below is not needed for now - --- section Stateless - --- open ProtocolSpec - --- -- Here we define the stateless form of an (oracle) reduction, where both the prover and the --- -- verifier does not maintain any state. This is useful for specification purposes, as it reduces --- -- the protocol to a more pure form. In this stateless form, the context (witness + statement + --- -- oracle statement) is append-only - --- variable {n : ℕ} {ι : Type} - --- -- TODO: figure out if we should go with a `Context` struct like this - --- structure ContextType (ιS : Type) (ιO : Type) (ιW : Type) where --- Statement : ιS → Type --- OracleStatement : ιO → Type --- Witness : ιW → Type - --- def ContextType.toType {ιS ιO ιW : Type} (CtxType : ContextType ιS ιO ιW) : Type := --- (∀ i, CtxType.Statement i) × (∀ i, CtxType.OracleStatement i) × (∀ i, CtxType.Witness i) - --- structure Context {ιS ιO ιW : Type} (CtxType : ContextType ιS ιO ιW) where --- statement : ∀ i, CtxType.Statement i --- oracleStatement : ∀ i, CtxType.OracleStatement i --- witness : ∀ i, CtxType.Witness i --- [OracleInterface : ∀ i, OracleInterface (CtxType.OracleStatement i)] - --- structure Prover.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) --- (Statement Witness : Type) where --- prove (i : pSpec.MessageIdx) : Statement → Witness → --- Transcript i.1.castSucc pSpec → OracleComp oSpec (pSpec.Message i) - --- -- #print Prover.Stateless - --- /-- In a stateless form prover, the output statement is simply the input statement concatenated --- with the transcript, the output witness stays the same, and the prover's state is the partial --- transcript. 
-/ --- def Prover.ofStateless {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- {Statement Witness : Type} --- (P : Prover.Stateless pSpec oSpec Statement Witness) : --- Prover pSpec oSpec Statement Witness (Statement × FullTranscript pSpec) Witness where --- PrvState := fun i => Statement × Transcript i pSpec × Witness --- input := fun stmt wit => ⟨stmt, default, wit⟩ --- sendMessage := fun i ⟨stmt, transcript, wit⟩ => do --- let msg ← P.prove i stmt wit transcript --- return (msg, ⟨stmt, transcript.snoc msg, wit⟩) --- receiveChallenge := fun _ ⟨stmt, transcript, wit⟩ chal => ⟨stmt, transcript.snoc chal, wit⟩ --- output := fun ⟨stmt, transcript, wit⟩ => ⟨⟨stmt, transcript⟩, wit⟩ - --- @[reducible] --- def OracleProver.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) --- (Statement : Type) {ιₛᵢ : Type} (OStatement : ιₛᵢ → Type) (Witness : Type) := --- Prover.Stateless pSpec oSpec (Statement × (∀ i, OStatement i)) Witness - --- def OracleProver.ofStateless {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- {Statement : Type} {ιₛᵢ : Type} {OStatement : ιₛᵢ → Type} {Witness : Type} --- (P : OracleProver.Stateless pSpec oSpec Statement OStatement Witness) : --- OracleProver pSpec oSpec Statement Witness (Statement × (∀ i, pSpec.Challenge i)) Witness --- OStatement (Sum.elim OStatement pSpec.Message) := --- -- by simpa [OracleProver] using Prover.ofStateless P --- sorry - --- /-- A verifier in a stateless form only performs checks (i.e. 
`guard`s) on the input statement --- and transcript -/ --- structure Verifier.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) (Statement : Type) --- where --- verify : Statement → FullTranscript pSpec → OracleComp oSpec Unit - --- def Verifier.ofStateless {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- {Statement : Type} (V : Verifier.Stateless pSpec oSpec Statement) : --- Verifier pSpec oSpec Statement (Statement × FullTranscript pSpec) where --- verify := fun stmt transcript => do --- -- First perform the guard check, then return the statement and transcript --- let _ ← V.verify stmt transcript --- return (stmt, transcript) - --- /-- An oracle verifier in a stateless form only performs checks (i.e. `guard`s) on the input --- statement and transcript -/ --- structure OracleVerifier.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) --- (Statement : Type) {ιₛᵢ : Type} (OStatement : ιₛᵢ → Type) --- [Oₛᵢ : ∀ i, OracleInterface (OStatement i)] [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] --- where --- verify : Statement → (∀ i, pSpec.Challenge i) → --- OracleComp (oSpec ++ₒ ([OStatement]ₒ ++ₒ [pSpec.Message]ₒ)) Unit - --- def OracleVerifier.ofStateless {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- {Statement : Type} {ιₛᵢ : Type} {OStatement : ιₛᵢ → Type} --- [Oₛᵢ : ∀ i, OracleInterface (OStatement i)] [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] --- (V : OracleVerifier.Stateless pSpec oSpec Statement OStatement) : --- OracleVerifier pSpec oSpec Statement (Statement × ∀ i, pSpec.Challenge i) OStatement --- (ιₛₒ := ιₛᵢ ⊕ pSpec.MessageIdx) (Sum.elim OStatement pSpec.Message) where --- verify := fun stmt challenges => do --- -- First perform the guard check, then return the statement and transcript --- let _ ← V.verify stmt challenges --- return (stmt, challenges) - --- embed := Function.Embedding.refl _ - --- hEq := fun i => by aesop - --- structure Reduction.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) --- (Statement Witness : 
Type) where --- prover : Prover.Stateless pSpec oSpec Statement Witness --- verifier : Verifier.Stateless pSpec oSpec Statement - --- def Reduction.ofStateless {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- {Statement Witness : Type} (R : Reduction.Stateless pSpec oSpec Statement Witness) : --- Reduction pSpec oSpec Statement Witness (Statement × FullTranscript pSpec) Witness where --- prover := Prover.ofStateless R.prover --- verifier := Verifier.ofStateless R.verifier - --- structure OracleReduction.Stateless (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) --- (Statement : Type) {ιₛᵢ : Type} (OStatement : ιₛᵢ → Type) (Witness : Type) --- [Oₛᵢ : ∀ i, OracleInterface (OStatement i)] [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] --- where --- prover : OracleProver.Stateless pSpec oSpec Statement OStatement Witness --- verifier : OracleVerifier.Stateless pSpec oSpec Statement OStatement - --- def Prover.Stateless.runToRound {pSpec : ProtocolSpec n} {oSpec : OracleSpec ι} --- -- {CtxType : ReductionContextType} {Ctx : ReductionContext CtxType} --- {Statement Witness : Type} --- (stmt : Statement) (wit : Witness) (i : Fin (n + 1)) --- (P : Prover.Stateless pSpec oSpec Statement Witness) : --- OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (pSpec.Transcript i) := --- Fin.inductionOn i (pure default) --- (fun j ih => match hDir : (pSpec j).1 with --- | .P_to_V => do --- let transcript ← ih --- let msg ← P.prove ⟨j, hDir⟩ stmt wit transcript --- return (← ih).snoc msg --- | .V_to_P => do --- let chal ← query (spec := [pSpec.Challenge]ₒ) ⟨j, hDir⟩ () --- return (← ih).snoc chal) - --- end Stateless diff --git a/ArkLib/OracleReduction/Cast.lean b/ArkLib/OracleReduction/Cast.lean index daf43e210..7ec900fe7 100644 --- a/ArkLib/OracleReduction/Cast.lean +++ b/ArkLib/OracleReduction/Cast.lean @@ -9,7 +9,7 @@ import ArkLib.OracleReduction.Security.Basic /-! 
# Casting for structures of oracle reductions - We define custom dependent casts (registered as `DepCast` instances) for the following structures: + We define custom dependent casts (registered as `DCast` instances) for the following structures: - `ProtocolSpec` - `(Full)Transcript` - `(Oracle)Prover` @@ -17,84 +17,212 @@ import ArkLib.OracleReduction.Security.Basic - `(Oracle)Reduction` -/ -/-! ### Casting Protocol Specifications -/ namespace ProtocolSpec -section Cast +variable {m n : ℕ} (h : m = n) -variable {m n : ℕ} +/-! ### Casting Protocol Specifications -/ /-- Casting a `ProtocolSpec` across an equality of the number of rounds One should use the type-class function `dcast` instead of this one. -/ -protected def cast (h : m = n) (pSpec : ProtocolSpec m) : ProtocolSpec n := +protected def cast (pSpec : ProtocolSpec m) : ProtocolSpec n := pSpec ∘ (Fin.cast h.symm) @[simp] -theorem cast_id : ProtocolSpec.cast (Eq.refl n) = id := rfl +theorem cast_id : ProtocolSpec.cast (Eq.refl m) = id := rfl -instance instDepCast : DepCast Nat ProtocolSpec where +instance instDCast : DCast Nat ProtocolSpec where dcast h := ProtocolSpec.cast h dcast_id := cast_id -theorem cast_eq_dcast (h : m = n) (pSpec : ProtocolSpec m) : - dcast h pSpec = ProtocolSpec.cast h pSpec := rfl +theorem cast_eq_dcast {h : m = n} {pSpec : ProtocolSpec m} : + pSpec.cast h = dcast h pSpec := rfl + +/-! ### Casting (Partial) Transcripts -/ + +variable {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} + +@[simp] +theorem cast_funext {h} (hSpec : pSpec₁.cast h = pSpec₂) {i : Fin n} : + pSpec₁ (Fin.cast h.symm i) = pSpec₂ i := funext_iff.mp hSpec i + +alias cast_idx := cast_funext + +theorem cast_symm {h} (hSpec : pSpec₁.cast h = pSpec₂) : pSpec₂.cast h.symm = pSpec₁ := by + rw [cast_eq_dcast] at hSpec ⊢ + exact dcast_symm h hSpec + +variable (hSpec : pSpec₁.cast h = pSpec₂) + +namespace MessageIdx + +/-- Casting a message index across an equality of `ProtocolSpec`s. 
-/ +protected def cast (i : MessageIdx pSpec₁) : MessageIdx pSpec₂ := + ⟨Fin.cast h i.1, by simp [← hSpec, ProtocolSpec.cast, i.property]⟩ + +@[simp] +theorem cast_id : MessageIdx.cast (Eq.refl m) rfl = (id : pSpec₁.MessageIdx → _) := rfl + +instance instDCast₂ : DCast₂ Nat ProtocolSpec (fun _ pSpec => MessageIdx pSpec) where + dcast₂ h := MessageIdx.cast h + dcast₂_id := cast_id + +theorem cast_eq_dcast₂ {h} {hSpec : pSpec₁.cast h = pSpec₂} {i : MessageIdx pSpec₁}: + i.cast h hSpec = dcast₂ h hSpec i := rfl + +end MessageIdx + +@[simp] +theorem Message.cast_idx {i : MessageIdx pSpec₂} : + pSpec₁.Message (i.cast h.symm (cast_symm hSpec)) = pSpec₂.Message i := by + simp [MessageIdx.cast] + exact congrArg Prod.snd (cast_funext hSpec) + +namespace ChallengeIdx + +/-- Casting a challenge index across an equality of `ProtocolSpec`s. -/ +protected def cast (i : ChallengeIdx pSpec₁) : ChallengeIdx pSpec₂ := + ⟨Fin.cast h i.1, by simp [← hSpec, ProtocolSpec.cast, i.property]⟩ + +@[simp] +theorem cast_id : ChallengeIdx.cast (Eq.refl m) rfl = (id : pSpec₁.ChallengeIdx → _) := rfl + +instance instDCast₂ : DCast₂ Nat ProtocolSpec (fun _ pSpec => ChallengeIdx pSpec) where + dcast₂ h := ChallengeIdx.cast h + dcast₂_id := cast_id + +theorem cast_eq_dcast₂ {h : m = n} {hSpec : pSpec₁.cast h = pSpec₂} {i : ChallengeIdx pSpec₁}: + i.cast h hSpec = dcast₂ h hSpec i := rfl + +end ChallengeIdx + +@[simp] +theorem Challenge.cast_idx {i : ChallengeIdx pSpec₂} : + pSpec₁.Challenge (i.cast h.symm (cast_symm hSpec)) = pSpec₂.Challenge i := by + simp [ChallengeIdx.cast] + exact congrArg Prod.snd (cast_funext hSpec) + +variable {k : Fin (m + 1)} {l : Fin (n + 1)} + +namespace Transcript + +/-- Casting two partial transcripts across an equality of `ProtocolSpec`s, where the cutoff indices + are equal. 
-/ +protected def cast (hIdx : k.val = l.val) (hSpec : pSpec₁.cast h = pSpec₂) + (T : pSpec₁.Transcript k) : pSpec₂.Transcript l := + fun i => _root_.cast (by rw [← hSpec]; rfl) (T (Fin.cast hIdx.symm i)) + +@[simp] +theorem cast_id : Transcript.cast rfl rfl rfl = (id : pSpec₁.Transcript k → _) := rfl + +instance instDCast₃ : DCast₃ Nat (fun n => Fin (n + 1)) (fun n _ => ProtocolSpec n) + (fun _ k pSpec => pSpec.Transcript k) where + dcast₃ h h' h'' T := Transcript.cast h + (by simp only [dcast] at h'; rw [← h']; sorry) + (by simp [ProtocolSpec.cast_eq_dcast, dcast_eq_root_cast]; exact h'') + T + dcast₃_id := cast_id -end Cast +-- theorem cast_eq_dcast₃ (h : m = n) (hIdx : k.val = l.val) (hSpec : pSpec₁.cast h = pSpec₂) +-- (T : Transcript pSpec₁ k) : +-- T.cast h hIdx hSpec = (dcast₃ h (by sorry) sorry T : pSpec₂.Transcript l) := rfl + +end Transcript namespace FullTranscript /-! ### Casting Full Transcripts -/ -section Cast - -variable {m n : ℕ} {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} +/-- Casting a transcript across an equality of `ProtocolSpec`s. -/-- Casting a transcript across an equality of `ProtocolSpec`s -/ -protected def cast (h : m = n) (hSpec : dcast h pSpec₁ = pSpec₂) (T : FullTranscript pSpec₁) : +Internally invoke `Transcript.cast` with the last indices. 
-/ +protected def cast (T : FullTranscript pSpec₁) : FullTranscript pSpec₂ := - fun i => _root_.cast (congrFun (congrArg getType hSpec) i) (T (Fin.cast h.symm i)) + Transcript.cast (k := Fin.last m) (l := Fin.last n) h h hSpec T @[simp] theorem cast_id : FullTranscript.cast rfl rfl = (id : pSpec₁.FullTranscript → _) := rfl -instance instDepCast₂ : DepCast₂ Nat ProtocolSpec (fun _ pSpec => FullTranscript pSpec) where +instance instDCast₂ : DCast₂ Nat ProtocolSpec (fun _ pSpec => FullTranscript pSpec) where dcast₂ h h' T := FullTranscript.cast h h' T dcast₂_id := cast_id -theorem cast_eq_dcast₂ (h : m = n) (hSpec : dcast h pSpec₁ = pSpec₂) (T : FullTranscript pSpec₁) : +theorem cast_eq_dcast₂ {T : FullTranscript pSpec₁} : dcast₂ h hSpec T = FullTranscript.cast h hSpec T := rfl -end Cast - end FullTranscript end ProtocolSpec +variable {ι : Type} {oSpec : OracleSpec ι} {StmtIn WitIn StmtOut WitOut : Type} +variable {m n : ℕ} {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} +variable (h : m = n) (hSpec : pSpec₁.cast h = pSpec₂) + +open ProtocolSpec + +/-! ### Casting Provers -/ + +namespace Prover + +/-- Casting the prover of a non-oracle reduction across an equality of `ProtocolSpec`s. -/ +protected def cast (P : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec₁) : + Prover oSpec StmtIn WitIn StmtOut WitOut pSpec₂ where + PrvState := P.PrvState ∘ Fin.cast (congrArg (· + 1) h.symm) + input := P.input + sendMessage := fun i st => do + let ⟨msg, newSt⟩ ← P.sendMessage (i.cast h.symm (cast_symm hSpec)) st + return ⟨(Message.cast_idx h hSpec) ▸ msg, newSt⟩ + receiveChallenge := fun i st chal => + P.receiveChallenge (i.cast h.symm (cast_symm hSpec)) st ((Challenge.cast_idx h hSpec) ▸ chal) + output := P.output ∘ (fun st => _root_.cast (by simp) st) + +end Prover + /-! 
### Casting Verifiers -/ -section Cast +namespace Verifier -variable {m n : ℕ} {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} -variable {ι : Type} {oSpec : OracleSpec ι} {StmtIn StmtOut : Type} +/-- Casting the verifier of a non-oracle reduction across an equality of `ProtocolSpec`s. -/-- To cast the verifier, we only need to cast the transcript. -/ -def Verifier.cast - (h : m = n) (hSpec : dcast h pSpec₁ = pSpec₂) - (V : Verifier pSpec₁ oSpec StmtIn StmtOut) : - Verifier pSpec₂ oSpec StmtIn StmtOut where +This boils down to casting the (full) transcript. -/ +protected def cast (V : Verifier oSpec StmtIn StmtOut pSpec₁) : + Verifier oSpec StmtIn StmtOut pSpec₂ where verify := fun stmt transcript => V.verify stmt (dcast₂ h.symm (dcast_symm h hSpec) transcript) @[simp] -def Verifier.cast_id - (V : Verifier pSpec₁ oSpec StmtIn StmtOut) : - V.cast rfl rfl = V := by +theorem cast_id : Verifier.cast rfl rfl = (id : Verifier oSpec StmtIn StmtOut pSpec₁ → _) := by ext; simp [Verifier.cast] -instance instDepCast₂Verifier : - DepCast₂ Nat ProtocolSpec (fun _ pSpec => Verifier pSpec oSpec StmtIn StmtOut) where +instance instDCast₂Verifier : + DCast₂ Nat ProtocolSpec (fun _ pSpec => Verifier oSpec StmtIn StmtOut pSpec) where + dcast₂ := Verifier.cast + dcast₂_id := by intros; funext; simp + +theorem cast_eq_dcast₂ {V : Verifier oSpec StmtIn StmtOut pSpec₁} : + V.cast h hSpec = dcast₂ h hSpec V := rfl + +end Verifier + +namespace Reduction + +/-- Casting the reduction of a non-oracle reduction across an equality of `ProtocolSpec`s, which + casts the underlying prover and verifier. 
-/ +protected def cast (R : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec₁) : + Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec₂ where + prover := R.prover.cast h hSpec + verifier := R.verifier.cast h hSpec + +@[simp] +def cast_id {R : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec₁} : + R.cast rfl rfl = R := by + ext : 1 <;> simp [Reduction.cast, Prover.cast, Verifier.cast] + rfl + +instance instDCast₂Verifier : + DCast₂ Nat ProtocolSpec (fun _ pSpec => Verifier oSpec StmtIn StmtOut pSpec) where dcast₂ := Verifier.cast dcast₂_id := by intros; funext; simp -end Cast +end Reduction diff --git a/ArkLib/OracleReduction/Composition/Sequential/Append.lean b/ArkLib/OracleReduction/Composition/Sequential/Append.lean index fe87fe7ea..844eaa6e6 100644 --- a/ArkLib/OracleReduction/Composition/Sequential/Append.lean +++ b/ArkLib/OracleReduction/Composition/Sequential/Append.lean @@ -5,7 +5,7 @@ Authors: Quang Dao -/ import ArkLib.OracleReduction.Composition.Sequential.ProtocolSpec -import ArkLib.OracleReduction.Security.Basic +import ArkLib.OracleReduction.Security.RoundByRound /-! # Sequential Composition of Two (Oracle) Reductions @@ -39,8 +39,8 @@ end find_home open ProtocolSpec -variable {m n : ℕ} {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} {ι : Type} [DecidableEq ι] - {oSpec : OracleSpec ι} {Stmt₁ Wit₁ Stmt₂ Wit₂ Stmt₃ Wit₃ : Type} +variable {ι : Type} {oSpec : OracleSpec ι} {Stmt₁ Wit₁ Stmt₂ Wit₂ Stmt₃ Wit₃ : Type} + {m n : ℕ} {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} section Instances @@ -124,9 +124,9 @@ This is defined by combining the two provers' private states and functions, with the last private state of the first prover is "merged" into the first private state of the second prover (via outputting the new statement and witness, and then inputting these into the second prover). 
-/ -def Prover.append (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) - (P₂ : Prover pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃) : - Prover (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Wit₁ Stmt₃ Wit₃ where +def Prover.append (P₁ : Prover oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁) + (P₂ : Prover oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂) : + Prover oSpec Stmt₁ Wit₁ Stmt₃ Wit₃ (pSpec₁ ++ₚ pSpec₂) where /- The combined prover's states are the concatenation of the first prover's states, except the last one, and the second prover's states. -/ @@ -134,16 +134,16 @@ def Prover.append (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) /- The combined prover's input function is the first prover's input function, except for when the first protocol is empty, in which case it is the second prover's input function -/ - input := fun stmt wit => by + input := fun ctxIn => by by_cases h : m > 0 · simp [Fin.append, Fin.addCases, Fin.init, Fin.castLT, h] - exact P₁.input stmt wit + exact P₁.input ctxIn · simp [Fin.append, Fin.addCases, h, Fin.subNat] exact ( - letI state := P₁.input stmt wit + letI state := P₁.input ctxIn haveI : 0 = Fin.last m := by aesop haveI state : P₁.PrvState (Fin.last m) := by simpa [this] using state - P₂.input.uncurry (P₁.output state)) + P₂.input (P₁.output state)) /- The combined prover sends messages according to the round index `i` as follows: - if `i < m - 1`, then it sends the message & updates the state as the first prover @@ -163,10 +163,10 @@ def Prover.append (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) let ⟨msg, state⟩ ← P₁.sendMessage ⟨⟨i, hi⟩, h⟩ state haveI state : P₁.PrvState (Fin.last m) := by simpa only [Fin.last, Fin.succ_mk, this] using state - return ⟨msg, P₂.input.uncurry (P₁.output state)⟩) + return ⟨msg, P₂.input (P₁.output state)⟩) · haveI : ¬ i + 1 < m := by omega - simp [ProtocolSpec.append, Fin.append, Fin.addCases, Fin.init, hi, this, - Fin.cast, Fin.castLT, Fin.succ, Fin.castSucc] at h state ⊢ + simp [ProtocolSpec.append, Fin.append, Fin.addCases, hi, this, 
Fin.cast, Fin.castLT, Fin.succ, + Fin.castSucc] at h state ⊢ exact (do let ⟨msg, newState⟩ ← P₂.sendMessage ⟨⟨i - m, by omega⟩, h⟩ state haveI newState : P₂.PrvState ⟨i + 1 - m, by omega⟩ := by @@ -189,10 +189,10 @@ def Prover.append (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) letI newState := P₁.receiveChallenge ⟨⟨i, hi⟩, h⟩ state chal haveI newState : P₁.PrvState (Fin.last m) := by simpa [Fin.last, this] using newState - P₂.input.uncurry (P₁.output newState)) + P₂.input (P₁.output newState)) · haveI : ¬ i + 1 < m := by omega - simp [ProtocolSpec.append, Fin.append, Fin.addCases, Fin.init, hi, this, - Fin.cast, Fin.castLT, Fin.succ, Fin.castSucc] at h state chal ⊢ + simp [ProtocolSpec.append, Fin.append, Fin.addCases, hi, this, Fin.cast, Fin.castLT, Fin.succ, + Fin.castSucc] at h state chal ⊢ exact ( letI newState := P₂.receiveChallenge ⟨⟨i - m, by omega⟩, h⟩ state chal haveI newState := by @@ -206,16 +206,16 @@ def Prover.append (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) exact P₂.output state /-- Composition of verifiers. Return the conjunction of the decisions of the two verifiers. -/ -def Verifier.append (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) : - Verifier (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Stmt₃ where +def Verifier.append (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) : + Verifier oSpec Stmt₁ Stmt₃ (pSpec₁ ++ₚ pSpec₂) where verify := fun stmt transcript => do return ← V₂.verify (← V₁.verify stmt transcript.fst) transcript.snd /-- Composition of reductions boils down to composing the provers and verifiers. 
-/ -def Reduction.append (R₁ : Reduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) - (R₂ : Reduction pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃) : - Reduction (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Wit₁ Stmt₃ Wit₃ where +def Reduction.append (R₁ : Reduction oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁) + (R₂ : Reduction oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂) : + Reduction oSpec Stmt₁ Wit₁ Stmt₃ Wit₃ (pSpec₁ ++ₚ pSpec₂) where prover := Prover.append R₁.prover R₂.prover verifier := Verifier.append R₁.verifier R₂.verifier @@ -226,9 +226,9 @@ variable [Oₘ₁ : ∀ i, OracleInterface (pSpec₁.Message i)] {ιₛ₃ : Type} {OStmt₃ : ιₛ₃ → Type} [Oₛ₃ : ∀ i, OracleInterface (OStmt₃ i)] open Function Embedding in -def OracleVerifier.append (V₁ : OracleVerifier pSpec₁ oSpec Stmt₁ Stmt₂ OStmt₁ OStmt₂) - (V₂ : OracleVerifier pSpec₂ oSpec Stmt₂ Stmt₃ OStmt₂ OStmt₃) : - OracleVerifier (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Stmt₃ OStmt₁ OStmt₃ where +def OracleVerifier.append (V₁ : OracleVerifier oSpec Stmt₁ OStmt₁ Stmt₂ OStmt₂ pSpec₁) + (V₂ : OracleVerifier oSpec Stmt₂ OStmt₂ Stmt₃ OStmt₃ pSpec₂) : + OracleVerifier oSpec Stmt₁ OStmt₁ Stmt₃ OStmt₃ (pSpec₁ ++ₚ pSpec₂) where verify := fun stmt challenges => by -- First, invoke the first oracle verifier, handling queries as necessary have := V₁.verify stmt (fun chal => sorry) @@ -250,15 +250,24 @@ def OracleVerifier.append (V₁ : OracleVerifier pSpec₁ oSpec Stmt₁ Stmt₂ hEq := fun i => by rcases h : V₂.embed i with j | j - · rcases h' : V₁.embed j with k | k <;> - simp [h, h', V₁.hEq j, V₂.hEq i, MessageIdx.inl] - · simp [h, V₂.hEq i, MessageIdx.inr] + · rcases h' : V₁.embed j with k | k + · have h1 := V₁.hEq j + have h2 := V₂.hEq i + simp [h, h'] at h1 h2 ⊢ + exact h2.trans h1 + · have h1 := V₁.hEq j + have h2 := V₂.hEq i + simp [h, h', MessageIdx.inl] at h1 h2 ⊢ + exact h2.trans h1 + · have := V₂.hEq i + simp [h] at this ⊢ + simp [this, MessageIdx.inr] /-- Sequential composition of oracle reductions is just the sequential composition of the oracle provers and oracle verifiers. 
-/ -def OracleReduction.append (R₁ : OracleReduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ OStmt₁ OStmt₂) - (R₂ : OracleReduction pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ OStmt₂ OStmt₃) : - OracleReduction (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Wit₁ Stmt₃ Wit₃ OStmt₁ OStmt₃ where +def OracleReduction.append (R₁ : OracleReduction oSpec Stmt₁ OStmt₁ Wit₁ Stmt₂ OStmt₂ Wit₂ pSpec₁) + (R₂ : OracleReduction oSpec Stmt₂ OStmt₂ Wit₂ Stmt₃ OStmt₃ Wit₃ pSpec₂) : + OracleReduction oSpec Stmt₁ OStmt₁ Wit₁ Stmt₃ OStmt₃ Wit₃ (pSpec₁ ++ₚ pSpec₂) where prover := Prover.append R₁.prover R₂.prover verifier := OracleVerifier.append R₁.verifier R₂.verifier @@ -282,22 +291,22 @@ As such, the definitions below are temporary until further development. -/ TODO: state a monotone condition on the extractor, namely that if extraction succeeds on a given query log, then it also succeeds on any extension of that query log -/ -def StraightlineExtractor.append (E₁ : StraightlineExtractor pSpec₁ oSpec Stmt₁ Wit₁ Wit₂) - (E₂ : StraightlineExtractor pSpec₂ oSpec Stmt₂ Wit₂ Wit₃) - (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) : - StraightlineExtractor (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Wit₁ Wit₃ := - fun wit₃ stmt₁ transcript proveQueryLog verifyQueryLog => do +def Extractor.Straightline.append (E₁ : Extractor.Straightline oSpec Stmt₁ Wit₁ Wit₂ pSpec₁) + (E₂ : Extractor.Straightline oSpec Stmt₂ Wit₂ Wit₃ pSpec₂) + (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) : + Extractor.Straightline oSpec Stmt₁ Wit₁ Wit₃ (pSpec₁ ++ₚ pSpec₂) := + fun stmt₁ wit₃ transcript proveQueryLog verifyQueryLog => do let stmt₂ ← V₁.verify stmt₁ transcript.fst - let wit₂ ← E₂ wit₃ stmt₂ transcript.snd proveQueryLog verifyQueryLog - let wit₁ ← E₁ wit₂ stmt₁ transcript.fst proveQueryLog verifyQueryLog + let wit₂ ← E₂ stmt₂ wit₃ transcript.snd proveQueryLog verifyQueryLog + let wit₁ ← E₁ stmt₁ wit₂ transcript.fst proveQueryLog verifyQueryLog return wit₁ /-- The round-by-round extractor for the sequential composition of two (oracle) reductions The nice 
thing is we just extend the first extractor to the concatenated protocol. The intuition is that RBR extraction happens on the very first message, so further messages don't matter. -/ -def RBRExtractor.append (E₁ : RBRExtractor pSpec₁ oSpec Stmt₁ Wit₁) : - RBRExtractor (pSpec₁ ++ₚ pSpec₂) oSpec Stmt₁ Wit₁ := +def Extractor.RoundByRound.append (E₁ : Extractor.RoundByRound oSpec Stmt₁ Wit₁ pSpec₁) : + Extractor.RoundByRound oSpec Stmt₁ Wit₁ (pSpec₁ ++ₚ pSpec₂) := -- (TODO: describe `Transcript.fst` and `Transcript.snd`) fun roundIdx stmt₁ transcript proveQueryLog => E₁ ⟨min roundIdx m, by omega⟩ stmt₁ transcript.fst proveQueryLog @@ -308,14 +317,14 @@ example {a b : ℕ} (h : a < b) : min b a = a := by exact min_eq_right_of_lt h /-- The sequential composition of two state functions. -/ def StateFunction.append [oSpec.FiniteRange] - (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) - (S₁ : StateFunction pSpec₁ oSpec lang₁ lang₂ V₁) - (S₂ : StateFunction pSpec₂ oSpec lang₂ lang₃ V₂) + (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) + (S₁ : V₁.StateFunction lang₁ lang₂) + (S₂ : V₂.StateFunction lang₂ lang₃) -- Assume the first verifier is deterministic for now (verify : Stmt₁ → pSpec₁.FullTranscript → Stmt₂) (hVerify : V₁ = ⟨fun stmt tr => pure (verify stmt tr)⟩) : - StateFunction (pSpec₁ ++ₚ pSpec₂) oSpec lang₁ lang₃ (V₁.append V₂) where + (V₁.append V₂).StateFunction lang₁ lang₃ where toFun := fun roundIdx stmt₁ transcript => if h : roundIdx.val ≤ m then -- If the round index falls in the first protocol, then we simply invokes the first state fn @@ -348,13 +357,13 @@ witness `wit₁` behaves as expected: it first runs `P₁` to obtain an intermed to produce the final statement `stmt₃`, witness `wit₃`, and transcript `transcript₂`. The overall output is `stmt₃`, `wit₃`, and the combined transcript `transcript₁ ++ₜ transcript₂`. 
-/ -theorem Prover.append_run (P₁ : Prover pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) - (P₂ : Prover pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃) (stmt : Stmt₁) (wit : Wit₁) : +theorem Prover.append_run (P₁ : Prover oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁) + (P₂ : Prover oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂) (stmt : Stmt₁) (wit : Wit₁) : (P₁.append P₂).run stmt wit = (do - let ⟨stmt₂, wit₂, transcript₁⟩ ← liftM (P₁.run stmt wit) - let ⟨stmt₃, wit₃, transcript₂⟩ ← liftM (P₂.run stmt₂ wit₂) + let ⟨⟨stmt₂, wit₂⟩, transcript₁⟩ ← liftM (P₁.run stmt wit) + let ⟨⟨stmt₃, wit₃⟩, transcript₂⟩ ← liftM (P₂.run stmt₂ wit₂) -- TODO: should we refactor the prover to take in a running query log? - return ⟨stmt₃, wit₃, transcript₁ ++ₜ transcript₂⟩) := + return ⟨⟨stmt₃, wit₃⟩, transcript₁ ++ₜ transcript₂⟩) := sorry -- TODO: Need to define a function that "extracts" a second prover from the combined prover @@ -369,7 +378,7 @@ section Append variable {pSpec₁ : ProtocolSpec m} {pSpec₂ : ProtocolSpec n} [∀ i, Sampleable (pSpec₁.Challenge i)] [∀ i, Sampleable (pSpec₂.Challenge i)] {Stmt₁ Wit₁ Stmt₂ Wit₂ Stmt₃ Wit₃ : Type} - {rel₁ : Stmt₁ → Wit₁ → Prop} {rel₂ : Stmt₂ → Wit₂ → Prop} {rel₃ : Stmt₃ → Wit₃ → Prop} + {rel₁ : Set (Stmt₁ × Wit₁)} {rel₂ : Set (Stmt₂ × Wit₂)} {rel₃ : Set (Stmt₃ × Wit₃)} [oSpec.DecidableEq] [oSpec.FiniteRange] namespace Reduction @@ -380,8 +389,8 @@ namespace Reduction completeness with respect to `rel₁` and `rel₃`. The completeness error of the appended reduction is the sum of the individual errors (`completenessError₁ + completenessError₂`). 
-/ -theorem completeness_append (R₁ : Reduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) - (R₂ : Reduction pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃) +theorem completeness_append (R₁ : Reduction oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁) + (R₂ : Reduction oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂) {completenessError₁ completenessError₂ : ℝ≥0} (h₁ : R₁.completeness rel₁ rel₂ completenessError₁) (h₂ : R₂.completeness rel₂ rel₃ completenessError₂) : @@ -389,16 +398,16 @@ theorem completeness_append (R₁ : Reduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt /-- If two reductions satisfy perfect completeness with compatible relations, then their concatenation also satisfies perfect completeness. -/ -theorem perfectCompleteness_append (R₁ : Reduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂) - (R₂ : Reduction pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃) +theorem perfectCompleteness_append (R₁ : Reduction oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁) + (R₂ : Reduction oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂) (h₁ : R₁.perfectCompleteness rel₁ rel₂) (h₂ : R₂.perfectCompleteness rel₂ rel₃) : (R₁.append R₂).perfectCompleteness rel₁ rel₃ := by dsimp [perfectCompleteness] at h₁ h₂ ⊢ convert Reduction.completeness_append R₁ R₂ h₁ h₂ simp only [add_zero] -variable {R₁ : Reduction pSpec₁ oSpec Stmt₁ Wit₁ Stmt₂ Wit₂} - {R₂ : Reduction pSpec₂ oSpec Stmt₂ Wit₂ Stmt₃ Wit₃} +variable {R₁ : Reduction oSpec Stmt₁ Wit₁ Stmt₂ Wit₂ pSpec₁} + {R₂ : Reduction oSpec Stmt₂ Wit₂ Stmt₃ Wit₃ pSpec₂} -- Synthesization issues... -- So maybe no synthesization but simp is fine? Maybe not... @@ -412,8 +421,8 @@ namespace Verifier /-- If two verifiers satisfy soundness with compatible languages and respective soundness errors, then their sequential composition also satisfies soundness. The soundness error of the appended verifier is the sum of the individual errors. 
-/ -theorem append_soundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) +theorem append_soundness (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) (langIn₁ : Set Stmt₁) (langOut₁ : Set Stmt₂) (langIn₂ : Set Stmt₂) (langOut₂ : Set Stmt₃) {soundnessError₁ soundnessError₂ : ℝ≥0} @@ -425,10 +434,10 @@ theorem append_soundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) /-- If two verifiers satisfy knowledge soundness with compatible relations and respective knowledge errors, then their sequential composition also satisfies knowledge soundness. The knowledge error of the appended verifier is the sum of the individual errors. -/ -theorem append_knowledgeSoundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) - (relIn₁ : Stmt₁ → Wit₁ → Prop) (relOut₁ : Stmt₂ → Wit₂ → Prop) - (relIn₂ : Stmt₂ → Wit₂ → Prop) (relOut₂ : Stmt₃ → Wit₃ → Prop) +theorem append_knowledgeSoundness (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) + (relIn₁ : Set (Stmt₁ × Wit₁)) (relOut₁ : Set (Stmt₂ × Wit₂)) + (relIn₂ : Set (Stmt₂ × Wit₂)) (relOut₂ : Set (Stmt₃ × Wit₃)) {knowledgeError₁ knowledgeError₂ : ℝ≥0} (h₁ : V₁.knowledgeSoundness relIn₁ relOut₁ knowledgeError₁) (h₂ : V₂.knowledgeSoundness relIn₂ relOut₂ knowledgeError₂) : @@ -438,8 +447,8 @@ theorem append_knowledgeSoundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt /-- If two verifiers satisfy round-by-round soundness with compatible languages and respective RBR soundness errors, then their sequential composition also satisfies round-by-round soundness. The RBR soundness error of the appended verifier extends the individual errors appropriately. 
-/ -theorem append_rbrSoundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) +theorem append_rbrSoundness (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) (langIn₁ : Set Stmt₁) (langOut₁ : Set Stmt₂) (langIn₂ : Set Stmt₂) (langOut₂ : Set Stmt₃) {rbrSoundnessError₁ : pSpec₁.ChallengeIdx → ℝ≥0} @@ -457,10 +466,10 @@ theorem append_rbrSoundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) respective RBR knowledge errors, then their sequential composition also satisfies round-by-round knowledge soundness. The RBR knowledge error of the appended verifier extends the individual errors appropriately. -/ -theorem append_rbrKnowledgeSoundness (V₁ : Verifier pSpec₁ oSpec Stmt₁ Stmt₂) - (V₂ : Verifier pSpec₂ oSpec Stmt₂ Stmt₃) - (relIn₁ : Stmt₁ → Wit₁ → Prop) (relOut₁ : Stmt₂ → Wit₂ → Prop) - (relIn₂ : Stmt₂ → Wit₂ → Prop) (relOut₂ : Stmt₃ → Wit₃ → Prop) +theorem append_rbrKnowledgeSoundness (V₁ : Verifier oSpec Stmt₁ Stmt₂ pSpec₁) + (V₂ : Verifier oSpec Stmt₂ Stmt₃ pSpec₂) + (relIn₁ : Set (Stmt₁ × Wit₁)) (relOut₁ : Set (Stmt₂ × Wit₂)) + (relIn₂ : Set (Stmt₂ × Wit₂)) (relOut₂ : Set (Stmt₃ × Wit₃)) {rbrKnowledgeError₁ : pSpec₁.ChallengeIdx → ℝ≥0} {rbrKnowledgeError₂ : pSpec₂.ChallengeIdx → ℝ≥0} (h₁ : V₁.rbrKnowledgeSoundness relIn₁ relOut₁ rbrKnowledgeError₁) diff --git a/ArkLib/OracleReduction/Composition/Sequential/General.lean b/ArkLib/OracleReduction/Composition/Sequential/General.lean index 5822872c1..5caf52872 100644 --- a/ArkLib/OracleReduction/Composition/Sequential/General.lean +++ b/ArkLib/OracleReduction/Composition/Sequential/General.lean @@ -18,115 +18,152 @@ import ArkLib.OracleReduction.Composition.Sequential.Append open ProtocolSpec -variable {ι : Type} [DecidableEq ι] {oSpec : OracleSpec ι} +universe u v + +variable {ι : Type} {oSpec : OracleSpec ι} {m : ℕ} section Instances -variable {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} +variable {n : Fin m → ℕ} {pSpec : ∀ i, 
ProtocolSpec (n i)} + +namespace ProtocolSpec + +/-- The equivalence between the challenge indices of the individual protocols and the challenge + indices of the sequential composition. -/ +def seqComposeChallengeEquiv : + (i : Fin m) × (pSpec i).ChallengeIdx ≃ (seqCompose pSpec).ChallengeIdx where + -- TODO: write lemmas about `finSigmaFinEquiv` in mathlib with the one defined via `Fin.dfoldl` + toFun := fun ⟨i, ⟨chalIdx, h⟩⟩ => ⟨finSigmaFinEquiv ⟨i, chalIdx⟩, by + unfold seqCompose; sorry⟩ + invFun := fun ⟨seqComposedChalIdx, h⟩ => + let ⟨i, chalIdx⟩ := finSigmaFinEquiv.symm seqComposedChalIdx + ⟨i, chalIdx, sorry⟩ + left_inv := by intro ⟨_, _⟩; simp; rw! [finSigmaFinEquiv.left_inv']; simp + right_inv := by intro ⟨_, _⟩; simp + +/-- The equivalence between the message indices of the individual protocols and the message + indices of the sequential composition. -/ +def seqComposeMessageEquiv : + (i : Fin m) × (pSpec i).MessageIdx ≃ (seqCompose pSpec).MessageIdx where + toFun := fun ⟨i, ⟨msgIdx, h⟩⟩ => ⟨finSigmaFinEquiv ⟨i, msgIdx⟩, by + unfold seqCompose; sorry⟩ + invFun := fun ⟨seqComposedMsgIdx, h⟩ => + let ⟨i, msgIdx⟩ := finSigmaFinEquiv.symm seqComposedMsgIdx + ⟨i, msgIdx, sorry⟩ + left_inv := by intro ⟨_, _⟩; simp; rw! [finSigmaFinEquiv.left_inv']; simp + right_inv := by intro ⟨_, _⟩; simp + +end ProtocolSpec /-- If all protocols have sampleable challenges, then the challenges of their sequential composition also have sampleable challenges. 
-/ -instance [h : ∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] : - ∀ j, Sampleable ((compose m n pSpec).Challenge j) := fun ⟨⟨i, isLt⟩, h⟩ => by - dsimp [ProtocolSpec.compose, Fin.append, Fin.addCases, Fin.castLT, Fin.subNat, Fin.cast] at h ⊢ - sorry - -- by_cases h' : i < m <;> simp [h'] at h ⊢ - -- · exact h₁ ⟨⟨i, by omega⟩, h⟩ - -- · exact h₂ ⟨⟨i - m, by omega⟩, h⟩ +instance [inst : ∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] : + ∀ j, Sampleable ((seqCompose pSpec).Challenge j) := fun combinedIdx => by + let combinedIdx' := seqComposeChallengeEquiv.symm combinedIdx + let this := inst combinedIdx'.1 combinedIdx'.2 + convert this using 1; sorry /-- If all protocols' messages have oracle interfaces, then the messages of their sequential composition also have oracle interfaces. -/ -instance [O : ∀ i, ∀ j, OracleInterface ((pSpec i).Message j)] : - ∀ i, OracleInterface ((compose m n pSpec).Message i) := fun ⟨⟨i, isLt⟩, h⟩ => by - dsimp [ProtocolSpec.compose, ProtocolSpec.getDir, Fin.append, Fin.addCases, - Fin.castLT, Fin.subNat, Fin.cast] at h ⊢ - sorry - -- by_cases h' : i < m <;> simp [h'] at h ⊢ - -- · exact O₁ ⟨⟨i, by omega⟩, h⟩ - -- · exact O₂ ⟨⟨i - m, by omega⟩, h⟩ - --- open OracleComp OracleSpec SubSpec - --- variable [∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] - --- instance : SubSpec (challengeOracle pSpec₁ ++ₒ challengeOracle pSpec₂) --- (challengeOracle (compose m n pSpec)) := sorry +instance [O : ∀ i, ∀ j, OracleInterface.{0, u, v} ((pSpec i).Message j)] : + ∀ i, OracleInterface.{0, u, v} ((seqCompose pSpec).Message i) := fun combinedIdx => by + let combinedIdx' := seqComposeMessageEquiv.symm combinedIdx + let this := O combinedIdx'.1 combinedIdx'.2 + convert this using 1; sorry end Instances section Composition -variable {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} +variable {Stmt : Fin (m + 1) → Type} {Wit : Fin (m + 1) → Type} + {n : Fin m → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} -def Prover.compose (m : ℕ) (n : Fin (m + 1) → 
ℕ) (pSpec : ∀ i, ProtocolSpec (n i)) - (Stmt : Fin (m + 2) → Type) (Wit : Fin (m + 2) → Type) - (P : (i : Fin (m + 1)) → Prover (pSpec i) oSpec (Stmt i.castSucc) (Wit i.castSucc) - (Stmt i.succ) (Wit i.succ)) : - Prover (ProtocolSpec.compose m n pSpec) oSpec (Stmt 0) (Wit 0) (Stmt (Fin.last (m + 1))) - (Wit (Fin.last (m + 1))) := +def Prover.seqCompose + (P : (i : Fin m) → + Prover oSpec (Stmt i.castSucc) (Wit i.castSucc) (Stmt i.succ) (Wit i.succ) (pSpec i)) : + Prover oSpec (Stmt 0) (Wit 0) (Stmt (Fin.last m)) (Wit (Fin.last m)) (seqCompose pSpec) := Fin.dfoldl m (fun i => Prover - (ProtocolSpec.compose i (Fin.take (i + 1) (by omega) n) (Fin.take (i + 1) (by omega) pSpec)) - oSpec (Stmt 0) (Wit 0) (Stmt i.succ) (Wit i.succ)) + oSpec (Stmt 0) (Wit 0) (Stmt i) (Wit i) + (ProtocolSpec.seqCompose (Fin.take i (by omega) pSpec))) (fun i Pacc => by - convert Prover.append Pacc (P i.succ) + -- TODO: cast the prover with dependent cast + convert Prover.append Pacc (P i) · simp [Fin.sum_univ_castSucc, Fin.last, Fin.succ] - · simp [ProtocolSpec.compose_append, dcast_eq_root_cast]) - (by simpa using P 0) + · simp [ProtocolSpec.seqCompose_append, dcast_eq_root_cast]) + (Prover.id) -def Verifier.compose (m : ℕ) (n : Fin (m + 1) → ℕ) (pSpec : ∀ i, ProtocolSpec (n i)) - (Stmt : Fin (m + 2) → Type) - (V : (i : Fin (m + 1)) → Verifier (pSpec i) oSpec (Stmt i.castSucc) (Stmt i.succ)) : - Verifier (ProtocolSpec.compose m n pSpec) oSpec (Stmt 0) (Stmt (Fin.last (m + 1))) := +def Verifier.seqCompose + (V : (i : Fin m) → + Verifier oSpec (Stmt i.castSucc) (Stmt i.succ) (pSpec i)) : + Verifier oSpec (Stmt 0) (Stmt (Fin.last m)) (seqCompose pSpec) := Fin.dfoldl m - (fun i => Verifier - (ProtocolSpec.compose i (Fin.take (i + 1) (by omega) n) (Fin.take (i + 1) (by omega) pSpec)) - oSpec (Stmt 0) (Stmt i.succ)) + (fun i => Verifier oSpec (Stmt 0) (Stmt i) + (ProtocolSpec.seqCompose (Fin.take i (by omega) pSpec))) (fun i Vacc => by - refine dcast₂ (self := instDepCast₂Verifier) ?_ ?_ 
(Vacc.append (V i.succ)) + refine dcast₂ (self := instDCast₂Verifier) ?_ ?_ (Vacc.append (V i)) · simp [Fin.sum_univ_castSucc, Fin.last, Fin.succ] - · simp [ProtocolSpec.compose_append, dcast_eq_root_cast]) - (by simpa using V 0) - -def Reduction.compose (m : ℕ) (n : Fin (m + 1) → ℕ) (pSpec : ∀ i, ProtocolSpec (n i)) - (Stmt : Fin (m + 2) → Type) (Wit : Fin (m + 2) → Type) - (R : (i : Fin (m + 1)) → Reduction (pSpec i) oSpec (Stmt i.castSucc) (Wit i.castSucc) - (Stmt i.succ) (Wit i.succ)) : - Reduction (ProtocolSpec.compose m n pSpec) oSpec (Stmt 0) (Wit 0) (Stmt (Fin.last (m + 1))) - (Wit (Fin.last (m + 1))) := + · simp [seqCompose_append, dcast_eq_root_cast]) + (Verifier.id) + +def Reduction.seqCompose + (R : (i : Fin m) → + Reduction oSpec (Stmt i.castSucc) (Wit i.castSucc) (Stmt i.succ) (Wit i.succ) (pSpec i)) : + Reduction oSpec (Stmt 0) (Wit 0) (Stmt (Fin.last m)) (Wit (Fin.last m)) (seqCompose pSpec) := Fin.dfoldl m - (fun i => Reduction - (ProtocolSpec.compose i (Fin.take (i + 1) (by omega) n) (Fin.take (i + 1) (by omega) pSpec)) - oSpec (Stmt 0) (Wit 0) (Stmt i.succ) (Wit i.succ)) + (fun i => Reduction oSpec (Stmt 0) (Wit 0) (Stmt i) (Wit i) + (ProtocolSpec.seqCompose (Fin.take i (by omega) pSpec))) (fun i Racc => by - convert Reduction.append Racc (R i.succ) + convert Reduction.append Racc (R i) · simp [Fin.sum_univ_castSucc, Fin.last, Fin.succ] - · simp [ProtocolSpec.compose_append, dcast_eq_root_cast]) - (by simpa using R 0) + · simp [seqCompose_append, dcast_eq_root_cast]) + (Reduction.id) + +-- TODO: define the same for `Oracle Prover/Verifier/Reduction`, and extractors and state functions end Composition +variable {m : ℕ} + {Stmt : Fin (m + 1) → Type} {Wit : Fin (m + 1) → Type} {rel : ∀ i, Set (Stmt i × Wit i)} + {n : Fin m → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} + [oSpec.FiniteRange] + [∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] + +section Lemmas + +-- TODO: `compose_append` for everything, saying that sequential composition for `i + 1` times +-- 
equals the sequential composition for `i` times followed by appending the `i + 1`-th one + +end Lemmas + +-- section Execution + +-- -- Executing . +-- theorem Reduction.run_seqCompose +-- (stmt : Stmt 0) (wit : Wit 0) +-- (R : ∀ i, Reduction oSpec (Stmt i.castSucc) (Wit i.castSucc) (Stmt i.succ) (Wit i.succ) +-- (pSpec i)) : +-- (Reduction.seqCompose R).run stmt wit := by +-- sorry + +-- end Execution + section Security open scoped NNReal namespace Reduction -variable {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} - [∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] - {Stmt : Fin (m + 2) → Type} {Wit : Fin (m + 2) → Type} {rel : ∀ i, Stmt i → Wit i → Prop} - [oSpec.DecidableEq] [oSpec.FiniteRange] - -theorem completeness_compose - (R : ∀ i, Reduction (pSpec i) oSpec (Stmt i.castSucc) (Wit i.castSucc) - (Stmt i.succ) (Wit i.succ)) - (completenessError : Fin (m + 1) → ℝ≥0) +theorem completeness_seqCompose + (completenessError : Fin m → ℝ≥0) + (R : ∀ i, Reduction oSpec (Stmt i.castSucc) (Wit i.castSucc) (Stmt i.succ) (Wit i.succ) + (pSpec i)) (h : ∀ i, (R i).completeness (rel i.castSucc) (rel i.succ) (completenessError i)) : - (Reduction.compose m n pSpec Stmt Wit R).completeness (rel 0) (rel (Fin.last (m + 1))) + (Reduction.seqCompose R).completeness (rel 0) (rel (Fin.last m)) (∑ i, completenessError i) := by induction m with | zero => - simp_all; convert h 0; sorry + simp_all [seqCompose]; sorry | succ m ih => sorry @@ -134,62 +171,61 @@ end Reduction namespace Verifier -variable {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} - [∀ i, ∀ j, Sampleable ((pSpec i).Challenge j)] - {Stmt : Fin (m + 2) → Type} {Wit : Fin (m + 2) → Type} - [oSpec.DecidableEq] [oSpec.FiniteRange] - /-- If all verifiers in a sequence satisfy soundness with respective soundness errors, then their sequential composition also satisfies soundness. - The soundness error of the composed verifier is the sum of the individual errors. 
-/ -theorem compose_soundness - (V : ∀ i, Verifier (pSpec i) oSpec (Stmt i.castSucc) (Stmt i.succ)) + The soundness error of the seqComposed verifier is the sum of the individual errors. -/ +theorem seqCompose_soundness (lang : ∀ i, Set (Stmt i)) - (soundnessError : Fin (m + 1) → ℝ≥0) + (soundnessError : Fin m → ℝ≥0) + (V : ∀ i, Verifier oSpec (Stmt i.castSucc) (Stmt i.succ) (pSpec i)) (h : ∀ i, (V i).soundness (lang i.castSucc) (lang i.succ) (soundnessError i)) : - (Verifier.compose m n pSpec Stmt V).soundness (lang 0) (lang (Fin.last (m + 1))) + (Verifier.seqCompose V).soundness (lang 0) (lang (Fin.last m)) (∑ i, soundnessError i) := by sorry /-- If all verifiers in a sequence satisfy knowledge soundness with respective knowledge errors, then their sequential composition also satisfies knowledge soundness. - The knowledge error of the composed verifier is the sum of the individual errors. -/ -theorem compose_knowledgeSoundness - (V : ∀ i, Verifier (pSpec i) oSpec (Stmt i.castSucc) (Stmt i.succ)) - (rel : ∀ i, Stmt i → Wit i → Prop) - (knowledgeError : Fin (m + 1) → ℝ≥0) + The knowledge error of the seqComposed verifier is the sum of the individual errors. -/ +theorem seqCompose_knowledgeSoundness + (rel : ∀ i, Set (Stmt i × Wit i)) + (knowledgeError : Fin m → ℝ≥0) + (V : ∀ i, Verifier oSpec (Stmt i.castSucc) (Stmt i.succ) (pSpec i)) (h : ∀ i, (V i).knowledgeSoundness (rel i.castSucc) (rel i.succ) (knowledgeError i)) : - (Verifier.compose m n pSpec Stmt V).knowledgeSoundness (rel 0) (rel (Fin.last (m + 1))) + (Verifier.seqCompose V).knowledgeSoundness (rel 0) (rel (Fin.last m)) (∑ i, knowledgeError i) := by sorry /-- If all verifiers in a sequence satisfy round-by-round soundness with respective RBR soundness errors, then their sequential composition also satisfies round-by-round soundness. 
-/ -theorem compose_rbrSoundness - (V : ∀ i, Verifier (pSpec i) oSpec (Stmt i.castSucc) (Stmt i.succ)) +theorem seqCompose_rbrSoundness + (V : ∀ i, Verifier oSpec (Stmt i.castSucc) (Stmt i.succ) (pSpec i)) (lang : ∀ i, Set (Stmt i)) (rbrSoundnessError : ∀ i, (pSpec i).ChallengeIdx → ℝ≥0) (h : ∀ i, (V i).rbrSoundness (lang i.castSucc) (lang i.succ) (rbrSoundnessError i)) -- Deterministic verifier condition for state function composition (verify : ∀ i, Stmt i.castSucc → (pSpec i).FullTranscript → Stmt i.succ) (hVerify : ∀ i, V i = ⟨fun stmt tr => pure (verify i stmt tr)⟩) : - (Verifier.compose m n pSpec Stmt V).rbrSoundness (lang 0) (lang (Fin.last (m + 1))) - (fun _ => sorry) := by -- TODO: figure out the right way to compose RBR errors + (Verifier.seqCompose V).rbrSoundness (lang 0) (lang (Fin.last m)) + (fun combinedIdx => + let ⟨i, idx⟩ := seqComposeChallengeEquiv.symm combinedIdx + rbrSoundnessError i idx) := by sorry /-- If all verifiers in a sequence satisfy round-by-round knowledge soundness with respective RBR knowledge errors, then their sequential composition also satisfies round-by-round knowledge soundness. 
-/ -theorem compose_rbrKnowledgeSoundness - (V : ∀ i, Verifier (pSpec i) oSpec (Stmt i.castSucc) (Stmt i.succ)) - (rel : ∀ i, Stmt i → Wit i → Prop) +theorem seqCompose_rbrKnowledgeSoundness + (V : ∀ i, Verifier oSpec (Stmt i.castSucc) (Stmt i.succ) (pSpec i)) + (rel : ∀ i, Set (Stmt i × Wit i)) (rbrKnowledgeError : ∀ i, (pSpec i).ChallengeIdx → ℝ≥0) (h : ∀ i, (V i).rbrKnowledgeSoundness (rel i.castSucc) (rel i.succ) (rbrKnowledgeError i)) -- Deterministic verifier condition for state function composition (verify : ∀ i, Stmt i.castSucc → (pSpec i).FullTranscript → Stmt i.succ) (hVerify : ∀ i, V i = ⟨fun stmt tr => pure (verify i stmt tr)⟩) : - (Verifier.compose m n pSpec Stmt V).rbrKnowledgeSoundness (rel 0) (rel (Fin.last (m + 1))) - (fun _ => sorry) := by -- TODO: figure out the right way to compose RBR errors + (Verifier.seqCompose V).rbrKnowledgeSoundness (rel 0) (rel (Fin.last m)) + (fun combinedIdx => + let ⟨i, idx⟩ := seqComposeChallengeEquiv.symm combinedIdx + rbrKnowledgeError i idx) := by sorry end Verifier diff --git a/ArkLib/OracleReduction/Composition/Sequential/ProtocolSpec.lean b/ArkLib/OracleReduction/Composition/Sequential/ProtocolSpec.lean index 7888a2eba..998eb7aec 100644 --- a/ArkLib/OracleReduction/Composition/Sequential/ProtocolSpec.lean +++ b/ArkLib/OracleReduction/Composition/Sequential/ProtocolSpec.lean @@ -4,8 +4,8 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.Data.Fin.Basic import ArkLib.ToMathlib.BigOperators.Fin +import ArkLib.OracleReduction.ProtocolSpec import ArkLib.OracleReduction.Cast /-! # Sequential Composition of Protocol Specifications @@ -15,32 +15,6 @@ their associated data. -/ namespace ProtocolSpec -/-! 
### Restriction of Protocol Specifications & Transcripts -/ - -section Restrict - -variable {n : ℕ} - -/-- Take the first `m ≤ n` rounds of a `ProtocolSpec n` -/ -abbrev take (m : ℕ) (h : m ≤ n) (pSpec : ProtocolSpec n) := Fin.take m h pSpec - -/-- Take the last `m ≤ n` rounds of a `ProtocolSpec n` -/ -abbrev rtake (m : ℕ) (h : m ≤ n) (pSpec : ProtocolSpec n) := Fin.rtake m h pSpec - -/-- Take the first `m ≤ n` rounds of a (full) transcript for a protocol specification `pSpec` -/ -abbrev FullTranscript.take {pSpec : ProtocolSpec n} (m : ℕ) (h : m ≤ n) - (transcript : FullTranscript pSpec) : FullTranscript (pSpec.take m h) := - Fin.take m h transcript - -/-- Take the last `m ≤ n` rounds of a (full) transcript for a protocol specification `pSpec` -/ -abbrev FullTranscript.rtake {pSpec : ProtocolSpec n} (m : ℕ) (h : m ≤ n) - (transcript : FullTranscript pSpec) : FullTranscript (pSpec.rtake m h) := - Fin.rtake m h transcript - -end Restrict - - - /-! ### Composition of Two Protocol Specifications -/ variable {m n : ℕ} @@ -154,7 +128,8 @@ def snoc {pSpec : ProtocolSpec n} {NextMessage : Type} -- theorem append_cast_left {n m : ℕ} {pSpec₁ pSpec₂ : ProtocolSpec n} {pSpec' : ProtocolSpec m} -- {T₁ : FullTranscript pSpec₁} {T₂ : FullTranscript pSpec'} (n' : ℕ) -- (h : n + m = n' + m) (hSpec : dcast h pSpec₁ = pSpec₂) : --- dcast₂ h (by simp) (T₁ ++ₜ T₂) = (dcast₂ (Nat.add_right_cancel h) (by simp) T₁) ++ₜ T₂ := by +-- dcast₂ h (by simp) (T₁ ++ₜ T₂) = (dcast₂ (Nat.add_right_cancel h) (by simp) T₁) ++ₜ T₂ := +-- by -- simp [append, dcast₂, ProtocolSpec.cast, Fin.append_cast_left] -- @[simp] @@ -169,18 +144,20 @@ theorem take_append_left (T : FullTranscript pSpec₁) (T' : FullTranscript pSpe T.cast rfl (by simp [ProtocolSpec.append]) := by ext i simp [take, append, ProtocolSpec.append, Fin.castLE, Fin.addCases', Fin.addCases, - FullTranscript.cast] + FullTranscript.cast, Transcript.cast] @[simp] theorem rtake_append_right (T : FullTranscript pSpec₁) (T' : FullTranscript pSpec₂) 
: (T ++ₜ T').rtake n (Nat.le_add_left n m) = T'.cast rfl (by simp [ProtocolSpec.append]) := by ext i - simp [rtake, append, ProtocolSpec.append, Fin.castLE, Fin.addCases', Fin.addCases, - Fin.natAdd, Fin.subNat, FullTranscript.cast] + simp only [ProtocolSpec.append, getType, Fin.rtake_apply, Fin.natAdd, Fin.cast_mk, rtake, append, + Fin.addCases', Fin.addCases, add_tsub_cancel_right, add_lt_iff_neg_left, not_lt_zero', + ↓reduceDIte, Fin.subNat_mk, Fin.natAdd_mk, eq_mpr_eq_cast, eq_mp_eq_cast, FullTranscript.cast, + Transcript.cast, Fin.val_last, Fin.cast_eq_self, Fin.castLE_refl] have : m + n - n + i.val - m = i.val := by omega rw! (castMode := .all) [this] - simp [_root_.cast_eq_cast_iff, eqRec_eq_cast] + simp [eqRec_eq_cast] /-- The first half of a transcript for a concatenated protocol -/ def fst (T : FullTranscript (pSpec₁ ++ₚ pSpec₂)) : FullTranscript pSpec₁ := @@ -224,7 +201,7 @@ def MessageIdx.sumEquiv : exact Sum.inr ⟨⟨i - m, by omega⟩, h⟩ left_inv := fun i => by rcases i with ⟨⟨i, isLt⟩, h⟩ | ⟨⟨i, isLt⟩, h⟩ <;> - simp [MessageIdx.inl, MessageIdx.inr, h, isLt] + simp [MessageIdx.inl, MessageIdx.inr, isLt] right_inv := fun ⟨i, h⟩ => by by_cases hi : i < m <;> simp [MessageIdx.inl, MessageIdx.inr, hi] @@ -248,82 +225,67 @@ def ChallengeIdx.sumEquiv : exact Sum.inr ⟨⟨i - m, by omega⟩, h⟩ left_inv := fun i => by rcases i with ⟨⟨i, isLt⟩, h⟩ | ⟨⟨i, isLt⟩, h⟩ <;> - simp [ChallengeIdx.inl, ChallengeIdx.inr, h, isLt] + simp [ChallengeIdx.inl, ChallengeIdx.inr, isLt] right_inv := fun ⟨i, h⟩ => by by_cases hi : i < m <;> simp [ChallengeIdx.inl, ChallengeIdx.inr, hi] congr; omega -/-- Composition of a family of `ProtocolSpec`s, indexed by `i : Fin (m + 1)`. +/-- Sequential composition of a family of `ProtocolSpec`s, indexed by `i : Fin m`. + +Defined by (dependent) folding over `Fin`, starting from the empty protocol specification `![]`. -Defined by (dependent) folding over `Fin`. 
-/ -def compose (m : ℕ) (n : Fin (m + 1) → ℕ) (pSpec : ∀ i, ProtocolSpec (n i)) : +TODO: add notation `∑ i, pSpec i` for `seqCompose` -/ +def seqCompose {m : ℕ} {n : Fin m → ℕ} (pSpec : ∀ i, ProtocolSpec (n i)) : ProtocolSpec (∑ i, n i) := - dcast (by have : Fin.last m = ⊤ := rfl; rw [this, Finset.Iic_top]) - (Fin.dfoldl m (fun i => ProtocolSpec (∑ j ≤ i, n j)) - (fun i acc => dcast (Fin.sum_Iic_succ i).symm (acc ++ₚ pSpec i.succ)) - (dcast (Fin.sum_Iic_zero).symm (pSpec 0))) + Fin.dfoldl m (fun i => ProtocolSpec (∑ j : Fin i, n (j.castLE (by omega)))) + (fun i acc => dcast (by simp [Fin.sum_univ_castSucc, Fin.castLE]) (acc ++ₚ pSpec i)) + ![] @[simp] -theorem compose_zero {n : ℕ} {pSpec : ProtocolSpec n} : - compose 0 (fun _ => n) (fun _ => pSpec) = pSpec := by - simp [compose] +theorem seqCompose_zero {n : ℕ} {pSpec : ProtocolSpec n} : + seqCompose (fun _ : Fin 0=> pSpec) = ![] := by + simp [seqCompose] -/-- Composition for `i + 1` equals composition for `i` appended with the `i + 1`-th `ProtocolSpec` +/-- Sequential composition of `i + 1` protocol specifications equals the sequential composition of + `i` first protocol specifications appended with the `i + 1`-th protocol specification. 
-/ -theorem compose_append {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} (i : Fin m) : - compose (i + 1) - (Fin.take (i + 1 + 1) (by omega) n) - (Fin.take (i + 1 + 1) (by omega) pSpec) - = dcast (by simp [Fin.sum_univ_castSucc]; congr) - (compose i - (Fin.take (i + 1) (by omega) n) - (Fin.take (i + 1) (by omega) pSpec) - ++ₚ pSpec i.succ) := by - simp only [id_eq, Fin.take_apply, compose, dcast_eq, - Fin.dfoldl_succ_last, Fin.succ_last, Nat.succ_eq_add_one, Function.comp_apply, dcast_trans] - unfold Function.comp Fin.castSucc Fin.castAdd Fin.castLE Fin.last Fin.succ - simp only [Fin.val_zero, Fin.zero_eta] - rw [dcast_eq_dcast_iff, append_cast_left, dcast_trans] - refine congrArg (· ++ₚ pSpec i.succ) ?_ - rw [dcast_eq_root_cast] - refine Fin.dfoldl_congr_dcast ?_ ?_ ?_ - · intro j; simp [Fin.sum_Iic_eq_univ] - · intro j a; simp [dcast_eq_dcast_iff, append_cast_left] - · simp +theorem seqCompose_append {m : ℕ} {n : Fin m → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} (i : Fin m) : + seqCompose (Fin.take (i + 1) (by omega) pSpec) + = dcast (by simp [Fin.sum_univ_castSucc, Fin.castLE]) + (seqCompose (Fin.take i (by omega) pSpec) ++ₚ pSpec i) := by + simp only [seqCompose, Fin.coe_castSucc, Fin.val_succ, Fin.take_apply, Fin.dfoldl_succ_last, + Fin.val_last, Fin.castLE_refl, Function.comp_apply, Fin.castLE_castSucc] + unfold Function.comp Fin.castSucc Fin.castAdd Fin.castLE Fin.last + simp namespace FullTranscript -/-- Composition of a family of `FullTranscript`s, indexed by `i : Fin (m + 1)`. 
-/ -def compose (m : ℕ) (n : Fin (m + 1) → ℕ) (pSpec : ∀ i, ProtocolSpec (n i)) - (T : ∀ i, FullTranscript (pSpec i)) : FullTranscript (compose m n pSpec) := - dcast₂ (by simp) (by simp; congr) - (Fin.dfoldl m - (fun i => FullTranscript (ProtocolSpec.compose i - (Fin.take (i + 1) (by omega) n) (Fin.take (i + 1) (by omega) pSpec))) - (fun i acc => by - refine dcast₂ ?_ ?_ (acc ++ₜ (T i.succ)) - · simp [Fin.take]; rw (occs := [2]) [Fin.sum_univ_castSucc]; congr - · simp [compose_append]) - (dcast₂ (by simp) (by congr) (T 0))) +/-- Sequential composition of a family of `FullTranscript`s, indexed by `i : Fin m`. + +TODO: add notation `∑ i, T i` for `seqCompose` -/ +def seqCompose {m : ℕ} {n : Fin m → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} + (T : ∀ i, FullTranscript (pSpec i)) : FullTranscript (seqCompose pSpec) := + Fin.dfoldl m + (fun i => FullTranscript (ProtocolSpec.seqCompose (Fin.take i (by omega) pSpec))) + (fun i acc => by + refine dcast₂ ?_ ?_ (acc ++ₜ (T i)) + · simp [Fin.sum_univ_castSucc, Fin.castLE] + · simp [ProtocolSpec.seqCompose_append]) + (fun i => Fin.elim0 i) @[simp] -theorem compose_zero {n : ℕ} {pSpec : ProtocolSpec n} {transcript : pSpec.FullTranscript} : - compose 0 (fun _ => n) (fun _ => pSpec) (fun _ => transcript) = transcript := rfl +theorem seqCompose_zero {n : ℕ} {pSpec : ProtocolSpec n} {transcript : pSpec.FullTranscript} : + seqCompose (fun _ : Fin 0 => transcript) = (fun i => Fin.elim0 i) := rfl -theorem compose_append {m : ℕ} {n : Fin (m + 1) → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} +/-- Sequential composition of `i + 1` transcripts equals the sequential composition of `i` first + transcripts appended with the `i + 1`-th transcript. 
-/ +theorem seqCompose_append {m : ℕ} {n : Fin m → ℕ} {pSpec : ∀ i, ProtocolSpec (n i)} {T : ∀ i, FullTranscript (pSpec i)} (i : Fin m) : - compose (i + 1) - (Fin.take (i + 1 + 1) (by omega) n) - (Fin.take (i + 1 + 1) (by omega) pSpec) - (Fin.take (i + 1 + 1) (by omega) T) - = dcast₂ (by simp [Fin.sum_univ_castSucc]; rfl) (by simp [compose_append]) - (compose i - (Fin.take (i + 1) (by omega) n) - (Fin.take (i + 1) (by omega) pSpec) - (Fin.take (i + 1) (by omega) T) - ++ₜ T i.succ) := by - simp [compose, append_cast_left] + seqCompose (Fin.take (i + 1) (by omega) T) + = dcast₂ (by simp [Fin.sum_univ_castSucc, Fin.castLE]) (by sorry) + (seqCompose (Fin.take i (by omega) T) ++ₜ T i) := by + simp [seqCompose] sorry end FullTranscript diff --git a/ArkLib/OracleReduction/Equiv.lean b/ArkLib/OracleReduction/Equiv.lean index d3b98c205..a66a8d105 100644 --- a/ArkLib/OracleReduction/Equiv.lean +++ b/ArkLib/OracleReduction/Equiv.lean @@ -1,5 +1,7 @@ + + import ArkLib.OracleReduction.Security.Basic -import ArkLib.OracleReduction.LiftContext.Basic +import ArkLib.OracleReduction.LiftContext.OracleReduction /-! # Equivalence / Isomorphism of Oracle Reductions @@ -44,91 +46,94 @@ import ArkLib.OracleReduction.LiftContext.Basic -/ --- First, what does it mean for two oracle computations to be equivalent? 
+section Relation -namespace OracleComp +variable {Stmt Wit Stmt' Wit' : Type} -open OracleSpec +def Relation.equiv (f : Stmt ≃ Stmt') (g : Wit ≃ Wit') + (R : Stmt → Wit → Prop) (R' : Stmt' → Wit' → Prop) : Prop := + ∀ stmt : Stmt, ∀ wit : Wit, R stmt wit ↔ R' (f stmt) (g wit) -variable {ι ιₜ : Type} {spec : OracleSpec ι} {specₜ : OracleSpec ιₜ} {α σ : Type} [spec.FiniteRange] +theorem Relation.equiv_symm (f : Stmt ≃ Stmt') (g : Wit ≃ Wit') + (R : Stmt → Wit → Prop) (R' : Stmt' → Wit' → Prop) : + Relation.equiv f g R R' ↔ Relation.equiv f.symm g.symm R' R := by + simp [Relation.equiv] + constructor + · intro h + intro stmt' wit' + simpa using (h (f.symm stmt') (g.symm wit')).symm + · intro h + intro stmt wit + simpa using (h (f stmt) (g wit)).symm -#check OracleComp.PolyQueries +end Relation -def oa1 : ProbComp (ℕ × ℕ) := do - let x ← query (spec := unifSpec) 0 () - let y ← query (spec := unifSpec) 1 () - return (x, y) +namespace ProtocolSpec -def oa2 : ProbComp (ℕ × ℕ) := do - let y ← query (spec := unifSpec) 1 () - let x ← query (spec := unifSpec) 0 () - return (x, y) +#check Equiv.instEquivLike -theorem oa1_eq_oa2 : oa1 = oa2 := by - simp [oa1, oa2] - sorry +/-- Two protocol specifications are equivalent if they have the same number of rounds, same + direction for each round, and an equivalence of types for each round. 
-/ +@[ext] +structure Equiv {m n : ℕ} (pSpec : ProtocolSpec m) (pSpec' : ProtocolSpec n) where + round_eq : m = n + dir_eq : ∀ i, (pSpec i).1 = (pSpec' (Fin.cast round_eq i)).1 + typeEquiv : ∀ i, (pSpec i).2 ≃ (pSpec' (Fin.cast round_eq i)).2 -@[simp] -theorem unifSpec_range (n : ℕ) : unifSpec.range n = Fin (n + 1) := rfl +namespace Equiv -def distEquiv (oa ob : OracleComp spec α) : Prop := - evalDist oa = evalDist ob +-- Note: this is not quite an `EquivLike` since `pSpec`s are terms, not types -theorem oa1_distEquiv_oa2 : distEquiv oa1 oa2 := by - simp [oa1, oa2, distEquiv, OptionT.lift, OptionT.mk, evalDist, liftM, monadLift, simulateQ, - Option.getM, Option.elimM, OptionT.run, MonadLift.monadLift, lift] - ext ⟨i, j⟩ - sorry - -- rcases i with none | ⟨x, y⟩ - -- · simp [tsum_eq_sum (s := Finset.univ) (by simp)] - -- simp [OptionT.run, OptionT.lift, OptionT.mk, Functor.map, Option.elimM] - -- sorry - -- · simp [tsum_eq_sum (s := Finset.univ) (by simp)] - -- simp [OptionT.run, OptionT.lift, OptionT.mk, Functor.map] - -- -- simp [tsum_eq_sum (s := Finset.univ) (by simp)] - -- -- cases on x and y - -- sorry +variable {m n k : ℕ} {pSpec : ProtocolSpec m} {pSpec' : ProtocolSpec n} {pSpec'' : ProtocolSpec k} -open SimOracle in -def obsEquiv (oa ob : OracleComp spec α) : Prop := - ∀ f : (i : ι) → spec.domain i → spec.range i, - simulateQ (fnOracle spec f) oa = simulateQ (fnOracle spec f) ob +@[simps] +def refl (pSpec : ProtocolSpec n) : Equiv pSpec pSpec where + round_eq := rfl + dir_eq := fun _ => rfl + typeEquiv := fun _ => _root_.Equiv.refl _ --- Note: observational equivalence does not imply distributional equivalence, since the distribution --- of each new query is independently random +def symm (eqv : Equiv pSpec pSpec') : Equiv pSpec' pSpec where + round_eq := eqv.round_eq.symm + dir_eq := fun i => by simp [eqv.dir_eq] + typeEquiv := fun i => (eqv.typeEquiv (Fin.cast (eqv.round_eq.symm) i)).symm -theorem oa1_obsEquiv_oa2 : obsEquiv oa1 oa2 := by - simp only 
[obsEquiv, unifSpec_range, oa1, Nat.reduceAdd, Fin.val_eq_zero, bind_pure_comp, - simulateQ_bind, simulateQ_query, simulateQ_map, oa2] - intro f - simp only [SimOracle.fnOracle, unifSpec_range, SimOracle.statelessOracle, liftM, monadLift, - MonadLift.monadLift, StateT.lift, Nat.reduceAdd, bind_pure_comp, map_pure, Prod.map_apply, - id_eq] - sorry +def trans (eqv : Equiv pSpec pSpec') (eqv' : Equiv pSpec' pSpec'') : Equiv pSpec pSpec'' where + round_eq := eqv.round_eq.trans eqv'.round_eq + dir_eq := fun i => by simp [eqv.dir_eq, eqv'.dir_eq] + typeEquiv := fun i => .trans (eqv.typeEquiv i) (eqv'.typeEquiv (Fin.cast eqv.round_eq i)) -end OracleComp +end Equiv -section Relation -variable {Stmt Wit Stmt' Wit' : Type} +end ProtocolSpec -def Relation.equiv (f : Stmt ≃ Stmt') (g : Wit ≃ Wit') (R : Stmt → Wit → Prop) (R' : Stmt' → Wit' → Prop) : Prop := - ∀ stmt : Stmt, ∀ wit : Wit, R stmt wit ↔ R' (f stmt) (g wit) +/- Lots of TODOs here: -theorem Relation.equiv_symm (f : Stmt ≃ Stmt') (g : Wit ≃ Wit') (R : Stmt → Wit → Prop) (R' : Stmt' → Wit' → Prop) : - Relation.equiv f g R R' ↔ Relation.equiv f.symm g.symm R' R := by - simp [Relation.equiv] - constructor - · intro h - intro stmt' wit' - simpa using (h (f.symm stmt') (g.symm wit')).symm - · intro h - intro stmt wit - simpa using (h (f stmt) (g wit)).symm +1. Specify equivalence of transcripts, provers, verifiers, reductions +2. Prove distributional equivalence of execution semantics +3. Prove preservation of security properties +-/ --- TODO: define quotienting (i.e. statement is a product type and relation only depends on one component) +variable {n : ℕ} {pSpec pSpec' : ProtocolSpec n} -end Relation +-- More targeted / limited version of equivalence only for the context, i.e. +-- `ctxEquiv`, `stmtEquiv`, `oStmtEquiv`, `witEquiv` + +-- Also, equality and not just equivalence. many times we want observational **equality**. have to +-- specify fewer things + +-- Finally, could we go for a general _simulation_ relation? 
+ +-- structure Prover.ObsEquiv (P : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) +-- (P' : Prover pSpec' oSpec' StmtIn' WitIn' StmtOut' WitOut') where +-- pSpecDirEq : ∀ i, (pSpec i).1 = (pSpec' i).1 +-- pSpecEquiv : ∀ i, (pSpec i).2 ≃ (pSpec' i).2 +-- stmtInEquiv : StmtIn ≃ StmtIn' +-- witInEquiv : WitIn ≃ WitIn' +-- stmtOutEquiv : StmtOut ≃ StmtOut' +-- witOutEquiv : WitOut ≃ WitOut' + -- All prover functions give the same output + -- proverEquiv : ∀ stmtIn witIn, ... namespace Reduction diff --git a/ArkLib/OracleReduction/Execution.lean b/ArkLib/OracleReduction/Execution.lean index d69b2458e..85bbaf734 100644 --- a/ArkLib/OracleReduction/Execution.lean +++ b/ArkLib/OracleReduction/Execution.lean @@ -41,17 +41,18 @@ end loggingOracle section Execution -variable {n : ℕ} {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - {StmtIn WitIn StmtOut WitOut : Type} - {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} +variable {ι : Type} {oSpec : OracleSpec ι} + {StmtIn : Type} {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} {WitIn : Type} + {StmtOut : Type} {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} {WitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] /-- - Prover's function for processing the next round, given the current result of the previous round. +Prover's function for processing the next round, given the current result of the previous round. 
-/ @[inline, specialize] -def Prover.processRound [∀ i, VCVCompatible (pSpec.Challenge i)] (j : Fin n) - (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) +def Prover.processRound (j : Fin n) + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) (currentResult : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (pSpec.Transcript j.castSucc × prover.PrvState j.castSucc)) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) @@ -61,73 +62,66 @@ def Prover.processRound [∀ i, VCVCompatible (pSpec.Challenge i)] (j : Fin n) | .V_to_P => do let challenge ← pSpec.getChallenge ⟨j, hDir⟩ letI newState := prover.receiveChallenge ⟨j, hDir⟩ state challenge - return ⟨transcript.snoc challenge, newState⟩ + return ⟨transcript.concat challenge, newState⟩ | .P_to_V => do let ⟨msg, newState⟩ ← prover.sendMessage ⟨j, hDir⟩ state - return ⟨transcript.snoc msg, newState⟩ + return ⟨transcript.concat msg, newState⟩ -/-- - Run the prover in an interactive reduction up to round index `i`, via first inputting the +/-- Run the prover in an interactive reduction up to round index `i`, via first inputting the statement and witness, and then processing each round up to round `i`. Returns the transcript up to round `i`, and the prover's state after round `i`. -/ @[inline, specialize] -def Prover.runToRound [∀ i, VCVCompatible (pSpec.Challenge i)] (i : Fin (n + 1)) - (stmt : StmtIn) (wit : WitIn) (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : +def Prover.runToRound (i : Fin (n + 1)) + (stmt : StmtIn) (wit : WitIn) (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (pSpec.Transcript i × prover.PrvState i) := Fin.induction - (pure ⟨default, prover.input stmt wit⟩) + (pure ⟨default, prover.input (stmt, wit)⟩) prover.processRound i -/-- - Run the prover in an interactive reduction up to round `i`, logging all the queries made by the +/-- Run the prover in an interactive reduction up to round `i`, logging all the queries made by the prover. 
Returns the transcript up to that round, the prover's state after that round, and the log of the prover's oracle queries. -/ @[inline, specialize] -def Prover.runWithLogToRound [∀ i, VCVCompatible (pSpec.Challenge i)] (i : Fin (n + 1)) - (stmt : StmtIn) (wit : WitIn) (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : +def Prover.runWithLogToRound (i : Fin (n + 1)) + (stmt : StmtIn) (wit : WitIn) (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (pSpec.Transcript i × prover.PrvState i × QueryLog (oSpec ++ₒ [pSpec.Challenge]ₒ)) := do let ⟨⟨transcript, state⟩, proveQueryLog⟩ ← (simulateQ loggingOracle (prover.runToRound i stmt wit)).run return ⟨transcript, state, proveQueryLog⟩ -/-- - Run the prover in an interactive reduction. Returns the output statement and witness, and the +/-- Run the prover in an interactive reduction. Returns the output statement and witness, and the transcript. See `Prover.runWithLog` for a version that additionally returns the log of the prover's oracle queries. -/ @[inline, specialize] -def Prover.run [∀ i, VCVCompatible (pSpec.Challenge i)] (stmt : StmtIn) (wit : WitIn) - (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : - OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (StmtOut × WitOut × FullTranscript pSpec) := do +def Prover.run (stmt : StmtIn) (wit : WitIn) + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : + OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) ((StmtOut × WitOut) × FullTranscript pSpec) := do let ⟨transcript, state⟩ ← prover.runToRound (Fin.last n) stmt wit - let ⟨stmtOut, witOut⟩ := prover.output state - return ⟨stmtOut, witOut, transcript⟩ + return ⟨prover.output state, transcript⟩ -/-- - Run the prover in an interactive reduction, logging all the queries made by the prover. Returns +/-- Run the prover in an interactive reduction, logging all the queries made by the prover. 
Returns the output statement and witness, the transcript, and the log of the prover's oracle queries. -/ @[inline, specialize] -def Prover.runWithLog [∀ i, VCVCompatible (pSpec.Challenge i)] (stmt : StmtIn) (wit : WitIn) - (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : +def Prover.runWithLog (stmt : StmtIn) (wit : WitIn) + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) - (StmtOut × WitOut × FullTranscript pSpec × QueryLog (oSpec ++ₒ [pSpec.Challenge]ₒ)) := do + ((StmtOut × WitOut) × FullTranscript pSpec × QueryLog (oSpec ++ₒ [pSpec.Challenge]ₒ)) := do let ⟨transcript, state, proveQueryLog⟩ ← prover.runWithLogToRound (Fin.last n) stmt wit - let ⟨stmtOut, witOut⟩ := prover.output state - return ⟨stmtOut, witOut, transcript, proveQueryLog⟩ + return ⟨prover.output state, transcript, proveQueryLog⟩ -/-- - Run the (non-oracle) verifier in an interactive reduction. It takes in the input statement and the - transcript, and return the output statement along with the log of oracle queries made by the +/-- Run the (non-oracle) verifier in an interactive reduction. It takes in the input statement and + the transcript, and return the output statement along with the log of oracle queries made by the veirifer. -/ @[inline, specialize, reducible] def Verifier.run (stmt : StmtIn) (transcript : FullTranscript pSpec) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) : OracleComp oSpec StmtOut := + (verifier : Verifier oSpec StmtIn StmtOut pSpec) : OracleComp oSpec StmtOut := verifier.verify stmt transcript /-- Run the oracle verifier in the interactive protocol. 
Returns the verifier's output and the log @@ -136,7 +130,7 @@ def Verifier.run (stmt : StmtIn) (transcript : FullTranscript pSpec) @[inline, specialize] def OracleVerifier.run [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] (stmt : StmtIn) (oStmtIn : ∀ i, OStmtIn i) (transcript : FullTranscript pSpec) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) : + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) : OracleComp oSpec StmtOut := do let f := OracleInterface.simOracle2 oSpec oStmtIn transcript.messages let stmtOut ← simulateQ f (verifier.verify stmt transcript.challenges) @@ -145,16 +139,15 @@ def OracleVerifier.run [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] /-- Running an oracle verifier then discarding the query list is equivalent to running a non-oracle verifier -/ @[simp] -theorem OracleVerifier.run_eq_run_verifier [∀ i, OracleInterface (pSpec.Message i)] {stmt : StmtIn} - {transcript : FullTranscript pSpec} {oStmt : ∀ i, OStmtIn i} - {verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut} : +theorem OracleVerifier.run_eq_run_verifier [Oₘ : ∀ i, OracleInterface (pSpec.Message i)] + {stmt : StmtIn} {oStmt : ∀ i, OStmtIn i} {transcript : FullTranscript pSpec} + {verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec} : verifier.run stmt oStmt transcript = Prod.fst <$> verifier.toVerifier.run ⟨stmt, oStmt⟩ transcript := by simp only [run, bind_pure, Verifier.run, toVerifier, eq_mpr_eq_cast, bind_pure_comp, Functor.map_map, id_map'] -/-- - An execution of an interactive reduction on a given initial statement and witness. Consists of +/-- An execution of an interactive reduction on a given initial statement and witness. Consists of first running the prover, and then the verifier. Returns the output statement and witness, and the full transcript. @@ -162,35 +155,36 @@ theorem OracleVerifier.run_eq_run_verifier [∀ i, OracleInterface (pSpec.Messag the verifier's oracle queries. 
-/ @[inline, specialize] -def Reduction.run [∀ i, VCVCompatible (pSpec.Challenge i)] (stmt : StmtIn) (wit : WitIn) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : +def Reduction.run (stmt : StmtIn) (wit : WitIn) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) ((StmtOut × WitOut) × StmtOut × FullTranscript pSpec) := do - let ⟨prvStmtOut, witOut, transcript⟩ ← reduction.prover.run stmt wit + -- `ctxOut` contains both the output statement and witness after running the prover + let ⟨ctxOut, transcript⟩ ← reduction.prover.run stmt wit let stmtOut ← liftM (reduction.verifier.run stmt transcript) - return ((prvStmtOut, witOut), stmtOut, transcript) + return (ctxOut, stmtOut, transcript) -/-- - An execution of an interactive reduction on a given initial statement and witness. Consists of +/-- An execution of an interactive reduction on a given initial statement and witness. Consists of first running the prover, and then the verifier. Returns the output statement and witness, the full transcript, and the logs of the prover's and the verifier's oracle queries. 
-/ @[inline, specialize] -def Reduction.runWithLog [∀ i, VCVCompatible (pSpec.Challenge i)] (stmt : StmtIn) (wit : WitIn) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : +def Reduction.runWithLog (stmt : StmtIn) (wit : WitIn) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) ((StmtOut × WitOut) × StmtOut × FullTranscript pSpec × QueryLog (oSpec ++ₒ [pSpec.Challenge]ₒ) × QueryLog oSpec) := do - let ⟨prvStmtOut, witOut, transcript, proveQueryLog⟩ ← reduction.prover.runWithLog stmt wit + -- `ctxOut` contains both the output statement and witness after running the prover + let ⟨ctxOut, transcript, proveQueryLog⟩ ← reduction.prover.runWithLog stmt wit let ⟨stmtOut, verifyQueryLog⟩ ← liftM (simulateQ loggingOracle (reduction.verifier.run stmt transcript)).run - return ((prvStmtOut, witOut), stmtOut, transcript, proveQueryLog, verifyQueryLog) + return (ctxOut, stmtOut, transcript, proveQueryLog, verifyQueryLog) /-- Logging the queries made by both parties do not change the output of the reduction -/ @[simp] -theorem Reduction.runWithLog_eq_run [∀ i, VCVCompatible (pSpec.Challenge i)] +theorem Reduction.runWithLog_eq_run {stmt : StmtIn} {wit : WitIn} - {reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut} : + {reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec} : (fun ⟨prvOutput, witOut, transcript, _, _⟩ => (prvOutput, witOut, transcript)) <$> reduction.runWithLog stmt wit = reduction.run stmt wit := by simp [run, runWithLog, Verifier.run, Prover.runWithLog, Prover.runWithLogToRound] @@ -201,17 +195,17 @@ theorem Reduction.runWithLog_eq_run [∀ i, VCVCompatible (pSpec.Challenge i)] the prover's messages and to the shared oracle. 
-/ @[inline, specialize] -def OracleReduction.run [∀ i, VCVCompatible (pSpec.Challenge i)] - [∀ i, OracleInterface (pSpec.Message i)] (stmt : StmtIn) (wit : WitIn) (oStmt : ∀ i, OStmtIn i) - (reduction : OracleReduction pSpec oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut) : +def OracleReduction.run [∀ i, OracleInterface (pSpec.Message i)] + (stmt : StmtIn) (oStmt : ∀ i, OStmtIn i) (wit : WitIn) + (reduction : OracleReduction oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut pSpec) : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) (((StmtOut × ∀ i, OStmtOut i) × WitOut) × StmtOut × FullTranscript pSpec × QueryLog (oSpec ++ₒ [pSpec.Challenge]ₒ) × QueryLog oSpec) := do - let ⟨⟨prvStmtOut, witOut, transcript⟩, proveQueryLog⟩ ← + let ⟨⟨ctxOut, transcript⟩, proveQueryLog⟩ ← (simulateQ loggingOracle (reduction.prover.run ⟨stmt, oStmt⟩ wit)).run let ⟨stmtOut, verifyQueryLog⟩ ← liftM (simulateQ loggingOracle (reduction.verifier.run stmt oStmt transcript)).run - return ((prvStmtOut, witOut), stmtOut, transcript, proveQueryLog, verifyQueryLog) + return (ctxOut, stmtOut, transcript, proveQueryLog, verifyQueryLog) -- /-- Running an oracle verifier then discarding the query list is equivalent to -- running a non-oracle verifier -/ @@ -225,10 +219,10 @@ def OracleReduction.run [∀ i, VCVCompatible (pSpec.Challenge i)] -- Verifier.run, OracleVerifier.toVerifier, liftComp] @[simp] -theorem Prover.runToRound_zero_of_prover_first [∀ i, VCVCompatible (pSpec.Challenge i)] - (stmt : StmtIn) (wit : WitIn) (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : - prover.runToRound 0 stmt wit = (pure (default, prover.input stmt wit)) := by - simp [Prover.run, Prover.runToRound] +theorem Prover.runToRound_zero_of_prover_first + (stmt : StmtIn) (wit : WitIn) (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : + prover.runToRound 0 stmt wit = (pure (default, prover.input (stmt, wit))) := by + simp [Prover.runToRound] end Execution @@ -242,9 +236,9 @@ variable {pSpec : ProtocolSpec 
1} @[simp] theorem Prover.runToRound_one_of_prover_first [ProverOnly pSpec] (stmt : StmtIn) (wit : WitIn) - (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : prover.runToRound 1 stmt wit = (do - let state := prover.input stmt wit + let state := prover.input (stmt, wit) let ⟨msg, state⟩ ← liftComp (prover.sendMessage ⟨0, by simp⟩ state) _ return (fun i => match i with | ⟨0, _⟩ => msg, state)) := by simp [Prover.runToRound, Prover.processRound] @@ -252,25 +246,25 @@ theorem Prover.runToRound_one_of_prover_first [ProverOnly pSpec] (stmt : StmtIn) split <;> rename_i hDir · have : Direction.P_to_V = .V_to_P := by rw [← this, hDir] contradiction - · congr; funext a; congr; simp [default, Transcript.snoc]; funext i + · congr; funext a; congr; simp [default, Transcript.concat]; funext i have : i = 0 := by aesop rw [this]; simp [Fin.snoc] @[simp] theorem Prover.run_of_prover_first [ProverOnly pSpec] (stmt : StmtIn) (wit : WitIn) - (prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : prover.run stmt wit = (do - let state := prover.input stmt wit + let state := prover.input (stmt, wit) let ⟨msg, state⟩ ← liftComp (prover.sendMessage ⟨0, by simp⟩ state) _ - let (stmtOut, witOut) := prover.output state - return (stmtOut, witOut, fun i => match i with | ⟨0, _⟩ => msg)) := by + let ctxOut := prover.output state + return (ctxOut, fun i => match i with | ⟨0, _⟩ => msg)) := by simp [Prover.run]; rfl @[simp] theorem Reduction.run_of_prover_first [ProverOnly pSpec] (stmt : StmtIn) (wit : WitIn) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : reduction.run stmt wit = (do - let state := reduction.prover.input stmt wit + let state := reduction.prover.input (stmt, wit) let ⟨msg, state⟩ ← liftComp (reduction.prover.sendMessage ⟨0, by simp⟩ state) _ let (prvStmtOut, 
witOut) := reduction.prover.output state let transcript : pSpec.FullTranscript := fun i => match i with | ⟨0, _⟩ => msg @@ -293,9 +287,9 @@ end SingleMessage section Classes -variable {n : ℕ} {pSpec : ProtocolSpec 2} - [∀ i, VCVCompatible (pSpec.Challenge i)] {ι : Type} [DecidableEq ι] {oSpec : OracleSpec ι} +variable {ι : Type} {oSpec : OracleSpec ι} {StmtIn WitIn StmtOut WitOut : Type} + {pSpec : ProtocolSpec 2} -- /-- Simplification of the prover's execution in a single-round, two-message protocol where the -- prover speaks first -/ diff --git a/ArkLib/OracleReduction/FiatShamir/Basic.lean b/ArkLib/OracleReduction/FiatShamir/Basic.lean new file mode 100644 index 000000000..ef8afb8e8 --- /dev/null +++ b/ArkLib/OracleReduction/FiatShamir/Basic.lean @@ -0,0 +1,152 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! + # The Fiat-Shamir Transformation + + This file defines the Fiat-Shamir transformation. This transformation takes in a (public-coin) + interactive reduction (IR) `R` and transforms it into a non-interactive reduction via removing + interaction from `R`. In particular, in the transformed reduction, queries to the verifier's + challenges are replaced by queries to a random oracle. + + We will show that the transformation satisfies security properties as follows: + + - Completeness is preserved + - State-restoration (knowledge) soundness implies (knowledge) soundness + - Honest-verifier zero-knowledge implies zero-knowledge + + ## Notes + + Our formalization mostly follows the treatment in the Chiesa-Yogev textbook. + + There are many variants of Fiat-Shamir. The most theoretical one is that one hashes the entire + statement and transcript up to the point of deriving a new challenge. This is inefficient and not + actually how it is implemented in practice. 
+ + In contrast, most implementations today use the hash function as a **cryptographic sponge**, which + permits adding in bitstrings (serialized from the messages) and squeezing out new bitstrings (that + are then de-serialized into challenges). +-/ + +open ProtocolSpec OracleComp OracleSpec + +variable {n : ℕ} + +variable {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} + {StmtIn WitIn StmtOut WitOut : Type} + [VCVCompatible StmtIn] [∀ i, VCVCompatible (pSpec.Challenge i)] + +-- In order to define the Fiat-Shamir transformation for the prover, we need to define +-- a slightly altered execution for the prover + +/-- +Prover's function for processing the next round, given the current result of the previous round. + + This is modified for Fiat-Shamir, where we only accumulate the messages and not the challenges. +-/ +@[inline, specialize] +def Prover.processRoundFS [∀ i, VCVCompatible (pSpec.Challenge i)] (j : Fin n) + (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) + (currentResult : OracleComp (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + (pSpec.MessagesUpTo j.castSucc × StmtIn × prover.PrvState j.castSucc)) : + OracleComp (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + (pSpec.MessagesUpTo j.succ × StmtIn × prover.PrvState j.succ) := do + let ⟨messages, stmtIn, state⟩ ← currentResult + match hDir : pSpec.getDir j with + | .V_to_P => do + let challenge ← query (spec := fiatShamirSpec StmtIn pSpec) ⟨j, hDir⟩ ⟨stmtIn, messages⟩ + letI newState := prover.receiveChallenge ⟨j, hDir⟩ state challenge + return ⟨messages.extend hDir, stmtIn, newState⟩ + | .P_to_V => do + let ⟨msg, newState⟩ ← prover.sendMessage ⟨j, hDir⟩ state + return ⟨messages.concat hDir msg, stmtIn, newState⟩ + +/-- +Run the prover in an interactive reduction up to round index `i`, via first inputting the + statement and witness, and then processing each round up to round `i`. Returns the transcript up + to round `i`, and the prover's state after round `i`. 
+-/ +@[inline, specialize] +def Prover.runToRoundFS [∀ i, VCVCompatible (pSpec.Challenge i)] (i : Fin (n + 1)) + (stmt : StmtIn) (prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) + (state : prover.PrvState 0) : + OracleComp (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + (pSpec.MessagesUpTo i × StmtIn × prover.PrvState i) := + Fin.induction + (pure ⟨default, stmt, state⟩) + prover.processRoundFS + i + +/-- The (slow) Fiat-Shamir transformation for the prover. -/ +def Prover.fiatShamir (P : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec) : + NonInteractiveProver (∀ i, pSpec.Message i) (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + StmtIn WitIn StmtOut WitOut where + PrvState := fun i => match i with + | 0 => StmtIn × P.PrvState 0 + | _ => P.PrvState (Fin.last n) + input := fun ctx => ⟨ctx.1, P.input ctx⟩ + -- Compute the messages to send via the modified `runToRoundFS` + sendMessage | ⟨0, _⟩ => fun ⟨stmtIn, state⟩ => do + let ⟨messages, _, state⟩ ← P.runToRoundFS (Fin.last n) stmtIn state + return ⟨messages, state⟩ + -- This function is never invoked so we apply the elimination principle + receiveChallenge | ⟨0, h⟩ => nomatch h + output := P.output + +/-- Derive the full transcript from the (full) messages, via querying the Fiat-Shamir oracle for the + challenges. -/ +def ProtocolSpec.Messages.deriveTranscriptFS (messages : pSpec.Messages) (stmt : StmtIn) + (k : Fin (n + 1)) : + OracleComp (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) (pSpec.Transcript k) := do + Fin.induction + (pure default) + (fun i ih => do + let prevTranscript ← ih + match hDir : pSpec.getDir i with + | .V_to_P => + let challenge ← + query (spec := fiatShamirSpec _ _) ⟨i, hDir⟩ ⟨stmt, messages.take i.castSucc⟩ + return prevTranscript.concat challenge + | .P_to_V => return prevTranscript.concat (messages ⟨i, hDir⟩)) + k + +/-- The (slow) Fiat-Shamir transformation for the verifier. 
-/ +def Verifier.fiatShamir (V : Verifier oSpec StmtIn StmtOut pSpec) : + NonInteractiveVerifier (∀ i, pSpec.Message i) (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + StmtIn StmtOut where + verify := fun stmtIn proof => do + let messages : pSpec.Messages := proof 0 + let transcript ← messages.deriveTranscriptFS stmtIn (Fin.last n) + V.verify stmtIn transcript + +/-- The Fiat-Shamir transformation for an (interactive) reduction, which consists of applying the + Fiat-Shamir transformation to both the prover and the verifier. -/ +def Reduction.fiatShamir (R : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : + NonInteractiveReduction (∀ i, pSpec.Message i) (oSpec ++ₒ fiatShamirSpec StmtIn pSpec) + StmtIn WitIn StmtOut WitOut where + prover := R.prover.fiatShamir + verifier := R.verifier.fiatShamir + +section Security + +noncomputable section + +open scoped NNReal + +variable [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + +theorem fiatShamir_completeness (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (completenessError : ℝ≥0) (R : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : + R.completeness relIn relOut completenessError → + R.fiatShamir.completeness relIn relOut completenessError := sorry + +-- TODO: state-restoration (knowledge) soundness implies (knowledge) soundness after Fiat-Shamir + +end + +end Security diff --git a/ArkLib/OracleReduction/FiatShamir/DuplexSponge/State.lean b/ArkLib/OracleReduction/FiatShamir/DuplexSponge/State.lean new file mode 100644 index 000000000..72501c42e --- /dev/null +++ b/ArkLib/OracleReduction/FiatShamir/DuplexSponge/State.lean @@ -0,0 +1,453 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.Data.Hash.DomainSep + +/-! 
+ # State of the Prover and Verifier in Duplex Sponge Fiat-Shamir + + This file contains the implementation of prover and verifier states for interactive proofs + using duplex sponge functions, based on the [spongefish](https://github.com/arkworks-rs/spongefish) + Rust library. + + ## Core Components + + - `HashStateWithInstructions`: A stateful hash object that interfaces with duplex sponges + - `VerifierState`: The verifier state containing hash state and NARG string + - `ProverState`: The prover state containing randomness, hash state, and transcript + - `ProverPrivateRng`: Cryptographically-secure RNG bound to the protocol transcript + + NOTE: this file is generated by Claude 4 Sonnet and has not been fully proof-read. +-/ + +-- Hack: define `Repr` instance for `ByteArray` in terms of the `Repr` instance for `List` +instance : Repr ByteArray where + reprPrec b n := List.repr b.toList n + +/-- A stateful hash object that interfaces with duplex sponges. + +Rust interface: +```rust +#[derive(Clone)] +pub struct HashStateWithInstructions +where + U: Unit, + H: DuplexSpongeInterface, +{ + /// The internal duplex sponge used for absorbing and squeezing data. + ds: H, + /// A stack of expected sponge operations. + stack: VecDeque, + /// Marker to associate the unit type `U` without storing a value. + _unit: PhantomData, +} +``` + +This structure maintains the sponge state and tracks expected operations to ensure +protocol compliance. +-/ +structure HashStateWithInstructions (U : Type) [SpongeUnit U] (H : Type*) + [DuplexSpongeInterface U H] where + /-- The internal duplex sponge used for absorbing and squeezing data. -/ + ds : H + /-- A stack of expected sponge operations (FIFO queue). -/ + stack : Array DomainSeparator.Op +deriving Inhabited, DecidableEq, Repr + +namespace HashStateWithInstructions + +variable {U : Type} {H : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] + +/-- Generate a 32-byte tag from the domain separator bytes using Keccak. 
+ +Rust implementation: +```rust +fn generate_tag(iop_bytes: &[u8]) -> [u8; 32] { + let mut keccak = Keccak::default(); + keccak.absorb_unchecked(iop_bytes); + let mut tag = [0u8; 32]; + keccak.squeeze_unchecked(&mut tag); + tag +} +``` +-/ +def generateTag (iopBytes : ByteArray) : Vector UInt8 32 := + -- TODO: Implement Keccak hashing + sorry + +/-- Initialize a stateful hash object from a domain separator. + +Rust interface: +```rust +pub fn new(domain_separator: &DomainSeparator) -> Self +``` +-/ +def new (domainSeparator : DomainSeparator U H) : HashStateWithInstructions U H := + let stack := domainSeparator.finalize + let tag := generateTag domainSeparator.asBytes + { ds := Initialize.new tag, stack := stack } + +/-- Perform secure absorption of elements into the sponge. + +Rust interface: +```rust +pub fn absorb(&mut self, input: &[U]) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def absorb (state : HashStateWithInstructions U H) (input : Array U) : + Except DomainSeparatorMismatch (HashStateWithInstructions U H) := do + -- TODO: Fix implementation with proper interface calls and array bounds checking + sorry + +/-- Perform a secure squeeze operation. + +Rust interface: +```rust +pub fn squeeze(&mut self, output: &mut [U]) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def squeeze (state : HashStateWithInstructions U H) (outputSize : Nat) : + Except DomainSeparatorMismatch (HashStateWithInstructions U H × Array U) := do + -- TODO: Fix implementation with proper interface calls and array bounds checking + sorry + +/-- Process a hint operation. + +Rust interface: +```rust +pub fn hint(&mut self) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def hint (state : HashStateWithInstructions U H) : + Except DomainSeparatorMismatch (HashStateWithInstructions U H) := do + -- TODO: Fix implementation with proper array bounds checking + sorry + +/-- Perform a ratchet operation. 
+ +Rust interface: +```rust +pub fn ratchet(&mut self) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def ratchet (state : HashStateWithInstructions U H) : + Except DomainSeparatorMismatch (HashStateWithInstructions U H) := do + -- TODO: Fix implementation with proper interface calls and array bounds checking + sorry + +end HashStateWithInstructions + +/-- The verifier state for interactive proofs. + +Rust interface: +```rust +pub struct VerifierState<'a, H = DefaultHash, U = u8> +where + H: DuplexSpongeInterface, + U: Unit, +{ + pub(crate) hash_state: HashStateWithInstructions, + pub(crate) narg_string: &'a [u8], +} +``` + +The verifier state contains a hash state and a reference to the NARG string +(Non-interactive ARGument string) containing the proof transcript. +-/ +structure FSVerifierState (U : Type) [SpongeUnit U] (H : Type*) [DuplexSpongeInterface U H] where + /-- The hash state tracking expected operations. -/ + hashState : HashStateWithInstructions U H + /-- The NARG string containing the proof transcript. -/ + nargString : ByteArray +deriving Repr + +namespace FSVerifierState + +variable {U : Type} {H : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] + +/-- Create a new VerifierState from a domain separator and NARG string. + +Rust interface: +```rust +pub fn new(domain_separator: &DomainSeparator, narg_string: &'a [u8]) -> Self +``` +-/ +def new (domainSeparator : DomainSeparator U H) (nargString : ByteArray) : + FSVerifierState U H := + { hashState := HashStateWithInstructions.new domainSeparator, + nargString := nargString } + +/-- Read units from the NARG string and absorb them. 
+ +Rust interface: +```rust +pub fn fill_next_units(&mut self, input: &mut [U]) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def fillNextUnits (state : FSVerifierState U H) (count : Nat) : + Except DomainSeparatorMismatch (FSVerifierState U H × Array U) := do + -- Check if we have enough bytes in the NARG string + let bytesNeeded := count * HasSize.size U UInt8 + if state.nargString.size < bytesNeeded then + .error { + message := s!"Insufficient transcript remaining, need {bytesNeeded} bytes, + got {state.nargString.size}" } + else + -- Read the required bytes + let readBytes := state.nargString.extract 0 bytesNeeded + let remaining := state.nargString.extract bytesNeeded state.nargString.size + -- Deserialize units from bytes + let units := Array.range count |>.mapM (fun i => + let unitBytes := readBytes.extract (i * HasSize.size U UInt8) (HasSize.size U UInt8) + DeserializeOption.deserialize unitBytes) + match units with + | some unitsArray => + -- Absorb into hash state + let newHashState ← state.hashState.absorb unitsArray + .ok ({ hashState := newHashState, nargString := remaining }, unitsArray) + | none => + .error { message := "Failed to deserialize units from NARG string" } + +/-- Read a hint from the NARG string. 
+ +Rust interface: +```rust +pub fn hint_bytes(&mut self) -> Result<&'a [u8], DomainSeparatorMismatch> +``` +-/ +def hintBytes (state : FSVerifierState U H) : + Except DomainSeparatorMismatch (FSVerifierState U H × ByteArray) := do + let newHashState ← state.hashState.hint + + -- Ensure at least 4 bytes are available for the length prefix + if state.nargString.size < 4 then + .error { message := "Insufficient transcript remaining for hint" } + else + -- Read 4-byte little-endian length prefix explicitly + let byte0 := state.nargString[0]!.toNat + let byte1 := state.nargString[1]!.toNat + let byte2 := state.nargString[2]!.toNat + let byte3 := state.nargString[3]!.toNat + let length := byte0 + (byte1 <<< 8) + (byte2 <<< 16) + (byte3 <<< 24) + let rest := state.nargString.extract 4 state.nargString.size + + -- Ensure the rest of the slice has `length` bytes + if rest.size < length then + .error { message := s!"Insufficient transcript remaining, got {rest.size}, need {length}" } + else + -- Split the hint and advance the transcript + let hint := rest.extract 0 length + let remaining := rest.extract length rest.size + .ok ({ hashState := newHashState, nargString := remaining }, hint) + +/-- Signal the end of statement with ratcheting. + +Rust interface: +```rust +pub fn ratchet(&mut self) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def ratchet (state : FSVerifierState U H) : + Except DomainSeparatorMismatch (FSVerifierState U H) := do + let newHashState ← state.hashState.ratchet + .ok { hashState := newHashState, nargString := state.nargString } + +end FSVerifierState + +/-- A cryptographically-secure random number generator bound to the protocol transcript. + +Rust interface: +```rust +pub struct ProverPrivateRng { + /// The duplex sponge that is used to generate the random coins. + pub(crate) ds: Keccak, + /// The cryptographic random number generator that seeds the sponge. 
+ pub(crate) csrng: R, +} +``` + +This ensures that the prover's randomness is deterministically derived from the protocol +transcript while being seeded by a cryptographically secure source. +-/ +structure ProverPrivateRng (R : Type*) where + /-- The duplex sponge for generating random coins. -/ + ds : Unit -- TODO: Replace with actual Keccak type + /-- The cryptographic random number generator seed. -/ + csrng : R +deriving Repr + +/-- The prover state for interactive proofs. + +Rust interface: +```rust +pub struct ProverState +where + U: Unit, + H: DuplexSpongeInterface, + R: RngCore + CryptoRng, +{ + /// The randomness state of the prover. + pub(crate) rng: ProverPrivateRng, + /// The public coins for the protocol + pub(crate) hash_state: HashStateWithInstructions, + /// The encoded data. + pub(crate) narg_string: Vec, +} +``` + +The Fiat-Shamir prover state maintains secret randomness, tracks the protocol state, and builds +the proof transcript. +-/ +structure FSProverState (U : Type) [SpongeUnit U] (H : Type*) [DuplexSpongeInterface U H] + (R : Type*) where + /-- The randomness state of the prover. -/ + rng : ProverPrivateRng R + /-- The public coins for the protocol. -/ + hashState : HashStateWithInstructions U H + /-- The encoded proof transcript. -/ + nargString : ByteArray +deriving Repr + +namespace FSProverState + +variable {U : Type} {H : Type*} {R : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] + +/-- Create a new `FSProverState` from a domain separator and RNG. + +Rust interface: +```rust +pub fn new(domain_separator: &DomainSeparator, csrng: R) -> Self +``` +-/ +def new (domainSeparator : DomainSeparator U H) (csrng : R) : FSProverState U H R := + let hashState := HashStateWithInstructions.new domainSeparator + -- TODO: Initialize ProverPrivateRng properly + let rng : ProverPrivateRng R := { ds := (), csrng := csrng } + { rng := rng, hashState := hashState, nargString := ByteArray.empty } + +/-- Add units to the protocol transcript. 
+ +Rust interface: +```rust +pub fn add_units(&mut self, input: &[U]) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def addUnits (state : FSProverState U H R) (input : Array U) : + Except DomainSeparatorMismatch (FSProverState U H R) := + match state.hashState.absorb input with + | .ok newHashState => + -- TODO: Serialize units and append to NARG string + -- TODO: Update RNG with new transcript data + .ok { rng := state.rng, hashState := newHashState, nargString := state.nargString } + | .error e => .error e + +/-- Add a hint to the protocol transcript. + +Rust interface: +```rust +pub fn hint_bytes(&mut self, hint: &[u8]) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def hintBytes (state : FSProverState U H R) (hint : ByteArray) : + Except DomainSeparatorMismatch (FSProverState U H R) := + match state.hashState.hint with + | .ok newHashState => + -- TODO: Add length prefix and hint to NARG string + -- let len = u32::try_from(hint.len()).expect("Hint size out of bounds"); + .ok { rng := state.rng, hashState := newHashState, nargString := state.nargString } + | .error e => .error e + +/-- Ratchet the protocol state. + +Rust interface: +```rust +pub fn ratchet(&mut self) -> Result<(), DomainSeparatorMismatch> +``` +-/ +def ratchet (state : FSProverState U H R) : + Except DomainSeparatorMismatch (FSProverState U H R) := + match state.hashState.ratchet with + | .ok newHashState => + .ok { rng := state.rng, hashState := newHashState, nargString := state.nargString } + | .error e => .error e + +/-- Get the current NARG string (proof transcript). + +Rust interface: +```rust +pub fn narg_string(&self) -> &[u8] +``` +-/ +def getNargString (state : FSProverState U H R) : ByteArray := state.nargString + +end FSProverState + +/-- Type class for unit transcript operations. 
+ +Rust interface: +```rust +pub trait UnitTranscript { + fn public_units(&mut self, input: &[U]) -> Result<(), DomainSeparatorMismatch>; + fn fill_challenge_units(&mut self, input: &mut [U]) -> Result<(), DomainSeparatorMismatch>; +} +``` +-/ +class UnitTranscript (α : Type*) (U : Type) where + /-- Add public units without writing to transcript. -/ + publicUnits : α → Array U → Except DomainSeparatorMismatch α + /-- Fill array with challenge units. -/ + fillChallengeUnits : α → Nat → Except DomainSeparatorMismatch (α × Array U) + +/-- UnitTranscript instance for FSVerifierState. -/ +instance {U : Type} {H : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] : + UnitTranscript (FSVerifierState U H) U where + publicUnits state input := do + let newHashState ← state.hashState.absorb input + .ok { hashState := newHashState, nargString := state.nargString } + fillChallengeUnits state count := do + let (newHashState, output) ← state.hashState.squeeze count + .ok ({ hashState := newHashState, nargString := state.nargString }, output) + +/-- UnitTranscript instance for FSProverState. -/ +instance {U : Type} {H : Type*} {R : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] : + UnitTranscript (FSProverState U H R) U where + publicUnits state input := do + -- Public units are absorbed but not added to transcript + -- let oldNargLen := state.nargString.size + let newState ← state.addUnits input + .ok { rng := newState.rng, hashState := newState.hashState, + nargString := state.nargString } -- Keep old NARG string + fillChallengeUnits state count := + match state.hashState.squeeze count with + | .ok (newHashState, output) => + .ok ({ rng := state.rng, hashState := newHashState, nargString := state.nargString }, output) + | .error e => .error e + +namespace DomainSeparator + +variable {H : Type*} {U : Type} {R : Type*} [SpongeUnit U] [DuplexSpongeInterface U H] + +/-- Create a ProverState from this domain separator. 
+ +Rust interface: +```rust +pub fn to_prover_state(&self) -> crate::ProverState +``` +-/ +def toProverState (ds : DomainSeparator U H) (rng : R) : FSProverState U H R := + FSProverState.new ds rng + +/-- Create a FSVerifierState from this domain separator and transcript. + +Rust interface: +```rust +pub fn to_verifier_state<'a>(&self, transcript: &'a [u8]) -> crate::VerifierState<'a, H, U> +``` +-/ +def toVerifierState (ds : DomainSeparator U H) (transcript : ByteArray) : FSVerifierState U H := + FSVerifierState.new ds transcript + +end DomainSeparator diff --git a/ArkLib/OracleReduction/LiftContext/Basic.lean b/ArkLib/OracleReduction/LiftContext/Basic.lean deleted file mode 100644 index 94b5e5fd6..000000000 --- a/ArkLib/OracleReduction/LiftContext/Basic.lean +++ /dev/null @@ -1,854 +0,0 @@ -/- -Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Authors: Quang Dao --/ - -import ArkLib.OracleReduction.Security.Basic -import ToMathlib.PFunctor.Basic - -/-! - ## Lifting (Oracle) Reductions to Larger Contexts - - Sequential composition is usually not enough to represent oracle reductions in a modular way. We - also need to formalize **virtual** oracle reductions, which lift reductions from one - (virtual / inner) context into the another (real / outer) context. - - This is what is meant when we informally say "apply so-and-so protocol to this quantity (derived - from the input statement & witness)". - - Put in other words, we define a mapping between the input-output interfaces of two (oracle) - reductions, without changing anything about the underlying reductions. 
- - Recall that the input-output interface of an oracle reduction consists of: - - Input: `OuterStmtIn : Type`, `OuterOStmtIn : ιₛᵢ → Type`, and `OuterWitIn : Type` - - Output: `OuterStmtOut : Type`, `OuterOStmtOut : ιₛₒ → Type`, and `OuterWitOut : Type` - - The liftContext is defined as the following mappings of projections / lifts: - - - `projStmt : OuterStmtIn → InnerStmtIn` - - `projOStmt : (simulation involving OuterOStmtIn to produce InnerOStmtIn)` - - `projWit : OuterWitIn → InnerWitIn` - - `liftStmt : OuterStmtIn × InnerStmtOut → OuterStmtOut` - - `liftOStmt : (simulation involving InnerOStmtOut to produce OuterOStmtOut)` - - `liftWit : OuterWitIn × InnerWitOut → OuterWitOut` - - Note that since completeness & soundness for oracle reductions are defined in terms of the same - properties after converting to (non-oracle) reductions, we only need to focus our efforts on the - non-oracle case. - - Note that this _exactly_ corresponds to lenses in programming languages / category theory. - Namely, liftContext on the inputs correspond to a `view`/`get` operation (our "proj"), while - liftContext on the output corresponds to a `modify`/`set` operation (our "lift"). - - More precisely, the `proj/lift` operations correspond to a Lens between two monomial polyonmial - functors: `OuterCtxIn y^ OuterCtxOut ⇆ InnerCtxIn y^ InnerCtxOut`. - - What are some expected properties of the liftContext (via the connection to lenses)? - - First, we recall the expected properties of lenses: - - 1. `get(lens, set(lens, value, store)) = value` - 2. `set(lens, new, set(lens, old, store)) = set(lens, new, store)` - 3. `set(lens, get(lens, store), store) = store` - 4. and more - - What should this mean here? - - one property not mentioned above, is that the pre-image of `projInput` should be invariant - for `liftOutput`. 
- - That is, if `projInput(inp1) = projInput(inp2) = inp*`, then `liftOutput(inp1, out) - = liftOutput(inp2, out)` for all `out` which is a possible result of running the oracle - reduction on `inp*`. This basically implies a decomposition of sorts between the input to be - transported. --/ - -open OracleSpec OracleComp ProtocolSpec - -section Transport - -open scoped NNReal - -/-- A lens for transporting (non-oracle) statements between outer and inner contexts -/ -class StatementLens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) where - projStmt : OuterStmtIn → InnerStmtIn - liftStmt : OuterStmtIn × InnerStmtOut → OuterStmtOut - -#check ReaderT.run - -/-- A lens for transporting oracle statements between outer and inner contexts - -We require knowledge of the (non-oracle) input statement in the outer context, along with the -(non-oracle) output statement in the inner context. -/ -class OStatementLens (OuterStmtIn InnerStmtOut : Type) - {Outer_ιₛᵢ : Type} (OuterOStmtIn : Outer_ιₛᵢ → Type) [∀ i, OracleInterface (OuterOStmtIn i)] - {Outer_ιₛₒ : Type} (OuterOStmtOut : Outer_ιₛₒ → Type) [∀ i, OracleInterface (OuterOStmtOut i)] - {Inner_ιₛᵢ : Type} (InnerOStmtIn : Inner_ιₛᵢ → Type) [∀ i, OracleInterface (InnerOStmtIn i)] - {Inner_ιₛₒ : Type} (InnerOStmtOut : Inner_ιₛₒ → Type) [∀ i, OracleInterface (InnerOStmtOut i)] - where - -- To access an input oracle statement in the inner context, we may simulate it using the input - -- (non-oracle) statement of the outer context, along with oracle access to the outer input oracle - -- statements - projOStmt : ∀ i, OuterOStmtIn i → ∀ i, InnerOStmtIn i - - simOStmt : QueryImpl [InnerOStmtIn]ₒ - (ReaderT OuterStmtIn (OracleComp [OuterOStmtIn]ₒ)) - - -- simOStmt_eq_projOStmt : - - -- projOStmt_neverFails : ∀ i, ∀ t, ∀ outerStmtIn, - -- ((projOStmt.impl (query i t)).run outerStmtIn).neverFails - -- To get back an output oracle statement in the outer context, we may simulate it using the input - -- (non-oracle) statement of 
the outer context, the output (non-oracle) statement of the inner - -- context, along with oracle access to the inner output oracle statements - liftOStmt : QueryImpl [OuterOStmtOut]ₒ - (ReaderT (OuterStmtIn × InnerStmtOut) (OracleComp ([OuterOStmtIn]ₒ ++ₒ [InnerOStmtOut]ₒ))) - liftOStmt_neverFails : ∀ i, ∀ t, ∀ outerStmtIn, ∀ innerStmtOut, - ((liftOStmt.impl (query i t)).run (outerStmtIn, innerStmtOut)).neverFails - -/-- A lens for transporting witnesses between outer and inner contexts -/ -class WitnessLens (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) where - projWit : OuterWitIn → InnerWitIn - liftWit : OuterWitIn × InnerWitOut → OuterWitOut - -/-- A lens for transporting between outer and inner contexts of a (non-oracle) reduction -/ -class ContextLens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) - (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) - extends StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut, - WitnessLens OuterWitIn OuterWitOut InnerWitIn InnerWitOut - -/-- A lens for transporting between outer and inner contexts of an oracle reduction -/ -class OracleContextLens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) - {Outer_ιₛᵢ : Type} (OuterOStmtIn : Outer_ιₛᵢ → Type) [∀ i, OracleInterface (OuterOStmtIn i)] - {Outer_ιₛₒ : Type} (OuterOStmtOut : Outer_ιₛₒ → Type) [∀ i, OracleInterface (OuterOStmtOut i)] - {Inner_ιₛᵢ : Type} (InnerOStmtIn : Inner_ιₛᵢ → Type) [∀ i, OracleInterface (InnerOStmtIn i)] - {Inner_ιₛₒ : Type} (InnerOStmtOut : Inner_ιₛₒ → Type) [∀ i, OracleInterface (InnerOStmtOut i)] - (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) extends - - StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut, - - OStatementLens OuterStmtIn InnerStmtOut - OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut, - - WitnessLens OuterWitIn OuterWitOut InnerWitIn InnerWitOut - -namespace OracleContextLens - -#check OracleComp.simulateQ - -variable {OuterStmtIn OuterStmtOut InnerStmtIn 
InnerStmtOut : Type} - {Outer_ιₛᵢ : Type} (OuterOStmtIn : Outer_ιₛᵢ → Type) [∀ i, OracleInterface (OuterOStmtIn i)] - {Outer_ιₛₒ : Type} (OuterOStmtOut : Outer_ιₛₒ → Type) [∀ i, OracleInterface (OuterOStmtOut i)] - {Inner_ιₛᵢ : Type} (InnerOStmtIn : Inner_ιₛᵢ → Type) [∀ i, OracleInterface (InnerOStmtIn i)] - {Inner_ιₛₒ : Type} (InnerOStmtOut : Inner_ιₛₒ → Type) [∀ i, OracleInterface (InnerOStmtOut i)] - {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} - {oLens : OracleContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut} - -/-- Converting an oracle context lens to a non-oracle context lens, via moving oracle statements -into non-oracle statements -/ -instance instContextLens : ContextLens - (OuterStmtIn × ∀ i, OuterOStmtIn i) (OuterStmtOut × ∀ i, OuterOStmtOut i) - (InnerStmtIn × ∀ i, InnerOStmtIn i) (InnerStmtOut × ∀ i, InnerOStmtOut i) - OuterWitIn OuterWitOut InnerWitIn InnerWitOut where - projStmt := fun ⟨outerStmtIn, outerOStmtIn⟩ => - ⟨oLens.projStmt outerStmtIn, sorry⟩ - liftStmt := fun ⟨outerStmtIn, innerStmtOut⟩ => sorry - projWit := fun outerWitIn => sorry - liftWit := fun ⟨outerWitIn, innerWitOut⟩ => sorry - -end OracleContextLens - - -/- - Recall the interface of an extractor: - - Takes in `WitOut`, `StmtIn`, `Transcript`, `QueryLog` - (note: no need for `StmtOut` as it can be derived from `StmtIn`, `Transcript`, and `QueryLog`) - - Returns `WitIn` - - We need a lens for the extractor as well. - - Assume we have an inner extractor - `E : InnerWitOut → InnerStmtIn → Transcript → QueryLog → InnerWitIn` - - We need to derive an outer extractor - `E' : OuterWitOut → OuterStmtIn → Transcript → QueryLog → OuterWitIn` - - Note that `Transcript` and `QueryLog` are the same due to the lens only changing the - input-output interface, and not the inside (oracle) reduction. 
- - So, `E' outerWitOut outerStmtIn transcript queryLog` needs to call - `E (map to innerWitOut) (projStmt outerStmtIn) transcript queryLog` - and then post-process the result, which is some `innerWitIn`, to get some `outerWitIn`. - - This processing is exactly the same as a lens in the opposite direction between the outer and - inner witness types. --/ - -/-- Inverse lens between outer and inner witnesses (going the other direction with respect to - input-output), for extractors / knowledge soundness. --/ -@[reducible] -def WitnessLensInv (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) := - WitnessLens OuterWitOut OuterWitIn InnerWitOut InnerWitIn --- structure ContextLensInv (OuterStmtOut InnerStmtOut : Type) --- (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) extends --- WitnessLens OuterWitOut OuterWitIn InnerWitOut InnerWitIn where --- projStmt : OuterStmtOut → InnerStmtOut - -- projWitInv : InnerWitOut → OuterWitOut - -- liftWitInv : InnerWitIn × OuterWitOut → OuterWitIn - -/-- For round-by-round knowledge soundness, we require an _equivalence_ on the input witness - (inner <=> outer). Otherwise, we cannot extract. - - (imagine a reduction from R1 x R2 => R3 x R4, that is the sequential composition of R1 => R3 and - then R2 => R4. 
This reduction is not round-by-round knowledge sound, since if we are in the - R1 => R3 rounds, we have no way of invoking the second extractor for recovering the witness for - R2.)-/ -class RBRWitnessLensInv (OuterWitIn InnerWitIn : Type) where - liftWit : InnerWitIn → OuterWitIn - -/-- Conditions for the lens / transformation to preserve completeness - -For `lift`, we require compatibility relations between the outer input statement/witness and -the inner output statement/witness -/ -class ContextLens.IsComplete {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} - (outerRelIn : OuterStmtIn → OuterWitIn → Prop) - (innerRelIn : InnerStmtIn → InnerWitIn → Prop) - (outerRelOut : OuterStmtOut → OuterWitOut → Prop) - (innerRelOut : InnerStmtOut → InnerWitOut → Prop) - (compat : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → Prop) - (lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut) where - - proj_complete : ∀ stmtIn witIn, - outerRelIn stmtIn witIn → - innerRelIn (lens.projStmt stmtIn) (lens.projWit witIn) - - lift_complete : ∀ outerStmtIn outerWitIn innerStmtOut innerWitOut, - outerRelIn outerStmtIn outerWitIn → - innerRelOut innerStmtOut innerWitOut → - compat (outerStmtIn, outerWitIn) (innerStmtOut, innerWitOut) → - outerRelOut (lens.liftStmt (outerStmtIn, innerStmtOut)) - (lens.liftWit (outerWitIn, innerWitOut)) - -/-- Conditions for the lens / transformation to preserve soundness -/ -class StatementLens.IsSound {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - (outerLangIn : Set OuterStmtIn) (outerLangOut : Set OuterStmtOut) - (innerLangIn : Set InnerStmtIn) (innerLangOut : Set InnerStmtOut) - (compat : OuterStmtIn → InnerStmtOut → Prop) - (stmtLens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) where - - proj_sound : ∀ outerStmtIn, - outerStmtIn ∉ outerLangIn → stmtLens.projStmt outerStmtIn ∉ 
innerLangIn - - lift_sound : ∀ outerStmtIn innerStmtOut, - outerStmtIn ∉ outerLangIn → - innerStmtOut ∉ innerLangOut → - compat outerStmtIn innerStmtOut → - stmtLens.liftStmt (outerStmtIn, innerStmtOut) ∉ outerLangOut - -/-- Conditions for the lens / transformation to preserve round-by-round soundness - -This is nearly identical to the `IsSound` condition, _except_ that we do not require -`outerStmtIn ∉ outerLangIn` in the `lift_sound` condition. - -(we need to relax that condition to prove `toFun_full` of the lifted state function) -/ -class StatementLens.IsRBRSound {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - (outerLangIn : Set OuterStmtIn) (outerLangOut : Set OuterStmtOut) - (innerLangIn : Set InnerStmtIn) (innerLangOut : Set InnerStmtOut) - (compat : OuterStmtIn → InnerStmtOut → Prop) - (stmtLens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) where - - proj_sound : ∀ outerStmtIn, - outerStmtIn ∉ outerLangIn → stmtLens.projStmt outerStmtIn ∉ innerLangIn - - lift_sound : ∀ outerStmtIn innerStmtOut, - innerStmtOut ∉ innerLangOut → - compat outerStmtIn innerStmtOut → - stmtLens.liftStmt (outerStmtIn, innerStmtOut) ∉ outerLangOut - -/-- Conditions for the lens / transformation to preserve knowledge soundness - -(TODO: double-check) -/ -class ContextLens.IsKnowledgeSound - {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} - (outerRelIn : OuterStmtIn → OuterWitIn → Prop) - (innerRelIn : InnerStmtIn → InnerWitIn → Prop) - (outerRelOut : OuterStmtOut → OuterWitOut → Prop) - (innerRelOut : InnerStmtOut → InnerWitOut → Prop) - (compat : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → Prop) - (lensInv : WitnessLensInv OuterWitIn OuterWitOut InnerWitIn InnerWitOut) - (lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut) where - - /-- Projecting using the inverse lens from outer to inner context 
preserves satisfaction of - relations -/ - proj_knowledge_sound : ∀ outerStmtIn outerWitIn, - outerRelIn outerStmtIn outerWitIn → - innerRelIn (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - - lift_knowledge_sound : ∀ outerStmtIn outerWitIn innerStmtOut innerWitOut, - outerRelIn outerStmtIn outerWitIn → - innerRelOut innerStmtOut innerWitOut → - compat (outerStmtIn, outerWitIn) (innerStmtOut, innerWitOut) → - outerRelOut (lens.liftStmt (outerStmtIn, innerStmtOut)) - (lens.liftWit (outerWitIn, innerWitOut)) - -namespace ContextLens.IsKnowledgeSound - --- Convert knowledge soundness into soundness - -end ContextLens.IsKnowledgeSound - -class ContextLens.IsRBRKnowledgeSound - {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} - (outerRelIn : OuterStmtIn → OuterWitIn → Prop) - (innerRelIn : InnerStmtIn → InnerWitIn → Prop) - (outerRelOut : OuterStmtOut → OuterWitOut → Prop) - (innerRelOut : InnerStmtOut → InnerWitOut → Prop) - -variable {n : ℕ} {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - {OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut : Type} - {InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut : Type} - [∀ i, VCVCompatible (pSpec.Challenge i)] - -/-- The outer prover after lifting invokes the inner prover on the projected input, and - lifts the output -/ -def Prover.liftContext - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - Prover pSpec oSpec OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut where - PrvState := fun i => P.PrvState i × OuterStmtIn × OuterWitIn - input := fun stmtIn witIn => - ⟨P.input (lens.projStmt stmtIn) (lens.projWit witIn), stmtIn, witIn⟩ - sendMessage := fun i ⟨prvState, stmtIn, witIn⟩ => do - let ⟨msg, prvState'⟩ ← P.sendMessage i prvState - return ⟨msg, ⟨prvState', stmtIn, witIn⟩⟩ - receiveChallenge := fun i 
⟨prvState, stmtIn, witIn⟩ chal => - ⟨P.receiveChallenge i prvState chal, stmtIn, witIn⟩ - output := fun ⟨prvState, stmtIn, witIn⟩ => - let ⟨innerStmtOut, innerWitOut⟩ := P.output prvState - ⟨lens.liftStmt (stmtIn, innerStmtOut), lens.liftWit (witIn, innerWitOut)⟩ - -/-- The outer verifier after lifting invokes the inner verifier on the projected input, and - lifts the output -/ -def Verifier.liftContext - [lens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut] - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) : - Verifier pSpec oSpec OuterStmtIn OuterStmtOut where - verify := fun stmtIn transcript => do - let innerStmtIn := lens.projStmt stmtIn - let innerStmtOut ← V.verify innerStmtIn transcript - return lens.liftStmt (stmtIn, innerStmtOut) - -/-- The outer reduction after lifting is the combination of the lifting of the prover and - verifier -/ -def Reduction.liftContext - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - (R : Reduction pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - Reduction pSpec oSpec OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut where - prover := R.prover.liftContext - verifier := R.verifier.liftContext - -open Verifier in -/-- The outer extractor after lifting invokes the inner extractor on the projected input, and - lifts the output -/ -def StraightlineExtractor.liftContext - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - [lensInv : WitnessLensInv OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - (E : StraightlineExtractor pSpec oSpec InnerStmtIn InnerWitIn InnerWitOut) : - StraightlineExtractor pSpec oSpec OuterStmtIn OuterWitIn OuterWitOut := - fun outerWitOut outerStmtIn fullTranscript proveQueryLog verifyQueryLog => - let innerStmtIn := lens.projStmt outerStmtIn - let innerWitOut := lensInv.projWit outerWitOut - let innerWitIn := E innerWitOut innerStmtIn 
fullTranscript proveQueryLog verifyQueryLog - do return lensInv.liftWit (outerWitOut, ← innerWitIn) - -open Verifier in -/-- The outer round-by-round extractor after lifting invokes the inner extractor on the projected - input, and lifts the output -/ -def RBRExtractor.liftContext - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - [rbrLensInv : RBRWitnessLensInv OuterWitIn InnerWitIn] - (E : RBRExtractor pSpec oSpec InnerStmtIn InnerWitIn) : - RBRExtractor pSpec oSpec OuterStmtIn OuterWitIn := - fun roundIdx outerStmtIn fullTranscript proveQueryLog => - rbrLensInv.liftWit (E roundIdx (lens.projStmt outerStmtIn) fullTranscript proveQueryLog) - -/-- Compatibility relation between the outer input statement and the inner output statement, -relative to a verifier. - -We require that the inner output statement is a possible output of the verifier on the outer -input statement, for any given transcript. Note that we have to existentially quantify over -transcripts since we only reference the verifier, and there's no way to get the transcript without -a prover. -/ -def Verifier.compatStatement {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - (lens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) : - OuterStmtIn → InnerStmtOut → Prop := - fun outerStmtIn innerStmtOut => - innerStmtOut ∈ ⋃ transcript, (V.run (lens.projStmt outerStmtIn) transcript).support - -/-- Compatibility relation between the outer input context and the inner output context, relative -to a reduction. - -We require that the inner output context (statement + witness) is a possible output of the reduction -on the outer input context (statement + witness). 
-/ -def Reduction.compatContext {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} - {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} - (lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut) - (R : Reduction pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → Prop := - fun ⟨outerStmtIn, outerWitIn⟩ innerContextOut => - innerContextOut ∈ - Prod.fst <$> (R.run (lens.projStmt outerStmtIn) (lens.projWit outerWitIn)).support - -/-- The outer state function after lifting invokes the inner state function on the projected - input, and lifts the output -/ -def Verifier.StateFunction.liftContext [oSpec.FiniteRange] - [lens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut] - (outerLangIn : Set OuterStmtIn) (outerLangOut : Set OuterStmtOut) - (innerLangIn : Set InnerStmtIn) (innerLangOut : Set InnerStmtOut) - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) - [lensRBRSound : lens.IsRBRSound outerLangIn outerLangOut innerLangIn innerLangOut - (V.compatStatement lens)] - (stF : V.StateFunction pSpec oSpec innerLangIn innerLangOut) : - V.liftContext.StateFunction pSpec oSpec outerLangIn outerLangOut -where - toFun := fun m outerStmtIn transcript => - stF m (lens.projStmt outerStmtIn) transcript - toFun_empty := fun stmt hStmt => - stF.toFun_empty (lens.projStmt stmt) (lensRBRSound.proj_sound stmt hStmt) - toFun_next := fun m hDir outerStmtIn transcript hStmt msg => - stF.toFun_next m hDir (lens.projStmt outerStmtIn) transcript hStmt msg - toFun_full := fun outerStmtIn transcript hStmt => by - have h := stF.toFun_full (lens.projStmt outerStmtIn) transcript hStmt - simp [Verifier.run, Verifier.liftContext] at h ⊢ - intro innerStmtOut hSupport - apply lensRBRSound.lift_sound - · exact h innerStmtOut hSupport - · simp [compatStatement]; exact ⟨transcript, hSupport⟩ - --- def OracleProver.liftContext - --- def 
OracleVerifier.liftContext - --- def OracleReduction.liftContext - -section Theorems - -/- Theorems about liftContext interacting with reduction execution and security properties -/ - -section Prover - -/- Breaking down the intertwining of liftContext and prover execution -/ - -/-- Lifting the prover intertwines with the process round function -/ -theorem Prover.liftContext_processRound - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {i : Fin n} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) - {resultRound : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) - (pSpec.Transcript i.castSucc × P.liftContext.PrvState i.castSucc)} : - (P.liftContext (lens := lens)).processRound i resultRound - = do - let ⟨transcript, prvState, outerStmtIn, outerWitIn⟩ ← resultRound - let ⟨newTranscript, newPrvState⟩ ← P.processRound i (do return ⟨transcript, prvState⟩) - return ⟨newTranscript, ⟨newPrvState, outerStmtIn, outerWitIn⟩⟩ := by - unfold processRound liftContext - simp - congr 1; funext - split <;> simp - -/-- Lemma needed for the proof of `Prover.liftContext_runToRound` -/ -private lemma Prover.bind_map_processRound_pure {i : Fin n} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) - (comp : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) - (pSpec.Transcript i.castSucc × P.PrvState i.castSucc)) - {α : Type} (f : pSpec.Transcript i.succ × P.PrvState i.succ → α) : - (do let result ← comp; f <$> P.processRound i (pure result)) - = f <$> P.processRound i comp := by - simp [processRound] - -theorem Prover.liftContext_runToRound - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} {i : Fin (n + 1)} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - (P.liftContext (lens := lens)).runToRound i outerStmtIn outerWitIn - = do 
- let ⟨transcript, prvState⟩ ← - P.runToRound i (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return ⟨transcript, ⟨prvState, outerStmtIn, outerWitIn⟩⟩ := by - unfold runToRound - induction i using Fin.induction with - | zero => simp [liftContext] - | succ i ih => simp [liftContext_processRound, ih, bind_map_processRound_pure] - --- Requires more lemmas about `simulateQ` for logging oracles -theorem Prover.liftContext_runWithLogToRound - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} {i : Fin (n + 1)} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - (P.liftContext (lens := lens)).runWithLogToRound i outerStmtIn outerWitIn - = do - let ⟨transcript, prvState, queryLog⟩ ← - P.runWithLogToRound i (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return ⟨transcript, ⟨prvState, outerStmtIn, outerWitIn⟩, queryLog⟩ := by - unfold runWithLogToRound - induction i using Fin.induction with - | zero => simp [liftContext] - | succ i ih => simp [liftContext_runToRound] - -/-- Running the lifted outer prover is equivalent to running the inner prover on the projected - input, and then integrating the output -/ -theorem Prover.liftContext_run - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - P.liftContext.run outerStmtIn outerWitIn - = do - let ⟨innerStmtOut, innerWitOut, fullTranscript⟩ ← - P.run (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return ⟨lens.liftStmt (outerStmtIn, innerStmtOut), - lens.liftWit (outerWitIn, innerWitOut), - fullTranscript⟩ := by - simp only [Prover.run, liftContext_runToRound] - simp [liftContext] - -/- Lifting the prover intertwines with the runWithLog function -/ 
-theorem Prover.liftContext_runWithLog - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} - (P : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - P.liftContext.runWithLog outerStmtIn outerWitIn - = do - let ⟨innerStmtOut, innerWitOut, fullTranscript, queryLog⟩ ← - P.runWithLog (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return ⟨lens.liftStmt (outerStmtIn, innerStmtOut), - lens.liftWit (outerWitIn, innerWitOut), - fullTranscript, - queryLog⟩ := by - simp only [runWithLog, liftContext_runWithLogToRound] - simp [liftContext] - -end Prover - -theorem Reduction.liftContext_run - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} - (R : Reduction pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - R.liftContext.run outerStmtIn outerWitIn = do - let ⟨⟨prvInnerStmtOut, innerWitOut⟩, verInnerStmtOut, fullTranscript⟩ ← - R.run (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return ⟨⟨lens.liftStmt (outerStmtIn, prvInnerStmtOut), - lens.liftWit (outerWitIn, innerWitOut)⟩, - lens.liftStmt (outerStmtIn, verInnerStmtOut), - fullTranscript⟩ := by - unfold Reduction.run - simp [Reduction.liftContext, Prover.liftContext_run, Verifier.liftContext, Verifier.run] - -theorem Reduction.liftContext_runWithLog - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} - (R : Reduction pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut) : - R.liftContext.runWithLog outerStmtIn outerWitIn = do - let ⟨⟨prvInnerStmtOut, innerWitOut⟩, verInnerStmtOut, fullTranscript, queryLog⟩ ← - R.runWithLog (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - return 
⟨⟨lens.liftStmt (outerStmtIn, prvInnerStmtOut), - lens.liftWit (outerWitIn, innerWitOut)⟩, - lens.liftStmt (outerStmtIn, verInnerStmtOut), fullTranscript, queryLog⟩ := by - unfold Reduction.runWithLog - simp [Reduction.liftContext, Prover.liftContext_runWithLog, Verifier.liftContext, Verifier.run] - -variable [oSpec.FiniteRange] - -section Completeness - -variable - {outerRelIn : OuterStmtIn → OuterWitIn → Prop} {outerRelOut : OuterStmtOut → OuterWitOut → Prop} - {innerRelIn : InnerStmtIn → InnerWitIn → Prop} {innerRelOut : InnerStmtOut → InnerWitOut → Prop} - {R : Reduction pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut} - {completenessError : ℝ≥0} - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - [lensComplete : lens.IsComplete outerRelIn innerRelIn outerRelOut innerRelOut - (R.compatContext lens)] - -/-- - Lifting the reduction preserves completeness, assuming the lens satisfies its completeness - conditions --/ -theorem Reduction.liftContext_completeness - (h : R.completeness innerRelIn innerRelOut completenessError) : - R.liftContext.completeness outerRelIn outerRelOut completenessError := by - unfold completeness at h ⊢ - intro outerStmtIn outerWitIn hRelIn - have hR := h (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) - (lensComplete.proj_complete _ _ hRelIn) - rw [Reduction.liftContext_run] - refine le_trans hR ?_ - simp - refine probEvent_mono ?_ - intro ⟨innerContextOut, a, b⟩ hSupport ⟨hRelOut, hRelOut'⟩ - have : innerContextOut ∈ - Prod.fst <$> (R.run (lens.projStmt outerStmtIn) (lens.projWit outerWitIn)).support := by - simp - exact ⟨a, b, hSupport⟩ - simp_all - refine lensComplete.lift_complete _ _ _ _ hRelIn hRelOut ?_ - rw [← hRelOut'] - simp [compatContext]; exact this - --- Can't turn the above into an instance because Lean needs to synthesize `innerRelIn` and --- `innerRelOut` out of thin air. 
- --- instance [Reduction.IsComplete innerRelIn innerRelOut R completenessError] : --- R.liftContext.IsComplete outerRelIn outerRelOut completenessError := --- ⟨R.liftContext.completeness⟩ - --- instance [R.IsPerfectComplete relIn relOut] : --- R.liftContext.IsPerfectComplete relIn relOut := --- ⟨fun _ => R.liftContext.perfectCompleteness _ _ _⟩ - -end Completeness - -/-- Lifting the reduction preserves soundness, assuming the lens satisfies its soundness - conditions -/ -theorem Verifier.liftContext_soundness [Inhabited InnerStmtOut] - {outerLangIn : Set OuterStmtIn} {outerLangOut : Set OuterStmtOut} - {innerLangIn : Set InnerStmtIn} {innerLangOut : Set InnerStmtOut} - {soundnessError : ℝ≥0} - [lens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut] - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) - -- TODO: figure out the right compatibility relation for the IsSound condition - [lensSound : lens.IsSound outerLangIn outerLangOut innerLangIn innerLangOut - (V.compatStatement lens)] - (h : V.soundness innerLangIn innerLangOut soundnessError) : - V.liftContext.soundness outerLangIn outerLangOut soundnessError := by - unfold soundness Reduction.run at h ⊢ - -- Note: there is no distinction between `Outer` and `Inner` here - intro WitIn WitOut outerWitIn outerP outerStmtIn hOuterStmtIn - simp at h ⊢ - have innerP : Prover pSpec oSpec InnerStmtIn WitIn InnerStmtOut WitOut := { - PrvState := outerP.PrvState - input := fun _ _ => outerP.input outerStmtIn outerWitIn - sendMessage := outerP.sendMessage - receiveChallenge := outerP.receiveChallenge - output := fun state => - let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state - ⟨default, outerWitOut⟩ - } - have : lens.projStmt outerStmtIn ∉ innerLangIn := by - apply lensSound.proj_sound - exact hOuterStmtIn - have hSound := h WitIn WitOut outerWitIn innerP (lens.projStmt outerStmtIn) this - refine le_trans ?_ hSound - simp [Verifier.liftContext, Verifier.run] - -- Need to massage the two `probEvent`s 
so that they have the same base computation `oa` - -- Then apply `lensSound.lift_sound`? - sorry - -/- - Lifting the reduction preserves knowledge soundness, assuming the lens satisfies its - knowledge soundness conditions --/ -theorem Verifier.liftContext_knowledgeSoundness [Inhabited InnerStmtOut] - {outerRelIn : OuterStmtIn → OuterWitIn → Prop} {outerRelOut : OuterStmtOut → OuterWitOut → Prop} - {innerRelIn : InnerStmtIn → InnerWitIn → Prop} {innerRelOut : InnerStmtOut → InnerWitOut → Prop} - {knowledgeError : ℝ≥0} - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - (lensInv : WitnessLensInv OuterWitIn OuterWitOut InnerWitIn InnerWitOut) - [lensKnowledgeSound : lens.IsKnowledgeSound outerRelIn innerRelIn outerRelOut innerRelOut - (fun _ _ => True) lensInv] - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) - (h : V.knowledgeSoundness innerRelIn innerRelOut knowledgeError) : - V.liftContext.knowledgeSoundness outerRelIn outerRelOut - knowledgeError := by - unfold knowledgeSoundness at h ⊢ - obtain ⟨E, h'⟩ := h - refine ⟨E.liftContext (lens := lens), ?_⟩ - intro outerStmtIn outerWitIn outerP - simp [StraightlineExtractor.liftContext] - let innerP : Prover pSpec oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut := - { - PrvState := outerP.PrvState - input := fun _ _ => outerP.input outerStmtIn outerWitIn - sendMessage := outerP.sendMessage - receiveChallenge := outerP.receiveChallenge - output := fun state => - let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state - ⟨default, lensInv.projWit outerWitOut⟩ - } - have h_innerP_input {innerStmtIn} {innerWitIn} : - innerP.input innerStmtIn innerWitIn = outerP.input outerStmtIn outerWitIn := rfl - have hR := h' (lens.projStmt outerStmtIn) (lens.projWit outerWitIn) innerP - simp at hR - simp [Reduction.runWithLog, Verifier.liftContext, Verifier.run] at hR ⊢ - have h_innerP_runWithLog {innerStmtIn} {innerWitIn} : - innerP.runWithLog innerStmtIn 
innerWitIn - = do - let ⟨_, outerWitOut, rest⟩ ← outerP.runWithLog outerStmtIn outerWitIn - return ⟨default, lensInv.projWit outerWitOut, rest⟩ := by - sorry - refine le_trans ?_ hR - -- Massage the two `probEvent`s so that they have the same base computation `oa`? - simp [h_innerP_runWithLog] - -- apply probEvent_mono ?_ - sorry - -/- - Lifting the reduction preserves round-by-round soundness, assuming the lens satisfies its - soundness conditions --/ -theorem Verifier.liftContext_rbr_soundness [Inhabited InnerStmtOut] - {outerLangIn : Set OuterStmtIn} {outerLangOut : Set OuterStmtOut} - {innerLangIn : Set InnerStmtIn} {innerLangOut : Set InnerStmtOut} - {rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0} - [lens : StatementLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut] - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) - -- TODO: figure out the right compatibility relation for the IsSound condition - [lensRBRSound : lens.IsRBRSound outerLangIn outerLangOut innerLangIn innerLangOut - (V.compatStatement lens)] - (h : V.rbrSoundness innerLangIn innerLangOut rbrSoundnessError) : - V.liftContext.rbrSoundness outerLangIn outerLangOut rbrSoundnessError := by - unfold rbrSoundness at h ⊢ - obtain ⟨stF, h⟩ := h - simp at h ⊢ - refine ⟨stF.liftContext (lens := lens) (lensRBRSound := lensRBRSound), ?_⟩ - intro outerStmtIn hOuterStmtIn WitIn WitOut witIn outerP roundIdx hDir - have innerP : Prover pSpec oSpec InnerStmtIn WitIn InnerStmtOut WitOut := { - PrvState := outerP.PrvState - input := fun _ _ => outerP.input outerStmtIn witIn - sendMessage := outerP.sendMessage - receiveChallenge := outerP.receiveChallenge - output := fun state => - let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state - ⟨default, outerWitOut⟩ - } - have h' := h (lens.projStmt outerStmtIn) (lensRBRSound.proj_sound _ hOuterStmtIn) - WitIn WitOut witIn innerP roundIdx hDir - refine le_trans ?_ h' - sorry - -/- - Lifting the reduction preserves round-by-round knowledge soundness, assuming the 
lens - satisfies its knowledge soundness conditions --/ -theorem Verifier.liftContext_rbr_knowledgeSoundness - {outerRelIn : OuterStmtIn → OuterWitIn → Prop} {outerRelOut : OuterStmtOut → OuterWitOut → Prop} - {innerRelIn : InnerStmtIn → InnerWitIn → Prop} {innerRelOut : InnerStmtOut → InnerWitOut → Prop} - {rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0} - [lens : ContextLens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut - OuterWitIn OuterWitOut InnerWitIn InnerWitOut] - (lensInv : WitnessLensInv OuterWitIn OuterWitOut InnerWitIn InnerWitOut) - [lensKnowledgeSound : lens.IsKnowledgeSound outerRelIn innerRelIn outerRelOut innerRelOut - (fun _ _ => True) lensInv] - (V : Verifier pSpec oSpec InnerStmtIn InnerStmtOut) - (h : V.rbrKnowledgeSoundness innerRelIn innerRelOut rbrKnowledgeError) : - V.liftContext.rbrKnowledgeSoundness outerRelIn outerRelOut - rbrKnowledgeError := by - unfold rbrKnowledgeSoundness at h ⊢ - obtain ⟨stF, E, h⟩ := h - simp at h ⊢ - -- refine ⟨stF.liftContext (lens := lens.toStatementLens) - -- (lensSound := lensKnowledgeSound.toSound), - -- ?_, ?_⟩ - sorry - - --- TODO: state definitions & theorems about oracle reduction as well - --- def OracleReduction.liftContext_eq_reduction_liftContext - --- def OracleReduction.liftContext_completeness - --- def OracleVerifier.liftContext_soundness - --- def OracleVerifier.liftContext_knowledgeSoundness - --- def OracleVerifier.liftContext_rbr_soundness - --- def OracleVerifier.liftContext_rbr_knowledgeSoundness - -end Theorems - -end Transport - -section Test - -open Polynomial - --- Testing out sum-check-like relations - -noncomputable section - -def OuterStmtIn_Test := ℤ[X] × ℤ[X] × ℤ -def InnerStmtIn_Test := ℤ[X] × ℤ - -def outerRelIn_Test : OuterStmtIn_Test → Unit → Prop := - fun ⟨p, q, t⟩ _ => ∑ x ∈ {0, 1}, (p * q).eval x = t -def innerRelIn_Test : InnerStmtIn_Test → Unit → Prop := - fun ⟨f, t⟩ _ => ∑ x ∈ {0, 1}, f.eval x = t - -def OuterStmtOut_Test := ℤ[X] × ℤ[X] × ℤ × ℤ -def InnerStmtOut_Test := 
ℤ[X] × ℤ × ℤ - -def outerRelOut_Test : OuterStmtOut_Test → Unit → Prop := - fun ⟨p, q, t, r⟩ _ => (p * q).eval r = t -def innerRelOut_Test : InnerStmtOut_Test → Unit → Prop := - fun ⟨f, t, r⟩ _ => f.eval r = t - -def testLens : - ContextLens OuterStmtIn_Test OuterStmtOut_Test InnerStmtIn_Test InnerStmtOut_Test - Unit Unit Unit Unit where - projStmt := fun ⟨p, q, t⟩ => ⟨p * q, t⟩ - liftStmt := fun ⟨⟨p, q, _⟩, ⟨_, t', u⟩⟩ => (p, q, t', u) - projWit := fun (_ : Unit) => (() : Unit) - liftWit := fun (_ : Unit × Unit) => (() : Unit) - -def testLensInv : WitnessLensInv Unit Unit Unit Unit where - projWit := fun _ => () - liftWit := fun _ => () - -def testLensComplete : testLens.IsComplete - outerRelIn_Test innerRelIn_Test outerRelOut_Test innerRelOut_Test - (fun ⟨⟨p, q, _⟩, ()⟩ ⟨⟨f, _⟩, ()⟩ => p * q = f) where - proj_complete := fun ⟨p, q, t⟩ () hRelIn => by - simp [outerRelIn_Test] at hRelIn - simp [innerRelIn_Test, testLens, hRelIn] - lift_complete := fun ⟨p, q, t⟩ _ ⟨f, t', r⟩ _ hRelIn hRelOut' hCompat => by - simp [outerRelIn_Test] at hRelIn - simp [innerRelOut_Test] at hRelOut' - simp at hCompat - simp [outerRelOut_Test, testLens, hRelIn, ← hRelOut', hCompat] - -def testLensKnowledgeSound : testLens.IsKnowledgeSound - outerRelIn_Test innerRelIn_Test outerRelOut_Test innerRelOut_Test - (fun ⟨⟨p, q, _⟩, ()⟩ ⟨⟨f, _⟩, ()⟩ => p * q = f) testLensInv where - proj_knowledge_sound := fun ⟨p, q, t⟩ () hRelIn => by - simp [outerRelIn_Test] at hRelIn - simp [innerRelIn_Test, testLens, hRelIn] - lift_knowledge_sound := fun ⟨p, q, t⟩ _ ⟨f, t', r⟩ _ hRelIn hRelOut' hCompat => by - simp [outerRelIn_Test] at hRelIn - sorry -end - -end Test diff --git a/ArkLib/OracleReduction/LiftContext/Lens.lean b/ArkLib/OracleReduction/LiftContext/Lens.lean new file mode 100644 index 000000000..f84334c34 --- /dev/null +++ b/ArkLib/OracleReduction/LiftContext/Lens.lean @@ -0,0 +1,685 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. 
+Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic +import ToMathlib.PFunctor.Basic + +/-! + ## Lens between Input and Output Contexts of (Oracle) Reductions + + This file defines the different lenses required for the transformation / lifting of context for an + (oracle) reduction, and the properties required for the transformation / lift to be complete / + sound / knowledge sound (including an extra lens for the transformation / lifting of the + extractor). + + We also define simpler examples of lenses, when we don't need the full generality. For instance, + lenses where we have (only) an equivalence between the statements / witnesses, or lenses where the + witnesses are trivial. +-/ + +open OracleSpec OracleComp + +/-- A lens for transporting input and output statements for the verifier of a (non-oracle) + reduction. + + Consists of two functions: + - `proj : OuterStmtIn → InnerStmtIn` : Transport input statements from the outer context to + the inner context + - `lift : OuterStmtIn → InnerStmtOut → OuterStmtOut` : Transport output statements from the + inner context to the outer context, additionally relying on the outer input statement. + + This is exactly the same as a `PFunctor.Lens` between two monomials defined by the input and + output statements (from the outer to the inner context). 
+-/ +@[inline, reducible] +def Statement.Lens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) + := PFunctor.Lens (OuterStmtIn y^ OuterStmtOut) (InnerStmtIn y^ InnerStmtOut) + +namespace Statement.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + (lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) + +/-- Transport input statements from the outer context to the inner context -/ +@[inline, reducible] +def proj : OuterStmtIn → InnerStmtIn := + lens.toFunA + +/-- Transport output statements from the inner context to the outer context, + additionally relying on the input statements of the outer context. -/ +@[inline, reducible] +def lift : OuterStmtIn → InnerStmtOut → OuterStmtOut := + lens.toFunB + +end Statement.Lens + +/-- A lens for transporting input and output statements (both oracle and non-oracle) for the + oracle verifier of an oracle reduction. + + TODO: figure out the right way to define this -/ +@[inline, reducible] +def OracleStatement.Lens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) + {Outer_ιₛᵢ : Type} (OuterOStmtIn : Outer_ιₛᵢ → Type) [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} (OuterOStmtOut : Outer_ιₛₒ → Type) [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} (InnerOStmtIn : Inner_ιₛᵢ → Type) [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} (InnerOStmtOut : Inner_ιₛₒ → Type) [∀ i, OracleInterface (InnerOStmtOut i)] + := + Statement.Lens (OuterStmtIn × ∀ i, OuterOStmtIn i) (OuterStmtOut × ∀ i, OuterOStmtOut i) + (InnerStmtIn × ∀ i, InnerOStmtIn i) (InnerStmtOut × ∀ i, InnerOStmtOut i) + -- TODO: fill in the extra conditions + /- Basically, as we model the output oracle statement as a subset of the input oracle statement + + the prover's messages, we need to make sure that this subset relation is satisfied in the + statement lens mapping. 
+ + We also need to provide a `QueryImpl` instance for simulating the outer oracle verifier using + the inner oracle verifier. + -/ + + -- simOStmt : QueryImpl [InnerOStmtIn]ₒ + -- (ReaderT OuterStmtIn (OracleComp [OuterOStmtIn]ₒ)) + + -- simOStmt_neverFails : ∀ i, ∀ t, ∀ outerStmtIn, + -- ((simOStmt.impl (query i t)).run outerStmtIn).neverFails + -- To get back an output oracle statement in the outer context, we may simulate it using the input + -- (non-oracle) statement of the outer context, the output (non-oracle) statement of the inner + -- context, along with oracle access to the inner output oracle statements + + -- liftOStmt : QueryImpl [OuterOStmtOut]ₒ + -- (ReaderT (OuterStmtIn × InnerStmtOut) (OracleComp ([OuterOStmtIn]ₒ ++ₒ [InnerOStmtOut]ₒ))) + -- liftOStmt_neverFails : ∀ i, ∀ t, ∀ outerStmtIn, ∀ innerStmtOut, + -- ((liftOStmt.impl (query i t)).run (outerStmtIn, innerStmtOut)).neverFails + +namespace OracleStatement.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + (lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut) +/-- Transport input statements from the outer context to the inner context + +TODO: refactor etc. -/ +@[inline, reducible] +def proj : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtIn × (∀ i, InnerOStmtIn i) := + lens.toFunA + +/-- Transport output statements from the inner context to the outer context, + additionally relying on the input statements of the outer context. + + TODO: refactor etc. 
-/ +@[inline, reducible] +def lift : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtOut × (∀ i, InnerOStmtOut i) → + OuterStmtOut × (∀ i, OuterOStmtOut i) := + lens.toFunB + +-- def toVerifierLens : Statement.Lens +-- (OuterStmtIn × ∀ i, OuterOStmtIn i) (OuterStmtOut × ∀ i, OuterOStmtOut i) +-- (InnerStmtIn × ∀ i, InnerOStmtIn i) (InnerStmtOut × ∀ i, InnerOStmtOut i) +-- := oStmtLens + +end OracleStatement.Lens + +/-- Lenses for transporting the input & output witnesses from an inner protocol to an outer + protocol. + + It consists of two functions: + - `projWit : OuterStmtIn × OuterWitIn → InnerWitIn`, which derives the inner input witness from + the outer one, requiring also the outer input statement. + - `liftWit : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → OuterWitOut`, which + derives the outer output witness from outer input witness & the inner output one, requiring + also the associated statements. + + The inclusion of the statements are necessary when we consider the full view of the prover. In + practice as well, oftentimes a lens between only witnesses are not enough. -/ +@[inline, reducible] +def Witness.Lens (OuterStmtIn InnerStmtOut OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) + := PFunctor.Lens ((OuterStmtIn × OuterWitIn) y^ OuterWitOut) + (InnerWitIn y^ (InnerStmtOut × InnerWitOut)) + +namespace Witness.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (lens : Witness.Lens OuterStmtIn InnerStmtOut OuterWitIn OuterWitOut InnerWitIn InnerWitOut) +/-- Transport input witness from the outer context to the inner context -/ +@[inline, reducible] +def proj : OuterStmtIn × OuterWitIn → InnerWitIn := + lens.toFunA + +/-- Transport output witness from the inner context to the outer context, + additionally relying on the input statements of the outer context. 
-/ +@[inline, reducible] +def lift : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → OuterWitOut := + lens.toFunB + +end Witness.Lens + +/-- A structure collecting a lens for the prover, and a lens for the verifier, for transporting + between the contexts of an outer reduction and an inner reduction. -/ +structure Context.Lens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) where + stmt : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + wit : Witness.Lens OuterStmtIn InnerStmtOut OuterWitIn OuterWitOut InnerWitIn InnerWitOut + +namespace Context.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + +/-- Projection of the context. -/ +@[inline, reducible] +def proj : OuterStmtIn × OuterWitIn → InnerStmtIn × InnerWitIn := + fun ctxIn => ⟨lens.stmt.proj ctxIn.1, lens.wit.proj ctxIn⟩ + +/-- Lifting of the context. -/ +@[inline, reducible] +def lift : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → OuterStmtOut × OuterWitOut := + fun ctxIn ctxOut => + ⟨lens.stmt.lift ctxIn.1 ctxOut.1, lens.wit.lift ctxIn ctxOut⟩ + +end Context.Lens + +/-- A structure collecting a lens for the prover, and a lens for the oracle verifier, for + transporting between the contexts of an outer oracle reduction and an inner oracle reduction. 
-/ +structure OracleContext.Lens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type) + {Outer_ιₛᵢ : Type} (OuterOStmtIn : Outer_ιₛᵢ → Type) [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} (OuterOStmtOut : Outer_ιₛₒ → Type) [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} (InnerOStmtIn : Inner_ιₛᵢ → Type) [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} (InnerOStmtOut : Inner_ιₛₒ → Type) [∀ i, OracleInterface (InnerOStmtOut i)] + (OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) where + stmt : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + wit : Witness.Lens (OuterStmtIn × ∀ i, OuterOStmtIn i) (InnerStmtOut × ∀ i, InnerOStmtOut i) + OuterWitIn OuterWitOut InnerWitIn InnerWitOut + +namespace OracleContext.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) +/-- Projection of the context. -/ +@[inline, reducible] +def proj : (OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn → + (InnerStmtIn × (∀ i, InnerOStmtIn i)) × InnerWitIn := + fun ctxIn => ⟨lens.stmt.proj ctxIn.1, lens.wit.proj ctxIn⟩ + +/-- Lifting of the context. 
-/ +@[inline, reducible] +def lift : (OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn → + (InnerStmtOut × (∀ i, InnerOStmtOut i)) × InnerWitOut → + (OuterStmtOut × (∀ i, OuterOStmtOut i)) × OuterWitOut := + fun ctxIn ctxOut => ⟨lens.stmt.lift ctxIn.1 ctxOut.1, lens.wit.lift ctxIn ctxOut⟩ + +/-- Convert the oracle context lens to a context lens. -/ +@[inline, reducible] +def toContext : + Context.Lens (OuterStmtIn × (∀ i, OuterOStmtIn i)) (OuterStmtOut × (∀ i, OuterOStmtOut i)) + (InnerStmtIn × (∀ i, InnerOStmtIn i)) (InnerStmtOut × (∀ i, InnerOStmtOut i)) + OuterWitIn OuterWitOut InnerWitIn InnerWitOut := + ⟨lens.stmt, lens.wit⟩ + +end OracleContext.Lens + +/-- Lens for lifting the witness extraction procedure from the inner reduction to the outer + reduction. + +This goes in the reverse direction (output to input) compared to the witness lens for the prover, +and requires in addition the outer input statement. +-/ +@[inline, reducible] +def Witness.InvLens (OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) + := PFunctor.Lens ((OuterStmtIn × OuterWitOut) y^ OuterWitIn) + (InnerWitOut y^ InnerWitIn) + +namespace Witness.InvLens + +variable {OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (lens : Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + +/-- Projection of the witness. -/ +@[inline, reducible] +def proj : OuterStmtIn × OuterWitOut → InnerWitOut := + lens.toFunA + +/-- Lifting of the witness. -/ +@[inline, reducible] +def lift : OuterStmtIn × OuterWitOut → InnerWitIn → OuterWitIn := + lens.toFunB + +end Witness.InvLens + +/-- Lens for lifting the extractor from the inner reduction to the outer reduction. + +This consists of two components: +- `stmt` : the statement lens +- `wit` : the witness lens in the reverse direction, matching the input-output interface of the + extractor, i.e. 
`StmtIn × WitOut → WitIn` (ignoring transcript and query logs) +-/ +@[ext] +structure Extractor.Lens (OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type) where + stmt : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + wit : Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut + +namespace Extractor.Lens + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + +/-- Transport the tuple of (input statement, output witness) from the outer context to the inner + context -/ +@[inline, reducible] +def proj (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) : + OuterStmtIn × OuterWitOut → InnerStmtIn × InnerWitOut := + fun ⟨stmtIn, witOut⟩ => ⟨lens.stmt.proj stmtIn, lens.wit.proj (stmtIn, witOut)⟩ + +-- /-- Transport the inner input witness to the outer input witness, also relying on the tuple +-- (outer-- input statement, outer output witness) -/ +-- @[inline, reducible] +-- def lift (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut +-- OuterWitIn OuterWitOut InnerWitIn InnerWitOut) : +-- OuterStmtIn × OuterWitOut → InnerWitIn → OuterWitIn := +-- fun ⟨stmtIn, witOut⟩ innerWitIn => +-- lens.wit.lift (stmtIn, witOut) innerWitIn + +end Extractor.Lens + +/-- Conditions for the lens / transformation to preserve completeness + +For `lift`, we require compatibility relations between the outer input statement/witness and +the inner output statement/witness -/ +class Context.Lens.IsComplete {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (outerRelIn : Set (OuterStmtIn × OuterWitIn)) + (innerRelIn : Set (InnerStmtIn × InnerWitIn)) + (outerRelOut 
: Set (OuterStmtOut × OuterWitOut)) + (innerRelOut : Set (InnerStmtOut × InnerWitOut)) + (compat : (OuterStmtIn × OuterWitIn) → (InnerStmtOut × InnerWitOut) → Prop) + (lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) where + + proj_complete : ∀ stmtIn witIn, + (stmtIn, witIn) ∈ outerRelIn → + (lens.stmt.proj stmtIn, lens.wit.proj (stmtIn, witIn)) ∈ innerRelIn + + lift_complete : ∀ outerStmtIn outerWitIn innerStmtOut innerWitOut, + compat (outerStmtIn, outerWitIn) (innerStmtOut, innerWitOut) → + (outerStmtIn, outerWitIn) ∈ outerRelIn → + (innerStmtOut, innerWitOut) ∈ innerRelOut → + (lens.stmt.lift outerStmtIn innerStmtOut, + lens.wit.lift (outerStmtIn, outerWitIn) (innerStmtOut, innerWitOut)) ∈ outerRelOut + +/-- The completeness condition for the oracle context lens is just the one for the underlying + context lens -/ +@[reducible, simp] +def OracleContext.Lens.IsComplete {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (outerRelIn : Set ((OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn)) + (innerRelIn : Set ((InnerStmtIn × (∀ i, InnerOStmtIn i)) × InnerWitIn)) + (outerRelOut : Set ((OuterStmtOut × (∀ i, OuterOStmtOut i)) × OuterWitOut)) + (innerRelOut : Set ((InnerStmtOut × (∀ i, InnerOStmtOut i)) × InnerWitOut)) + (compat : (OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn → + (InnerStmtOut × (∀ i, InnerOStmtOut i)) × InnerWitOut → Prop) + (lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut 
InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) := + Context.Lens.IsComplete outerRelIn innerRelIn outerRelOut innerRelOut compat lens.toContext + +/-- Conditions for the lens / transformation to preserve soundness -/ +class Statement.Lens.IsSound {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + (outerLangIn : Set OuterStmtIn) (outerLangOut : Set OuterStmtOut) + (innerLangIn : Set InnerStmtIn) (innerLangOut : Set InnerStmtOut) + (compatStmt : OuterStmtIn → InnerStmtOut → Prop) + (lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) where + + proj_sound : ∀ outerStmtIn, + outerStmtIn ∉ outerLangIn → lens.proj outerStmtIn ∉ innerLangIn + + lift_sound : ∀ outerStmtIn innerStmtOut, + compatStmt outerStmtIn innerStmtOut → + innerStmtOut ∉ innerLangOut → + lens.lift outerStmtIn innerStmtOut ∉ outerLangOut + +/-- The soundness condition for the oracle statement lens is just the one for the underlying + statement lens -/ +@[reducible, simp] +def OracleStatement.Lens.IsSound {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + (outerLangIn : Set (OuterStmtIn × (∀ i, OuterOStmtIn i))) + (outerLangOut : Set (OuterStmtOut × (∀ i, OuterOStmtOut i))) + (innerLangIn : Set (InnerStmtIn × (∀ i, InnerOStmtIn i))) + (innerLangOut : Set (InnerStmtOut × (∀ i, InnerOStmtOut i))) + (compatStmt : + OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtOut × (∀ i, InnerOStmtOut i) → Prop) + (lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut) := + Statement.Lens.IsSound 
outerLangIn outerLangOut innerLangIn innerLangOut compatStmt lens + +/-- Conditions for the extractor lens to preserve knowledge soundness -/ +class Extractor.Lens.IsKnowledgeSound + {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + (outerRelIn : Set (OuterStmtIn × OuterWitIn)) + (innerRelIn : Set (InnerStmtIn × InnerWitIn)) + (outerRelOut : Set (OuterStmtOut × OuterWitOut)) + (innerRelOut : Set (InnerStmtOut × InnerWitOut)) + (compatStmt : OuterStmtIn → InnerStmtOut → Prop) + (compatWit : OuterWitOut → InnerWitIn → Prop) + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) where + + /-- outer_to_inner for output witness (note: for statements, it's `lift` in this condition) -/ + proj_knowledgeSound : ∀ outerStmtIn innerStmtOut outerWitOut, + compatStmt outerStmtIn innerStmtOut → + (lens.stmt.lift outerStmtIn innerStmtOut, outerWitOut) ∈ outerRelOut → + (innerStmtOut, lens.wit.proj (outerStmtIn, outerWitOut)) ∈ innerRelOut + + /-- inner_to_outer for input witness (note: for statements, it's `proj` in this condition) -/ + lift_knowledgeSound : ∀ outerStmtIn outerWitOut innerWitIn, + compatWit outerWitOut innerWitIn → + (lens.stmt.proj outerStmtIn, innerWitIn) ∈ innerRelIn → + (outerStmtIn, lens.wit.lift (outerStmtIn, outerWitOut) innerWitIn) ∈ outerRelIn + +namespace Extractor.Lens.IsKnowledgeSound + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + {outerRelIn : Set (OuterStmtIn × OuterWitIn)} + {innerRelIn : Set (InnerStmtIn × InnerWitIn)} + {outerRelOut : Set (OuterStmtOut × OuterWitOut)} + {innerRelOut : Set (InnerStmtOut × InnerWitOut)} + (compatStmt : OuterStmtIn → InnerStmtOut → Prop) + (compatWit : OuterWitOut → InnerWitIn → Prop) + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn 
InnerWitOut) + +/-- If an extractor lens is knowledge sound, then the associated statement lens is sound. -/ +instance [Inhabited OuterWitOut] + [instKS : lens.IsKnowledgeSound + outerRelIn innerRelIn + outerRelOut innerRelOut + compatStmt (fun _ _ => True)] : + lens.stmt.IsSound + outerRelIn.language outerRelOut.language + innerRelIn.language innerRelOut.language + compatStmt where + proj_sound := fun outerStmtIn hCompat => by + simp [Set.language] at hCompat ⊢ + intro innerWitIn hRelIn + contrapose! hCompat + let outerWitIn := lens.wit.lift (outerStmtIn, default) innerWitIn + have hOuterWitIn := instKS.lift_knowledgeSound outerStmtIn default innerWitIn (by simp) hRelIn + exact ⟨outerWitIn, hOuterWitIn⟩ + lift_sound := fun outerStmtIn innerStmtOut hCompat hInnerRelOut => by + simp [Set.language] at hCompat hInnerRelOut ⊢ + intro outerWitOut hOuterRelOut + contrapose! hInnerRelOut + let innerWitOut := lens.wit.proj (outerStmtIn, outerWitOut) + have hInnerWitOut := + instKS.proj_knowledgeSound outerStmtIn innerStmtOut outerWitOut hCompat hOuterRelOut + exact ⟨innerWitOut, hInnerWitOut⟩ + +end Extractor.Lens.IsKnowledgeSound + +section SpecialCases + +-- Plan (do not delete) + +-- 1. When the lens is over the input context only (keeping the output the same) +-- 1.1. Over the input statement only +-- 1.1.1. When the map is an equivalence +-- 1.2. Over the input witness only +-- 1.2.1. When the map is an equivalence + +-- TODO for oracle statements as we haven't figured it out + +-- 2. When the lens is over the output context only (keeping the input the same) +-- 2.1. Over the output statement only +-- 2.1.1. When the map is an equivalence +-- 2.2. Over the output witness only +-- 2.2.1. When the map is an equivalence + +-- When does this lead to secure protocols? 
Since one of input / output is trivial, this essentially +-- reduces to the security of the zero-round reduction (that is, either on the input or the +-- output context) + +variable {OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + {OuterWitIn OuterWitOut InnerWitIn InnerWitOut : Type} + +namespace Statement.Lens + +/-- The identity lens for the statement, which acts as identity on the input and output. -/ +@[inline, reducible] +protected def id : + Statement.Lens OuterStmtIn OuterStmtOut OuterStmtIn OuterStmtOut := + PFunctor.Lens.id _ + +alias trivial := Statement.Lens.id + +/-- Lens for the statement which keeps the output the same, and hence only requires a + projection on the input. -/ +@[inline] +def ofInputOnly (projStmt : OuterStmtIn → InnerStmtIn) : + Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn OuterStmtOut := + ⟨projStmt, fun _ => id⟩ + +/-- Lens for the statement which keeps the input the same, and hence only requires a + lift on the output. -/ +@[inline] +def ofOutputOnly (liftStmt : OuterStmtIn → InnerStmtOut → OuterStmtOut) : + Statement.Lens OuterStmtIn OuterStmtOut OuterStmtIn InnerStmtOut := + ⟨id, liftStmt⟩ + +end Statement.Lens + +namespace OracleStatement.Lens + +-- TODO: replace with new definitions when we figure out the right definition for oracle statements +-- lens + +/-- The identity lens for the statement, which acts as identity on the input and output. 
-/ +@[inline, reducible] +protected def id : + OracleStatement.Lens OuterStmtIn OuterStmtOut OuterStmtIn OuterStmtOut + OuterOStmtIn OuterOStmtOut OuterOStmtIn OuterOStmtOut := + PFunctor.Lens.id _ + +alias trivial := OracleStatement.Lens.id + +/-- Lens for the statement which keeps the output the same, and hence only requires a + projection on the input. -/ +@[inline] +def ofInputOnly + (projStmt : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtIn × (∀ i, InnerOStmtIn i)) : + OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn OuterStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn OuterOStmtOut := + ⟨projStmt, fun _ => id⟩ + +/-- Lens for the statement which keeps the input the same, and hence only requires a + lift on the output. -/ +@[inline] +def ofOutputOnly + (liftStmt : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtOut × (∀ i, InnerOStmtOut i) → + OuterStmtOut × (∀ i, OuterOStmtOut i)) : + OracleStatement.Lens OuterStmtIn OuterStmtOut OuterStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut OuterOStmtIn InnerOStmtOut := + ⟨id, liftStmt⟩ + +end OracleStatement.Lens + +namespace Witness.Lens + +/-- The identity lens for the witness, which acts as projection from the context (statement + + witness) to the witness. -/ +@[inline, reducible] +protected def id : + Witness.Lens OuterStmtIn OuterStmtOut OuterWitIn OuterWitOut OuterWitIn OuterWitOut := + ⟨Prod.snd, fun _ => Prod.snd⟩ + +alias trivial := Witness.Lens.id + +/-- Lens for the witness which keeps the output context (statement + witness) the same, and hence + only requires a projection for the input witness. -/ +@[inline] +def ofInputOnly (projWit : OuterStmtIn × OuterWitIn → InnerWitIn) : + Witness.Lens OuterStmtIn OuterStmtOut OuterWitIn OuterWitOut InnerWitIn OuterWitOut := + ⟨projWit, fun _ => Prod.snd⟩ + +/-- Lens for the witness which keeps the input context (statement + witness) the same, and hence + only requires a lift for the output witness. 
-/ +@[inline] +def ofOutputOnly + (liftWit : OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → OuterWitOut) : + Witness.Lens OuterStmtIn InnerStmtOut OuterWitIn OuterWitOut OuterWitIn InnerWitOut := + ⟨Prod.snd, liftWit⟩ + +end Witness.Lens + +namespace Witness.InvLens + +/-- The identity inverse lens for the witness, whose projection is product projection to the second + component, and lifting is identity. -/ +@[inline, reducible] +protected def id : + Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut OuterWitIn OuterWitOut := + ⟨Prod.snd, fun _ => id⟩ + +alias trivial := Witness.InvLens.id + +/-- Inverse lens for the witness which is the identity on the input witness (inner to outer), and + only requires a projection for the output witness (outer to inner). -/ +@[inline] +def ofOutputOnly (projWit : OuterStmtIn × OuterWitOut → InnerWitOut) : + Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut OuterWitIn InnerWitOut := + ⟨projWit, fun _ => id⟩ + +/-- Inverse lens for the witness which is the second projection on the output witness (outer to + inner), and only requires a lift for the input witness (inner to outer). -/ +@[inline] +def ofInputOnly + (liftWit : OuterStmtIn × OuterWitOut → InnerWitIn → OuterWitIn) : + Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut InnerWitIn OuterWitOut := + ⟨Prod.snd, liftWit⟩ + +end Witness.InvLens + +namespace Context.Lens + +/-- The identity lens for the context, which combines the identity statement and witness lenses. -/ +@[inline, reducible] +protected def id : + Context.Lens OuterStmtIn OuterStmtOut OuterStmtIn OuterStmtOut + OuterWitIn OuterWitOut OuterWitIn OuterWitOut where + stmt := Statement.Lens.id + wit := Witness.Lens.id + +alias trivial := Context.Lens.id + +/-- Lens for the context which keeps the output contexts the same, and only requires projections on + the statement & witness for the input. 
-/ +@[inline] +def ofInputOnly + (stmtProj : OuterStmtIn → InnerStmtIn) + (witProj : OuterStmtIn × OuterWitIn → InnerWitIn) : + Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn OuterStmtOut + OuterWitIn OuterWitOut InnerWitIn OuterWitOut where + stmt := Statement.Lens.ofInputOnly stmtProj + wit := Witness.Lens.ofInputOnly witProj + +/-- Lens for the context which keeps the input contexts the same, and only requires lifts on the + statement & witness for the output. -/ +@[inline] +def ofOutputOnly + (witLift : + OuterStmtIn × OuterWitIn → InnerStmtOut × InnerWitOut → OuterWitOut) + (stmtLift : OuterStmtIn → InnerStmtOut → OuterStmtOut) : + Context.Lens OuterStmtIn OuterStmtOut OuterStmtIn InnerStmtOut + OuterWitIn OuterWitOut OuterWitIn InnerWitOut where + wit := Witness.Lens.ofOutputOnly witLift + stmt := Statement.Lens.ofOutputOnly stmtLift + +end Context.Lens + +namespace OracleContext.Lens + +/-- The identity lens for the context, which combines the identity statement and witness lenses. -/ +@[inline, reducible] +protected def id : + OracleContext.Lens OuterStmtIn OuterStmtOut OuterStmtIn OuterStmtOut + OuterOStmtIn OuterOStmtOut OuterOStmtIn OuterOStmtOut + OuterWitIn OuterWitOut OuterWitIn OuterWitOut where + stmt := OracleStatement.Lens.id + wit := Witness.Lens.id + +alias trivial := OracleContext.Lens.id + +/-- Lens for the oracle context which keeps the output contexts the same, and only requires + projections on the statement & witness for the input. 
-/ +@[inline] +def ofInputOnly + (stmtProj : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtIn × (∀ i, InnerOStmtIn i)) + (witProj : (OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn → InnerWitIn) : + OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn OuterStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn OuterOStmtOut + OuterWitIn OuterWitOut InnerWitIn OuterWitOut where + stmt := OracleStatement.Lens.ofInputOnly stmtProj + wit := Witness.Lens.ofInputOnly witProj + +/-- Lens for the oracle context which keeps the input contexts the same, and only requires lifts on + the statement & witness for the output. -/ +@[inline] +def ofOutputOnly + (stmtLift : OuterStmtIn × (∀ i, OuterOStmtIn i) → InnerStmtOut × (∀ i, InnerOStmtOut i) → + OuterStmtOut × (∀ i, OuterOStmtOut i)) + (witLift : (OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn → + (InnerStmtOut × (∀ i, InnerOStmtOut i)) × InnerWitOut → OuterWitOut) : + OracleContext.Lens OuterStmtIn OuterStmtOut OuterStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut OuterOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut OuterWitIn InnerWitOut where + stmt := OracleStatement.Lens.ofOutputOnly stmtLift + wit := Witness.Lens.ofOutputOnly witLift + +end OracleContext.Lens + +namespace Extractor.Lens + +/-- The identity lens for the extractor on the witness, given a statement lens. 
-/ +@[inline, reducible] +def idWit {stmtLens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut} : + Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut OuterWitIn OuterWitOut where + stmt := stmtLens + wit := Witness.InvLens.id + +alias trivialWit := Extractor.Lens.idWit + +end Extractor.Lens + +end SpecialCases diff --git a/ArkLib/OracleReduction/LiftContext/OracleReduction.lean b/ArkLib/OracleReduction/LiftContext/OracleReduction.lean new file mode 100644 index 000000000..689a2ac98 --- /dev/null +++ b/ArkLib/OracleReduction/LiftContext/OracleReduction.lean @@ -0,0 +1,205 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.LiftContext.Reduction + +/-! + ## Lifting Oracle Reductions to Larger Contexts + + This file is a continuation of `LiftContext/Reduction.lean`, where we lift oracle reductions to + larger contexts. + + The only new thing here is the definition of the oracle verifier. The rest (oracle prover + + security properties) are just ported from `LiftContext/Reduction.lean`, with suitable conversions. 
+-/ + +open OracleSpec OracleComp ProtocolSpec + +open scoped NNReal + +variable {ι : Type} {oSpec : OracleSpec ι} + {OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut : Type} + {Outer_ιₛᵢ : Type} {OuterOStmtIn : Outer_ιₛᵢ → Type} [∀ i, OracleInterface (OuterOStmtIn i)] + {Outer_ιₛₒ : Type} {OuterOStmtOut : Outer_ιₛₒ → Type} [∀ i, OracleInterface (OuterOStmtOut i)] + {Inner_ιₛᵢ : Type} {InnerOStmtIn : Inner_ιₛᵢ → Type} [∀ i, OracleInterface (InnerOStmtIn i)] + {Inner_ιₛₒ : Type} {InnerOStmtOut : Inner_ιₛₒ → Type} [∀ i, OracleInterface (InnerOStmtOut i)] + {InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + +/-- The lifting of the prover from an inner oracle reduction to an outer oracle reduction, requiring + an associated oracle context lens -/ +def OracleProver.liftContext + (lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (P : OracleProver oSpec InnerStmtIn InnerOStmtIn InnerWitIn + InnerStmtOut InnerOStmtOut InnerWitOut pSpec) : + OracleProver oSpec OuterStmtIn OuterOStmtIn OuterWitIn + OuterStmtOut OuterOStmtOut OuterWitOut pSpec := + Prover.liftContext lens.toContext P + +variable [∀ i, OracleInterface (pSpec.Message i)] + +/-- The lifting of the verifier from an inner oracle reduction to an outer oracle reduction, + requiring an associated oracle statement lens -/ +def OracleVerifier.liftContext + (lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut) + (V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec) : + OracleVerifier oSpec OuterStmtIn OuterOStmtIn OuterStmtOut OuterOStmtOut pSpec where + verify := fun outerStmtIn transcript => sorry + embed := by + have := V.embed + + sorry + hEq := sorry + +/-- The lifting of an inner oracle reduction to an outer oracle reduction, + 
requiring an associated oracle context lens -/ +def OracleReduction.liftContext + (lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (R : OracleReduction oSpec InnerStmtIn InnerOStmtIn InnerWitIn + InnerStmtOut InnerOStmtOut InnerWitOut pSpec) : + OracleReduction oSpec OuterStmtIn OuterOStmtIn OuterWitIn + OuterStmtOut OuterOStmtOut OuterWitOut pSpec where + prover := R.prover.liftContext lens + verifier := R.verifier.liftContext lens.stmt + +section Execution + +/-- The lifting of the verifier commutes with the conversion from the oracle verifier to the + verifier -/ +theorem OracleVerifier.liftContext_toVerifier_comm + {lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut} + {V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec} : + (V.liftContext lens).toVerifier = V.toVerifier.liftContext lens := by + sorry + +/-- The lifting of the reduction commutes with the conversion from the oracle reduction to the + reduction -/ +theorem OracleReduction.liftContext_toReduction_comm + {lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {R : OracleReduction oSpec InnerStmtIn InnerOStmtIn InnerWitIn + InnerStmtOut InnerOStmtOut InnerWitOut pSpec} : + (R.liftContext lens).toReduction = R.toReduction.liftContext lens.toContext := by + sorry + +end Execution + +section Security + +variable [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + {outerRelIn : Set ((OuterStmtIn × (∀ i, OuterOStmtIn i)) × OuterWitIn)} + {outerRelOut : Set ((OuterStmtOut × (∀ i, OuterOStmtOut i)) × OuterWitOut)} + {innerRelIn : Set ((InnerStmtIn × (∀ i, InnerOStmtIn i)) × InnerWitIn)} + {innerRelOut : Set ((InnerStmtOut × 
(∀ i, InnerOStmtOut i)) × InnerWitOut)} + +namespace OracleReduction + +variable + {lens : OracleContext.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {R : OracleReduction oSpec InnerStmtIn InnerOStmtIn InnerWitIn + InnerStmtOut InnerOStmtOut InnerWitOut pSpec} + [lensComplete : lens.toContext.IsComplete outerRelIn innerRelIn outerRelOut innerRelOut + (R.toReduction.compatContext lens.toContext)] + {completenessError : ℝ≥0} + +theorem liftContext_completeness + (h : R.completeness innerRelIn innerRelOut completenessError) : + (R.liftContext lens).completeness outerRelIn outerRelOut completenessError := by + unfold OracleReduction.completeness at h ⊢ + rw [liftContext_toReduction_comm] + exact R.toReduction.liftContext_completeness h (lens := lens.toContext) + +theorem liftContext_perfectCompleteness + (h : R.perfectCompleteness innerRelIn innerRelOut) : + (R.liftContext lens).perfectCompleteness outerRelIn outerRelOut := + liftContext_completeness h + +end OracleReduction + +namespace OracleVerifier + +variable {outerLangIn : Set (OuterStmtIn × (∀ i, OuterOStmtIn i))} + {outerLangOut : Set (OuterStmtOut × (∀ i, OuterOStmtOut i))} + {innerLangIn : Set (InnerStmtIn × (∀ i, InnerOStmtIn i))} + {innerLangOut : Set (InnerStmtOut × (∀ i, InnerOStmtOut i))} + [Inhabited InnerStmtOut] [∀ i, Inhabited (InnerOStmtOut i)] + +/-- Lifting the reduction preserves soundness, assuming the lens satisfies its soundness + conditions -/ +theorem liftContext_soundness + {soundnessError : ℝ≥0} + {lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut} + (V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec) + [lensSound : lens.IsSound outerLangIn outerLangOut innerLangIn innerLangOut + (V.toVerifier.compatStatement lens)] + (h : V.soundness innerLangIn innerLangOut 
soundnessError) : + (V.liftContext lens).soundness outerLangIn outerLangOut soundnessError := by + unfold OracleVerifier.soundness at h ⊢ + rw [liftContext_toVerifier_comm] + exact V.toVerifier.liftContext_soundness h (lens := lens) + +theorem liftContext_knowledgeSoundness [Inhabited InnerWitIn] + {knowledgeError : ℝ≥0} + {stmtLens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut} + {witLens : Witness.InvLens (OuterStmtIn × ∀ i, OuterOStmtIn i) + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + (V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec) + [lensKS : Extractor.Lens.IsKnowledgeSound + outerRelIn innerRelIn outerRelOut innerRelOut + (V.toVerifier.compatStatement stmtLens) (fun _ _ => True) ⟨stmtLens, witLens⟩] + (h : V.knowledgeSoundness innerRelIn innerRelOut knowledgeError) : + (V.liftContext stmtLens).knowledgeSoundness outerRelIn outerRelOut + knowledgeError := by + unfold OracleVerifier.knowledgeSoundness at h ⊢ + rw [liftContext_toVerifier_comm] + exact V.toVerifier.liftContext_knowledgeSoundness h (stmtLens := stmtLens) (witLens := witLens) + +theorem liftContext_rbr_soundness + {rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0} + {lens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut} + (V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec) + [lensSound : lens.IsSound + outerLangIn outerLangOut innerLangIn innerLangOut + (V.toVerifier.compatStatement lens)] + (h : V.rbrSoundness innerLangIn innerLangOut rbrSoundnessError) : + (V.liftContext lens).rbrSoundness outerLangIn outerLangOut rbrSoundnessError := by + unfold OracleVerifier.rbrSoundness at h ⊢ + rw [liftContext_toVerifier_comm] + exact V.toVerifier.liftContext_rbr_soundness h (lens := lens) + +theorem liftContext_rbr_knowledgeSoundness [Inhabited InnerWitIn] + {rbrKnowledgeError 
: pSpec.ChallengeIdx → ℝ≥0} + {stmtLens : OracleStatement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterOStmtIn OuterOStmtOut InnerOStmtIn InnerOStmtOut} + {witLens : Witness.InvLens (OuterStmtIn × ∀ i, OuterOStmtIn i) + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + (V : OracleVerifier oSpec InnerStmtIn InnerOStmtIn InnerStmtOut InnerOStmtOut pSpec) + [lensKS : Extractor.Lens.IsKnowledgeSound + outerRelIn innerRelIn outerRelOut innerRelOut + (V.toVerifier.compatStatement stmtLens) (fun _ _ => True) ⟨stmtLens, witLens⟩] + (h : V.rbrKnowledgeSoundness innerRelIn innerRelOut rbrKnowledgeError) : + (V.liftContext stmtLens).rbrKnowledgeSoundness outerRelIn outerRelOut + rbrKnowledgeError := by + unfold OracleVerifier.rbrKnowledgeSoundness at h ⊢ + rw [liftContext_toVerifier_comm] + exact V.toVerifier.liftContext_rbr_knowledgeSoundness h + (stmtLens := stmtLens) (witLens := witLens) + +end OracleVerifier + +end Security diff --git a/ArkLib/OracleReduction/LiftContext/Reduction.lean b/ArkLib/OracleReduction/LiftContext/Reduction.lean new file mode 100644 index 000000000..4d77301de --- /dev/null +++ b/ArkLib/OracleReduction/LiftContext/Reduction.lean @@ -0,0 +1,607 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.LiftContext.Lens +import ArkLib.OracleReduction.Security.RoundByRound +-- import ArkLib.OracleReduction.Security.StateRestoration + +/-! + ## Lifting Reductions to Larger Contexts + + Sequential composition is usually not enough to represent oracle reductions in a modular way. We + also need to formalize **virtual** oracle reductions, which lift reductions from one (virtual / + inner) context into another (real / outer) context. + + This is what is meant when we informally say "apply so-and-so protocol to this quantity (derived + from the input statement & witness)". 
+ + Put in other words, we define a mapping between the input-output interfaces of two (oracle) + reductions, without changing anything about the underlying reductions. + + Recall that the input-output interface of an oracle reduction consists of: + - Input: `OuterStmtIn : Type`, `OuterOStmtIn : ιₛᵢ → Type`, and `OuterWitIn : Type` + - Output: `OuterStmtOut : Type`, `OuterOStmtOut : ιₛₒ → Type`, and `OuterWitOut : Type` + + The liftContext is defined as the following mappings of projections / lifts: + + - `projStmt : OuterStmtIn → InnerStmtIn` + - `projOStmt : (simulation involving OuterOStmtIn to produce InnerOStmtIn)` + - `projWit : OuterWitIn → InnerWitIn` + - `liftStmt : OuterStmtIn × InnerStmtOut → OuterStmtOut` + - `liftOStmt : (simulation involving InnerOStmtOut to produce OuterOStmtOut)` + - `liftWit : OuterWitIn × InnerWitOut → OuterWitOut` + + Note that since completeness & soundness for oracle reductions are defined in terms of the same + properties after converting to (non-oracle) reductions, we only need to focus our efforts on the + non-oracle case. + + Note that this _exactly_ corresponds to lenses in programming languages / category theory. Namely, + liftContext on the inputs corresponds to a `view`/`get` operation (our "proj"), while liftContext + on the output corresponds to a `modify`/`set` operation (our "lift"). + + More precisely, the `proj/lift` operations correspond to a Lens between two monomial polynomial + functors: `OuterCtxIn y^ OuterCtxOut ⇆ InnerCtxIn y^ InnerCtxOut`. + + All the lens definitions are in `Lens.lean`. This file deals with the lens applied to reductions. + See `OracleReduction.lean` for the application to oracle reduction. 
+-/ + +open OracleSpec OracleComp ProtocolSpec + +open scoped NNReal + +variable {n : ℕ} {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} + {OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut : Type} + {InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut : Type} + +/-- The outer prover after lifting invokes the inner prover on the projected input, and + lifts the output -/ +def Prover.liftContext + (lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) : + Prover oSpec OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut pSpec where + PrvState := fun i => P.PrvState i × OuterStmtIn × OuterWitIn + input := fun ctxIn => ⟨P.input <| lens.proj ctxIn, ctxIn⟩ + sendMessage := fun i ⟨prvState, stmtIn, witIn⟩ => do + let ⟨msg, prvState'⟩ ← P.sendMessage i prvState + return ⟨msg, ⟨prvState', stmtIn, witIn⟩⟩ + receiveChallenge := fun i ⟨prvState, stmtIn, witIn⟩ chal => + ⟨P.receiveChallenge i prvState chal, stmtIn, witIn⟩ + output := fun ⟨prvState, stmtIn, witIn⟩ => + let ⟨innerStmtOut, innerWitOut⟩ := P.output prvState + lens.lift (stmtIn, witIn) (innerStmtOut, innerWitOut) + +/-- The outer verifier after lifting invokes the inner verifier on the projected input, and + lifts the output -/ +def Verifier.liftContext + (lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) : + Verifier oSpec OuterStmtIn OuterStmtOut pSpec where + verify := fun stmtIn transcript => do + let innerStmtIn := lens.proj stmtIn + let innerStmtOut ← V.verify innerStmtIn transcript + return lens.lift stmtIn innerStmtOut + +/-- The outer reduction after lifting is the combination of the lifting of the prover and + verifier -/ + +def Reduction.liftContext + (lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (R : Reduction oSpec 
InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) : + Reduction oSpec OuterStmtIn OuterWitIn OuterStmtOut OuterWitOut pSpec where + prover := R.prover.liftContext lens + verifier := R.verifier.liftContext lens.stmt + +open Verifier in +/-- The outer extractor after lifting invokes the inner extractor on the projected input, and + lifts the output -/ +def Extractor.Straightline.liftContext + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (E : Extractor.Straightline oSpec InnerStmtIn InnerWitIn InnerWitOut pSpec) : + Extractor.Straightline oSpec OuterStmtIn OuterWitIn OuterWitOut pSpec := + fun outerStmtIn outerWitOut fullTranscript proveQueryLog verifyQueryLog => do + let ⟨innerStmtIn, innerWitOut⟩ := lens.proj (outerStmtIn, outerWitOut) + let innerWitIn ← E innerStmtIn innerWitOut fullTranscript proveQueryLog verifyQueryLog + return lens.wit.lift (outerStmtIn, outerWitOut) innerWitIn + +open Verifier in +/-- The outer round-by-round extractor after lifting invokes the inner extractor on the projected + input, and lifts the output -/ +def Extractor.RoundByRound.liftContext + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (E : Extractor.RoundByRound oSpec InnerStmtIn InnerWitIn pSpec) : + Extractor.RoundByRound oSpec OuterStmtIn OuterWitIn pSpec := + sorry + -- fun roundIdx outerStmtIn fullTranscript proveQueryLog => + -- rbrLensInv.liftWit (E roundIdx (lens.projStmt outerStmtIn) fullTranscript proveQueryLog) + +/-- Compatibility relation between the outer input statement and the inner output statement, +relative to a verifier. + +We require that the inner output statement is a possible output of the verifier on the outer +input statement, for any given transcript. 
Note that we have to existentially quantify over +transcripts since we only reference the verifier, and there's no way to get the transcript without +a prover. -/ +def Verifier.compatStatement + (lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) : + OuterStmtIn → InnerStmtOut → Prop := + fun outerStmtIn innerStmtOut => + ∃ transcript, innerStmtOut ∈ (V.run (lens.proj outerStmtIn) transcript).support + +/-- Compatibility relation between the outer input context and the inner output context, relative +to a reduction. + +We require that the inner output context (statement + witness) is a possible output of the reduction +on the outer input context (statement + witness). -/ +def Reduction.compatContext + (lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (R : Reduction oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) : + (OuterStmtIn × OuterWitIn) → (InnerStmtOut × InnerWitOut) → Prop := + fun outerCtxIn innerCtxOut => + innerCtxOut ∈ + Prod.fst '' (R.run (lens.stmt.proj outerCtxIn.1) (lens.wit.proj outerCtxIn)).support + +/-- Compatibility relation between the outer input witness and the inner output witness, relative to + a straightline extractor. + +We require that the inner output witness is a possible output of the straightline extractor on the +outer input witness, for a given input statement, transcript, and prover and verifier's query logs. 
+-/ +def Extractor.Straightline.compatWit + (lens : Extractor.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut) + (E : Extractor.Straightline oSpec InnerStmtIn InnerWitIn InnerWitOut pSpec) : + OuterStmtIn × OuterWitOut → InnerWitIn → Prop := + fun ⟨outerStmtIn, outerWitOut⟩ innerWitIn => + ∃ stmt tr logP logV, innerWitIn ∈ + (E stmt (lens.wit.proj (outerStmtIn, outerWitOut)) tr logP logV).support + +/-- The outer state function after lifting invokes the inner state function on the projected + input, and lifts the output -/ +def Verifier.StateFunction.liftContext [oSpec.FiniteRange] + (lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut) + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) + (outerLangIn : Set OuterStmtIn) (outerLangOut : Set OuterStmtOut) + (innerLangIn : Set InnerStmtIn) (innerLangOut : Set InnerStmtOut) + [lensSound : lens.IsSound outerLangIn outerLangOut innerLangIn innerLangOut + (V.compatStatement lens)] + (stF : V.StateFunction innerLangIn innerLangOut) : + (V.liftContext lens).StateFunction outerLangIn outerLangOut +where + toFun := fun m outerStmtIn transcript => + stF m (lens.proj outerStmtIn) transcript + toFun_empty := fun stmt hStmt => + stF.toFun_empty (lens.proj stmt) (lensSound.proj_sound stmt hStmt) + toFun_next := fun m hDir outerStmtIn transcript hStmt msg => + stF.toFun_next m hDir (lens.proj outerStmtIn) transcript hStmt msg + toFun_full := fun outerStmtIn transcript hStmt => by + have h := stF.toFun_full (lens.proj outerStmtIn) transcript hStmt + simp [Verifier.run, Verifier.liftContext] at h ⊢ + intro innerStmtOut hSupport + apply lensSound.lift_sound + · simp [compatStatement]; exact ⟨transcript, hSupport⟩ + · exact h innerStmtOut hSupport + +section Theorems + +/- Theorems about liftContext interacting with reduction execution and security properties -/ + +namespace Prover + +/- Breaking down the intertwining of liftContext and prover execution -/ + 
+/-- Lifting the prover intertwines with the process round function -/ +theorem liftContext_processRound + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {i : Fin n} + {P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} + {resultRound : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) + (pSpec.Transcript i.castSucc × (P.liftContext lens).PrvState i.castSucc)} : + (P.liftContext lens).processRound i resultRound + = do + let ⟨transcript, prvState, outerStmtIn, outerWitIn⟩ ← resultRound + let ⟨newTranscript, newPrvState⟩ ← P.processRound i (do return ⟨transcript, prvState⟩) + return ⟨newTranscript, ⟨newPrvState, outerStmtIn, outerWitIn⟩⟩ := by + unfold processRound liftContext + simp + congr 1; funext + split <;> simp + +/-- Lemma needed for the proof of `liftContext_runToRound` -/ +private lemma bind_map_processRound_pure {i : Fin n} + (P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) + (comp : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) + (pSpec.Transcript i.castSucc × P.PrvState i.castSucc)) + {α : Type} (f : pSpec.Transcript i.succ × P.PrvState i.succ → α) : + (do let result ← comp; f <$> P.processRound i (pure result)) + = f <$> P.processRound i comp := by + simp [processRound] + +theorem liftContext_runToRound + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} {i : Fin (n + 1)} + (P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) : + (P.liftContext lens).runToRound i outerStmtIn outerWitIn + = do + let ⟨transcript, prvState⟩ ← + (P.runToRound i).uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨transcript, ⟨prvState, outerStmtIn, outerWitIn⟩⟩ := by + unfold runToRound Function.uncurry + induction i using Fin.induction with + | zero => simp [liftContext] + | succ i ih => simp [liftContext_processRound, 
ih, bind_map_processRound_pure] + +-- Requires more lemmas about `simulateQ` for logging oracles +theorem liftContext_runWithLogToRound + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} {i : Fin (n + 1)} + (P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec) : + (P.liftContext lens).runWithLogToRound i outerStmtIn outerWitIn + = do + let ⟨transcript, prvState, queryLog⟩ ← + (P.runWithLogToRound i).uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨transcript, ⟨prvState, outerStmtIn, outerWitIn⟩, queryLog⟩ := by + unfold runWithLogToRound + induction i using Fin.induction with + | zero => simp [liftContext, Function.uncurry] + | succ i ih => simp [liftContext_runToRound, Function.uncurry] + +/-- Running the lifted outer prover is equivalent to running the inner prover on the projected + input, and then integrating the output -/ +theorem liftContext_run + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} + {P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} : + (P.liftContext lens).run outerStmtIn outerWitIn + = do + let ⟨innerCtxOut, fullTranscript⟩ ← + P.run.uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨lens.lift (outerStmtIn, outerWitIn) innerCtxOut, + fullTranscript⟩ := by + simp only [run, liftContext_runToRound] + simp [liftContext, Function.uncurry] + +/- Lifting the prover intertwines with the runWithLog function -/ +theorem liftContext_runWithLog + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} + {P : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} : + (P.liftContext lens).runWithLog outerStmtIn outerWitIn + 
= do + let ⟨innerCtxOut, fullTranscript, queryLog⟩ ← + P.runWithLog.uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨lens.lift (outerStmtIn, outerWitIn) innerCtxOut, + fullTranscript, + queryLog⟩ := by + simp only [runWithLog, liftContext_runWithLogToRound] + simp [liftContext, Function.uncurry] + +end Prover + +namespace Reduction + +theorem liftContext_run + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} + {R : Reduction oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} : + (R.liftContext lens).run outerStmtIn outerWitIn = do + let ⟨innerCtxOut, verInnerStmtOut, fullTranscript⟩ ← + R.run.uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨lens.lift (outerStmtIn, outerWitIn) innerCtxOut, + lens.stmt.lift outerStmtIn verInnerStmtOut, + fullTranscript⟩ := by + unfold run + simp [liftContext, Prover.liftContext_run, Verifier.liftContext, Verifier.run, Function.uncurry] + +theorem liftContext_runWithLog + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + {outerStmtIn : OuterStmtIn} {outerWitIn : OuterWitIn} + {R : Reduction oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} : + (R.liftContext lens).runWithLog outerStmtIn outerWitIn = do + let ⟨innerCtxOut, verInnerStmtOut, fullTranscript, queryLog⟩ ← + R.runWithLog.uncurry (lens.proj (outerStmtIn, outerWitIn)) + return ⟨lens.lift (outerStmtIn, outerWitIn) innerCtxOut, + lens.stmt.lift outerStmtIn verInnerStmtOut, + fullTranscript, + queryLog⟩ := by + unfold runWithLog + simp [liftContext, Prover.liftContext_runWithLog, Verifier.liftContext, Verifier.run] + +end Reduction + +variable [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + {outerRelIn : Set (OuterStmtIn × OuterWitIn)} {outerRelOut : Set (OuterStmtOut × OuterWitOut)} + {innerRelIn : Set (InnerStmtIn × InnerWitIn)} 
{innerRelOut : Set (InnerStmtOut × InnerWitOut)} + +namespace Reduction + +variable + {R : Reduction oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec} + {completenessError : ℝ≥0} + {lens : Context.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut + OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + [lensComplete : lens.IsComplete outerRelIn innerRelIn outerRelOut innerRelOut + (R.compatContext lens)] + +/-- Lifting the reduction preserves completeness, assuming the lens satisfies its completeness + conditions +-/ +theorem liftContext_completeness + (h : R.completeness innerRelIn innerRelOut completenessError) : + (R.liftContext lens).completeness outerRelIn outerRelOut completenessError := by + unfold completeness at h ⊢ + intro outerStmtIn outerWitIn hRelIn + have hR := h (lens.stmt.proj outerStmtIn) (lens.wit.proj (outerStmtIn, outerWitIn)) + (lensComplete.proj_complete _ _ hRelIn) + rw [Reduction.liftContext_run] + refine le_trans hR ?_ + simp + refine probEvent_mono ?_ + intro ⟨innerContextOut, a, b⟩ hSupport ⟨hRelOut, hRelOut'⟩ + have : innerContextOut ∈ + Prod.fst <$> + (R.run (lens.stmt.proj outerStmtIn) (lens.wit.proj (outerStmtIn, outerWitIn))).support := by + simp + exact ⟨a, b, hSupport⟩ + simp_all + rw [← hRelOut'] at hRelOut ⊢ + refine lensComplete.lift_complete _ _ _ _ ?_ hRelIn hRelOut + simp [compatContext]; exact this + +theorem liftContext_perfectCompleteness + (h : R.perfectCompleteness innerRelIn innerRelOut) : + (R.liftContext lens).perfectCompleteness outerRelIn outerRelOut := by + exact liftContext_completeness h + +-- Can't turn the above into an instance because Lean needs to synthesize `innerRelIn` and +-- `innerRelOut` out of thin air. 
+ +-- instance [Reduction.IsComplete innerRelIn innerRelOut R completenessError] : +-- R.liftContext.IsComplete outerRelIn outerRelOut completenessError := +-- ⟨R.liftContext.completeness⟩ + +-- instance [R.IsPerfectComplete relIn relOut] : +-- R.liftContext.IsPerfectComplete relIn relOut := +-- ⟨fun _ => R.liftContext.perfectCompleteness _ _ _⟩ + +end Reduction + +namespace Verifier + +/-- Lifting the reduction preserves soundness, assuming the lens satisfies its soundness + conditions -/ +theorem liftContext_soundness [Inhabited InnerStmtOut] + {outerLangIn : Set OuterStmtIn} {outerLangOut : Set OuterStmtOut} + {innerLangIn : Set InnerStmtIn} {innerLangOut : Set InnerStmtOut} + {soundnessError : ℝ≥0} + {lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut} + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) + -- TODO: figure out the right compatibility relation for the IsSound condition + [lensSound : lens.IsSound outerLangIn outerLangOut innerLangIn innerLangOut + (V.compatStatement lens)] + (h : V.soundness innerLangIn innerLangOut soundnessError) : + (V.liftContext lens).soundness outerLangIn outerLangOut soundnessError := by + unfold soundness Reduction.run at h ⊢ + -- Note: there is no distinction between `Outer` and `Inner` here + intro WitIn WitOut outerWitIn outerP outerStmtIn hOuterStmtIn + simp at h ⊢ + have innerP : Prover oSpec InnerStmtIn WitIn InnerStmtOut WitOut pSpec := { + PrvState := outerP.PrvState + input := fun _ => outerP.input (outerStmtIn, outerWitIn) + sendMessage := outerP.sendMessage + receiveChallenge := outerP.receiveChallenge + output := fun state => + let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state + ⟨default, outerWitOut⟩ + } + have : lens.proj outerStmtIn ∉ innerLangIn := by + apply lensSound.proj_sound + exact hOuterStmtIn + have hSound := h WitIn WitOut outerWitIn innerP (lens.proj outerStmtIn) this + refine le_trans ?_ hSound + simp [Verifier.liftContext, Verifier.run] + -- Need to massage the two 
`probEvent`s so that they have the same base computation `oa` + -- Then apply `lensSound.lift_sound`? + sorry + +/- + Lifting the reduction preserves knowledge soundness, assuming the lens satisfies its knowledge + soundness conditions + + Note: since knowledge soundness is defined existentially in terms of the extractor, we also cannot + impose any meaningful compatibility conditions on the witnesses (outer output & inner input), + hence `compatWit` field is just always true + + (future extensions may define lifting relative to a particular extractor, if needed) +-/ +theorem liftContext_knowledgeSoundness [Inhabited InnerStmtOut] [Inhabited InnerWitIn] + {outerRelIn : Set (OuterStmtIn × OuterWitIn)} {outerRelOut : Set (OuterStmtOut × OuterWitOut)} + {innerRelIn : Set (InnerStmtIn × InnerWitIn)} {innerRelOut : Set (InnerStmtOut × InnerWitOut)} + {knowledgeError : ℝ≥0} + {stmtLens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut} + {witLens : Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) + [lensKS : Extractor.Lens.IsKnowledgeSound outerRelIn innerRelIn outerRelOut innerRelOut + (V.compatStatement stmtLens) (fun _ _ => True) ⟨stmtLens, witLens⟩] + (h : V.knowledgeSoundness innerRelIn innerRelOut knowledgeError) : + (V.liftContext stmtLens).knowledgeSoundness outerRelIn outerRelOut + knowledgeError := by + unfold knowledgeSoundness at h ⊢ + obtain ⟨E, h'⟩ := h + refine ⟨E.liftContext ⟨stmtLens, witLens⟩, ?_⟩ + intro outerStmtIn outerWitIn outerP + simp [Extractor.Straightline.liftContext] + let innerP : Prover oSpec InnerStmtIn InnerWitIn InnerStmtOut InnerWitOut pSpec := + { + PrvState := outerP.PrvState + input := fun _ => outerP.input (outerStmtIn, outerWitIn) + sendMessage := outerP.sendMessage + receiveChallenge := outerP.receiveChallenge + output := fun state => + let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state + ⟨default, witLens.proj (outerStmtIn, 
outerWitOut)⟩ + } + have h_innerP_input {innerStmtIn} {innerWitIn} : + innerP.input (innerStmtIn, innerWitIn) = outerP.input (outerStmtIn, outerWitIn) := rfl + simp at h' + have hR := h' (stmtLens.proj outerStmtIn) default innerP + simp at hR + simp [Reduction.runWithLog, Verifier.liftContext, Verifier.run] at hR ⊢ + have h_innerP_runWithLog {innerStmtIn} {innerWitIn} : + innerP.runWithLog innerStmtIn innerWitIn + = do + let ⟨⟨_, outerWitOut⟩, rest⟩ ← outerP.runWithLog outerStmtIn outerWitIn + return ⟨⟨default, witLens.proj (outerStmtIn, outerWitOut)⟩, rest⟩ := by + sorry + refine le_trans ?_ hR + -- Massage the two `probEvent`s so that they have the same base computation `oa`? + simp [h_innerP_runWithLog] + -- apply probEvent_mono ?_ + sorry + +/- + Lifting the reduction preserves round-by-round soundness, assuming the lens satisfies its + soundness conditions +-/ +theorem liftContext_rbr_soundness [Inhabited InnerStmtOut] + {outerLangIn : Set OuterStmtIn} {outerLangOut : Set OuterStmtOut} + {innerLangIn : Set InnerStmtIn} {innerLangOut : Set InnerStmtOut} + {rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0} + {lens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut} + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) + -- TODO: figure out the right compatibility relation for the IsSound condition + [lensSound : lens.IsSound outerLangIn outerLangOut innerLangIn innerLangOut + (V.compatStatement lens)] + (h : V.rbrSoundness innerLangIn innerLangOut rbrSoundnessError) : + (V.liftContext lens).rbrSoundness outerLangIn outerLangOut rbrSoundnessError := by + unfold rbrSoundness at h ⊢ + obtain ⟨stF, h⟩ := h + simp at h ⊢ + refine ⟨stF.liftContext lens (lensSound := lensSound), ?_⟩ + intro outerStmtIn hOuterStmtIn WitIn WitOut witIn outerP roundIdx hDir + have innerP : Prover oSpec InnerStmtIn WitIn InnerStmtOut WitOut pSpec := { + PrvState := outerP.PrvState + input := fun _ => outerP.input (outerStmtIn, witIn) + sendMessage := outerP.sendMessage + 
receiveChallenge := outerP.receiveChallenge + output := fun state => + let ⟨outerStmtOut, outerWitOut⟩ := outerP.output state + ⟨default, outerWitOut⟩ + } + have h' := h (lens.proj outerStmtIn) (lensSound.proj_sound _ hOuterStmtIn) + WitIn WitOut witIn innerP roundIdx hDir + refine le_trans ?_ h' + sorry + +/- + Lifting the reduction preserves round-by-round knowledge soundness, assuming the lens + satisfies its knowledge soundness conditions +-/ +theorem liftContext_rbr_knowledgeSoundness [Inhabited InnerStmtOut] [Inhabited InnerWitIn] + {outerRelIn : Set (OuterStmtIn × OuterWitIn)} {outerRelOut : Set (OuterStmtOut × OuterWitOut)} + {innerRelIn : Set (InnerStmtIn × InnerWitIn)} {innerRelOut : Set (InnerStmtOut × InnerWitOut)} + {rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0} + {stmtLens : Statement.Lens OuterStmtIn OuterStmtOut InnerStmtIn InnerStmtOut} + {witLens : Witness.InvLens OuterStmtIn OuterWitIn OuterWitOut InnerWitIn InnerWitOut} + (V : Verifier oSpec InnerStmtIn InnerStmtOut pSpec) + [lensKS : Extractor.Lens.IsKnowledgeSound outerRelIn innerRelIn outerRelOut innerRelOut + (V.compatStatement stmtLens) (fun _ _ => True) ⟨stmtLens, witLens⟩] + (h : V.rbrKnowledgeSoundness innerRelIn innerRelOut rbrKnowledgeError) : + (V.liftContext stmtLens).rbrKnowledgeSoundness outerRelIn outerRelOut + rbrKnowledgeError := by + unfold rbrKnowledgeSoundness at h ⊢ + obtain ⟨stF, E, h⟩ := h + simp at h ⊢ + -- refine ⟨stF.liftContext (lens := lens.toStatement.Lens) + -- (lensSound := lensKnowledgeSound.toSound), + -- ?_, ?_⟩ + sorry + +end Verifier + +end Theorems + +section Test + +open Polynomial + +-- Testing out sum-check-like relations + +noncomputable section + +def OuterStmtIn_Test := ℤ[X] × ℤ[X] × ℤ +def InnerStmtIn_Test := ℤ[X] × ℤ + +@[simp] +def outerRelIn_Test : Set (OuterStmtIn_Test × Unit) := + setOf (fun ⟨⟨p, q, t⟩, _⟩ => ∑ x ∈ {0, 1}, (p * q).eval x = t) +@[simp] +def innerRelIn_Test : Set (InnerStmtIn_Test × Unit) := + setOf (fun ⟨⟨f, t⟩, _⟩ => ∑ x ∈ {0, 1}, 
f.eval x = t) + +def OuterStmtOut_Test := ℤ[X] × ℤ[X] × ℤ × ℤ +def InnerStmtOut_Test := ℤ[X] × ℤ × ℤ + +@[simp] +def outerRelOut_Test : Set (OuterStmtOut_Test × Unit) := + setOf (fun ⟨⟨p, q, t, r⟩, _⟩ => (p * q).eval r = t) +@[simp] +def innerRelOut_Test : Set (InnerStmtOut_Test × Unit) := + setOf (fun ⟨⟨f, t, r⟩, _⟩ => f.eval r = t) + +@[simp] +def testStmtLens : + Statement.Lens OuterStmtIn_Test OuterStmtOut_Test InnerStmtIn_Test InnerStmtOut_Test := + ⟨fun ⟨p, q, t⟩ => ⟨p * q, t⟩, fun ⟨p, q, _⟩ ⟨_, t', u⟩ => (p, q, t', u)⟩ + +@[simp] +def testLens : Context.Lens OuterStmtIn_Test OuterStmtOut_Test InnerStmtIn_Test InnerStmtOut_Test + Unit Unit Unit Unit where + stmt := testStmtLens + wit := Witness.Lens.id + +@[simp] +def testLensE : Extractor.Lens OuterStmtIn_Test OuterStmtOut_Test InnerStmtIn_Test InnerStmtOut_Test + Unit Unit Unit Unit where + stmt := testStmtLens + wit := Witness.InvLens.id + +instance instTestLensComplete : testLens.IsComplete + outerRelIn_Test innerRelIn_Test outerRelOut_Test innerRelOut_Test + (fun ⟨⟨p, q, _⟩, _⟩ ⟨⟨f, _⟩, _⟩ => p * q = f) where + proj_complete := fun ⟨p, q, t⟩ () hRelIn => by simp_all + lift_complete := fun ⟨p, q, t⟩ _ ⟨f, t', r⟩ _ hCompat hRelIn hRelOut' => by + simp_all only [outerRelIn_Test, eval_mul, Finset.mem_singleton, zero_ne_one, + not_false_eq_true, Finset.sum_insert, Finset.sum_singleton, Set.mem_setOf_eq, + innerRelOut_Test, outerRelOut_Test, testLens, testStmtLens] + simp [← hRelOut', ← hCompat] + +def instTestLensKnowledgeSound : testLensE.IsKnowledgeSound + outerRelIn_Test innerRelIn_Test outerRelOut_Test innerRelOut_Test + (fun ⟨p, q, _⟩ ⟨f, _⟩ => p * q = f) (fun _ _ => True) where + proj_knowledgeSound := fun ⟨p, q, t⟩ ⟨f, t', r⟩ _ h h' => by + simp_all only [outerRelOut_Test, eval_mul, Statement.Lens.lift, + testLensE, testStmtLens, Set.mem_setOf_eq, innerRelOut_Test] + simp [← h', ← h] + lift_knowledgeSound := fun ⟨p, q, t⟩ _ _ _ _ => by + simp_all + +end + +end Test diff --git 
a/ArkLib/OracleReduction/OracleInterface.lean b/ArkLib/OracleReduction/OracleInterface.lean index e81c699bf..5824b543f 100644 --- a/ArkLib/OracleReduction/OracleInterface.lean +++ b/ArkLib/OracleReduction/OracleInterface.lean @@ -5,6 +5,7 @@ Authors: Quang Dao -/ import VCVio +import ArkLib.ToVCVio.SimOracle import ArkLib.Data.MvPolynomial.Notation import Mathlib.Algebra.Polynomial.Roots -- import ArkLib.Data.MlPoly.Basic @@ -27,120 +28,61 @@ import Mathlib.Algebra.Polynomial.Roots - Vectors. This instance turns vectors into oracles for which one can query specific positions. -/ -/-- `⊕ᵥ` is notation for `Sum.elim`, e.g. sending `α → γ` and `β → γ` to `α ⊕ β → γ`. -/ -infixr:35 " ⊕ᵥ " => Sum.elim +universe u v w open OracleComp OracleSpec OracleQuery -variable {ι ιₜ : Type} - -@[reducible] -def SimOracle.Stateful (spec : OracleSpec ι) (specₜ : OracleSpec ιₜ) (σ : Type) := - QueryImpl spec (StateT σ (OracleComp specₜ)) - -@[reducible] -def SimOracle.Stateless (spec : OracleSpec ι) (specₜ : OracleSpec ιₜ) := - QueryImpl spec (OracleComp specₜ) - -@[reducible] -def SimOracle.Impl (spec : OracleSpec ι) := QueryImpl spec Option - -namespace SimOracle - -variable {ι₁ ι₂ ιₜ₁ ιₜ₂ : Type} {spec : OracleSpec ι} {spec₁ : OracleSpec ι₁} - {spec₂ : OracleSpec ι₂} {specₜ : OracleSpec ιₜ} {specₜ₁ : OracleSpec ιₜ₁} - {specₜ₂ : OracleSpec ιₜ₂} {σ τ α β : Type} - -variable [DecidableEq ι] - -open OracleSpec - -def fnOracle (spec : OracleSpec ι) (f : (i : ι) → spec.domain i → spec.range i) : - SimOracle.Impl spec where - impl | query i t => f i t - -def statelessOracle (baseSpec : OracleSpec ιₜ) (spec : OracleSpec ι) - (f : (i : ι) → spec.domain i → spec.range i) : - SimOracle.Stateless (baseSpec ++ₒ spec) baseSpec where - impl - | query (.inl i) t => query i t - | query (.inr i) t => pure (f i t) - --- instance : (loggingOracle (spec := spec)).IsTracking where --- state_indep | query _ _, _ => rfl - -def append' (so₁ : SimOracle.Stateful spec₁ specₜ₁ σ) (so₂ : SimOracle.Stateful 
spec₂ specₜ₂ τ) : - SimOracle.Stateful (spec₁ ++ₒ spec₂) (specₜ₁ ++ₒ specₜ₂) (σ × τ) where - impl - | query (.inl i) t => fun (s₁, s₂) ↦ do - let (u, s₁') ← so₁.impl (query i t) s₁; return (u, s₁', s₂) - | query (.inr i) t => fun (s₁, s₂) ↦ do - let (u, s₂') ← so₂.impl (query i t) s₂; return (u, s₁, s₂') - -def dedup {ι : Type} (spec : OracleSpec ι) : SimOracle.Stateless (spec ++ₒ spec) spec where - impl - | query (.inl i) t => query i t - | query (.inr i) t => query i t - --- theorem append'_dedup (so₁ : SimOracle spec₁ specₜ σ) (so₂ : SimOracle spec₂ specₜ τ) : --- append so₁ so₂ = (dedup specₜ ∘ₛ append' so₁ so₂).equivState (.prodPUnit _) := by --- sorry - --- /-- Answer all oracle queries to `oSpec` with a deterministic function `f` having the same domain --- and range as `oSpec`. -/ --- def fnOracle {ι : Type} (spec : OracleSpec ι) --- (f : (i : ι) → spec.domain i → spec.range i) : SimOracle spec []ₒ PUnit := --- statelessOracle fun (query i q) ↦ pure (f i q) - -def lift {ι₁ ι₂ ι : Type} {σ : Type} (oSpec₁ : OracleSpec ι₁) (oSpec₂ : OracleSpec ι₂) - (oSpec : OracleSpec ι) (so : SimOracle.Stateful oSpec₁ oSpec₂ σ) : - SimOracle.Stateful (oSpec ++ₒ oSpec₁) (oSpec ++ₒ oSpec₂) σ where - impl := fun q s => match q with - | query (.inl i) q => do return ⟨← query i q, s⟩ - | query (.inr i) q => so.impl (query (spec := oSpec₁) i q) s - --- def liftLeft' {ι₁ ι₂ ι : Type} {σ : Type} {oSpec₁ : OracleSpec ι₁} {oSpec₂ : OracleSpec ι₂} --- (oSpec : OracleSpec ι) (so : SimOracle oSpec₁ oSpec₂ σ) : --- SimOracle (oSpec ++ₒ oSpec₁) (oSpec ++ₒ oSpec₂) σ := --- (append' idOracle so).equivState (.punitProd σ) - -def liftLeftNil {ι : Type} {σ : Type} (oSpec : OracleSpec ι) : - SimOracle.Stateful ([]ₒ ++ₒ oSpec) oSpec σ where impl - | query (.inr i) q => fun s ↦ do return ⟨← query i q, s⟩ - -def liftRightNil {ι : Type} {σ : Type} (oSpec : OracleSpec ι) : - SimOracle.Stateful (oSpec ++ₒ []ₒ) oSpec σ where impl - | query (.inl i) q => fun s ↦ do return ⟨← query i q, s⟩ - -end 
SimOracle - /-- `OracleInterface` is a type class that provides an oracle interface for a type `Message`. It consists of a query type `Query`, a response type `Response`, and a function `oracle` that - transforms a message `m : Message` into a function `Query → Response`. -/ + transforms a message `m : Message` into a function `Query → Response`. + + TODO: turn `(Query, Response)` into a general `PFunctor` (i.e. `Response : Query → Type`) This + allows for better compositionality of `OracleInterface`, including (indexed) sum, instead of + requiring indexed family of `OracleInterface`s. + + However, this won't be possible until `OracleSpec` is changed to be an alias for `PFunctor` -/ @[ext] -class OracleInterface (Message : Type) where - Query : Type - Response : Type +class OracleInterface (Message : Type u) where + Query : Type v + Response : Type w oracle : Message → Query → Response namespace OracleInterface +/-- The default instance for `OracleInterface`, where the query is trivial (a `Unit`) and the + response returns the data. We do not register this as an instance, instead explicitly calling it + where necessary. +-/ +def instDefault {Message : Type u} : OracleInterface Message where + Query := Unit + Response := Message + oracle := fun m _ => m + +instance {Message : Type u} : Inhabited (OracleInterface Message) := + ⟨instDefault⟩ + open SimOracle /-- Converts an indexed type family of oracle interfaces into an oracle specification. 
-/ -def toOracleSpec {ι : Type} (v : ι → Type) [O : ∀ i, OracleInterface (v i)] : +def toOracleSpec {ι : Type u} (v : ι → Type v) [O : ∀ i, OracleInterface (v i)] : OracleSpec ι := fun i => ((O i).Query, (O i).Response) @[inherit_doc] notation "[" term "]ₒ" => toOracleSpec term -instance {ι : Type} (v : ι → Type) [O : ∀ i, OracleInterface (v i)] +/-- Given an underlying data for an indexed type family of oracle interfaces `v`, + we can give an implementation of all queries to the interface defined by `v` -/ +def toOracleImpl {ι : Type u} (v : ι → Type v) [O : ∀ i, OracleInterface (v i)] + (data : ∀ i, v i) : QueryImpl [v]ₒ Id where + impl | query i t => (O i).oracle (data i) t + +instance {ι : Type u} (v : ι → Type v) [O : ∀ i, OracleInterface (v i)] [h : ∀ i, DecidableEq (Query (v i))] [h' : ∀ i, DecidableEq (Response (v i))] : [v]ₒ.DecidableEq where domain_decidableEq' := h range_decidableEq' := h' -instance {ι : Type} (v : ι → Type) [O : ∀ i, OracleInterface (v i)] +instance {ι : Type u} (v : ι → Type v) [O : ∀ i, OracleInterface (v i)] [h : ∀ i, Fintype (Response (v i))] [h' : ∀ i, Inhabited (Response (v i))] : [v]ₒ.FiniteRange where @@ -148,16 +90,78 @@ instance {ι : Type} (v : ι → Type) [O : ∀ i, OracleInterface (v i)] range_inhabited' := h' @[reducible, inline] -instance {ι₁ : Type} {T₁ : ι₁ → Type} [∀ i, OracleInterface (T₁ i)] - {ι₂ : Type} {T₂ : ι₂ → Type} [∀ i, OracleInterface (T₂ i)] : - ∀ i, OracleInterface (Sum.elim T₁ T₂ i) := +instance {ι₁ : Type u} {T₁ : ι₁ → Type v} [inst₁ : ∀ i, OracleInterface (T₁ i)] + {ι₂ : Type u} {T₂ : ι₂ → Type v} [inst₂ : ∀ i, OracleInterface (T₂ i)] : + ∀ i, OracleInterface (Sum.rec T₁ T₂ i) := fun i => match i with - | .inl i => by dsimp; infer_instance - | .inr i => by dsimp; infer_instance + | .inl i => inst₁ i + | .inr i => inst₂ i + +/-- The tensor product oracle interface for the product of two types `α` and `β`, each with its own + oracle interface, is defined as: + - The query & response types are the product of 
the two query & response types. + - The oracle will run both oracles and return the pair of responses. + +This is a low priority instance since we do not expect to have this behavior often. See `instProd` +for the sum behavior on the interface. -/ +@[reducible, inline] +instance (priority := low) instTensorProd {α β : Type*} + [Oα : OracleInterface α] [Oβ : OracleInterface β] : OracleInterface (α × β) where + Query := Oα.Query × Oβ.Query + Response := Oα.Response × Oβ.Response + oracle := fun (a, b) (q₁, q₂) => (Oα.oracle a q₁, Oβ.oracle b q₂) + +/-- The product oracle interface for the product of two types `α` and `β`, each with its own oracle + interface, is defined as: + - The query & response types are the sum type of the two query & response types. + - The oracle will answer depending on the input query. + +This is the behavior more often assumed, i.e. when we send multiple oracle messages in a round. +See `instTensor` for the tensor product behavior on the interface. -/ +@[reducible, inline] +instance instProd {α β : Type*} [Oα : OracleInterface α] [Oβ : OracleInterface β] : + OracleInterface (α × β) where + Query := Oα.Query ⊕ Oβ.Query + Response := Oα.Response ⊕ Oβ.Response + oracle := fun (a, b) q => match q with + | .inl q => .inl (Oα.oracle a q) + | .inr q => .inr (Oβ.oracle b q) + +/-- The indexed tensor product oracle interface for the dependent product of a type family `v`, + indexed by `ι`, each having an oracle interface, is defined as: + - The query & response types are the dependent product of the query & response types of the type + family. + - The oracle, on a given query specifying the index `i` of the type family, will run the oracle + of `v i` and return the response. + +This is a low priority instance since we do not expect to have this behavior often. See `instForall` +for the product behavior on the interface (with dependent sums for the query and response types). 
-/ +@[reducible, inline] +instance (priority := low) instTensorForall {ι : Type u} (v : ι → Type v) + [O : ∀ i, OracleInterface (v i)] : OracleInterface (∀ i, v i) where + Query := (i : ι) → (O i).Query + Response := (i : ι) → (O i).Response + oracle := fun f q i => (O i).oracle (f i) (q i) + +/-- The indexed product oracle interface for the dependent product of a type family `v`, indexed by + `ι`, each having an oracle interface, is defined as: + - The query & response types are the dependent product of the query & response types of the type + family. + - The oracle, on a given query specifying the index `i` of the type family, will run the oracle + of `v i` and return the response. + +This is the behavior usually assumed, i.e. when we send multiple oracle messages in a round. +See `instTensorForall` for the tensor product behavior on the interface. -/ +@[reducible, inline] +instance instForall {ι : Type u} (v : ι → Type v) [O : ∀ i, OracleInterface (v i)] : + OracleInterface (∀ i, v i) where + Query := (i : ι) × (O i).Query + Response := (i : ι) × (O i).Response + oracle := fun f ⟨i, q⟩ => ⟨i, (O i).oracle (f i) q⟩ -def append {ι₁ : Type} {T₁ : ι₁ → Type} [∀ i, OracleInterface (T₁ i)] - {ι₂ : Type} {T₂ : ι₂ → Type} [∀ i, OracleInterface (T₂ i)] : OracleSpec (ι₁ ⊕ ι₂) := - [Sum.elim T₁ T₂]ₒ +def append {ι₁ : Type u} {T₁ : ι₁ → Type v} [∀ i, OracleInterface (T₁ i)] + {ι₂ : Type u} {T₂ : ι₂ → Type v} [∀ i, OracleInterface (T₂ i)] : OracleSpec (ι₁ ⊕ ι₂) := + [Sum.rec T₁ T₂]ₒ /-- Combines multiple oracle specifications into a single oracle by routing queries to the appropriate underlying oracle. Takes: @@ -165,10 +169,10 @@ def append {ι₁ : Type} {T₁ : ι₁ → Type} [∀ i, OracleInterface (T₁ - An indexed type family `T` with `OracleInterface` instances - Values of that type family Returns a stateless oracle that routes queries to the appropriate underlying oracle. 
-/ -def simOracle {ι : Type} (oSpec : OracleSpec ι) {ι' : Type} {T : ι' → Type} +def simOracle {ι : Type u} (oSpec : OracleSpec ι) {ι' : Type v} {T : ι' → Type w} [∀ i, OracleInterface (T i)] (t : (i : ι') → T i) : SimOracle.Stateless (oSpec ++ₒ [T]ₒ) oSpec := - SimOracle.statelessOracle _ _ (fun i q => oracle (t i) q) + idOracle ++ₛₒ (fnOracle [T]ₒ (fun i => oracle (t i))) /-- Combines multiple oracle specifications into a single oracle by routing queries to the appropriate underlying oracle. Takes: @@ -176,13 +180,14 @@ def simOracle {ι : Type} (oSpec : OracleSpec ι) {ι' : Type} {T : ι' → Type - Two indexed type families `T₁` and `T₂` with `OracleInterface` instances - Values of those type families Returns a stateless oracle that routes queries to the appropriate underlying oracle. -/ -def simOracle2 {ι : Type} (oSpec : OracleSpec ι) - {ι₁ : Type} {T₁ : ι₁ → Type} [∀ i, OracleInterface (T₁ i)] - {ι₂ : Type} {T₂ : ι₂ → Type} [∀ i, OracleInterface (T₂ i)] +def simOracle2 {ι : Type u} (oSpec : OracleSpec ι) + {ι₁ : Type v} {T₁ : ι₁ → Type w} [∀ i, OracleInterface (T₁ i)] + {ι₂ : Type v} {T₂ : ι₂ → Type w} [∀ i, OracleInterface (T₂ i)] (t₁ : ∀ i, T₁ i) (t₂ : ∀ i, T₂ i) : SimOracle.Stateless (oSpec ++ₒ ([T₁]ₒ ++ₒ [T₂]ₒ)) oSpec := - SimOracle.statelessOracle _ _ (fun i q => match i with - | .inl i => oracle (t₁ i) q - | .inr i => oracle (t₂ i) q) + idOracle ++ₛₒ + fnOracle ([T₁]ₒ ++ₒ [T₂]ₒ) (fun i => match i with + | .inl i => oracle (t₁ i) + | .inr i => oracle (t₂ i)) open Finset in /-- A message type together with a `OracleInterface` instance is said to have **oracle distance** @@ -194,7 +199,7 @@ open Finset in This property corresponds to the distance of a code, when the oracle instance is to encode the message and the query is a position of the codeword. In particular, it applies to `(Mv)Polynomial`. 
-/ -def distanceLE (Message : Type) [O : OracleInterface Message] +def distanceLE (Message : Type*) [O : OracleInterface Message] [Fintype (O.Query)] [DecidableEq (O.Response)] (d : ℕ) : Prop := ∀ a b : Message, a ≠ b → #{q | OracleInterface.oracle a q = OracleInterface.oracle b q} ≤ d @@ -205,7 +210,7 @@ section Polynomial open Polynomial MvPolynomial -variable {R : Type} [CommSemiring R] {d : ℕ} {σ : Type} +variable {R : Type*} [CommSemiring R] {d : ℕ} {σ : Type*} /-- Univariate polynomials can be accessed via evaluation queries. -/ @[reducible, inline] @@ -252,7 +257,7 @@ section PolynomialDistance open Polynomial MvPolynomial -variable {R : Type} [CommRing R] {d : ℕ} [Fintype R] [DecidableEq R] [IsDomain R] +variable {R : Type*} [CommRing R] {d : ℕ} [Fintype R] [DecidableEq R] [IsDomain R] -- TODO: golf this theorem @[simp] @@ -261,7 +266,7 @@ theorem distanceLE_polynomial_degreeLT : OracleInterface.distanceLE (R⦃< d⦄[ intro p hp p' hp' hNe have : ∀ q ∈ Finset.univ, p.eval q = p'.eval q ↔ q ∈ (p - p').roots := by intro q _ - simp [mem_roots] + simp constructor <;> intro h · constructor · intro h'; contrapose! hNe; exact sub_eq_zero.mp h' @@ -304,7 +309,7 @@ theorem distanceLE_polynomial_degreeLE : OracleInterface.distanceLE (R⦃≤ d obtain ⟨x, hMem, hx⟩ := this exact ⟨x, hMem, fun h => by simp_all⟩ -theorem distanceLE_mvPolynomial_degreeLE {σ : Type} [Fintype σ] [DecidableEq σ] : +theorem distanceLE_mvPolynomial_degreeLE {σ : Type*} [Fintype σ] [DecidableEq σ] : OracleInterface.distanceLE (R⦃≤ d⦄[X σ]) (Fintype.card σ * d) := by simp [OracleInterface.distanceLE, instOracleInterfaceMvPolynomialDegreeLE, MvPolynomial.mem_restrictDegree] @@ -315,7 +320,7 @@ end PolynomialDistance section Vector -variable {n : ℕ} {α : Type} +variable {n : ℕ} {α : Type*} /-- Vectors of the form `Fin n → α` can be accessed via queries on their indices. 
-/ instance instOracleInterfaceForallFin : OracleInterface (Fin n → α) where @@ -339,15 +344,14 @@ end Vector section Test -variable {ι : Type} {spec : OracleSpec ι} {R : Type} [CommSemiring R] +variable {ι : Type u} {spec : OracleSpec ι} {R : Type*} [CommSemiring R] open Polynomial OracleInterface SimOracle OracleSpec in theorem poly_query_list_mapM {m : ℕ} (D : Fin m ↪ R) (p : R[X]) : simulateQ (simOracle spec (fun _ : Unit => p)) (List.finRange m |>.mapM (fun i => query (spec := [fun _ : Unit => R[X]]ₒ) () (D i))) = (pure (List.finRange m |>.map (fun i => p.eval (D i))) : OracleComp spec (List R)) := by - simp [simOracle, OracleSpec.SubSpec.liftM_query_eq_liftM_liftM, StateT.run'_eq, - simulateQ, StateT.run] + simp [simOracle, OracleSpec.SubSpec.liftM_query_eq_liftM_liftM, simulateQ] sorry end Test diff --git a/ArkLib/OracleReduction/Prelude.lean b/ArkLib/OracleReduction/Prelude.lean index 9479e32ac..d9d70d290 100644 --- a/ArkLib/OracleReduction/Prelude.lean +++ b/ArkLib/OracleReduction/Prelude.lean @@ -5,6 +5,7 @@ Authors: Quang Dao -/ import VCVio +import Batteries.Data.Vector.Lemmas /-! # Prelude for Interactive (Oracle) Reductions @@ -14,6 +15,15 @@ import VCVio open OracleComp +-- -- Notation for sums (maybe not needed?) +-- @[inherit_doc] postfix:max "↪ₗ" => Sum.inl +-- @[inherit_doc] postfix:max "↪ᵣ" => Sum.inr + +/-- `⊕ᵥ` is notation for `Sum.rec`, the dependent elimination of `Sum. + +This sends `(a : α) → γ (.inl a)` and `(b : β) → γ (.inr b)` to `(a : α ⊕ β) → γ a`. 
-/ +infixr:35 " ⊕ᵥ " => Sum.rec + -- Figure out where to put this instance instance instDecidableEqOption {α : Type*} [DecidableEq α] : DecidableEq (Option α) := inferInstance @@ -25,9 +35,22 @@ class VCVCompatible (α : Type*) extends Fintype α, Inhabited α where instance {α : Type*} [VCVCompatible α] : DecidableEq α := VCVCompatible.type_decidableEq' +-- TODO: port first to batteries, second to mathlib + +@[simp] +theorem Vector.ofFn_get {α : Type*} {n : ℕ} (v : Vector α n) : Vector.ofFn (Vector.get v) = v := by + ext + simp [getElem] + +def Equiv.rootVectorEquivFin {α : Type*} {n : ℕ} : Vector α n ≃ (Fin n → α) := + ⟨Vector.get, Vector.ofFn, Vector.ofFn_get, fun f => funext <| Vector.get_ofFn f⟩ + +instance Vector.instFintype {α : Type*} {n : ℕ} [VCVCompatible α] : Fintype (Vector α n) := + Fintype.ofEquiv _ (Equiv.rootVectorEquivFin).symm + instance {α : Type*} {n : ℕ} [VCVCompatible α] : VCVCompatible (Fin n → α) where -instance {α : Type*} {n : ℕ} [VCVCompatible α] : VCVCompatible (Vector α n) := sorry +instance {α : Type*} {n : ℕ} [VCVCompatible α] : VCVCompatible (Vector α n) where /-- `Sampleable` extends `VCVCompabible` with `SelectableType` -/ class Sampleable (α : Type) extends VCVCompatible α, SelectableType α @@ -53,24 +76,37 @@ instance : Coe (Fin 2) Direction := ⟨directionEquivFin2.invFun⟩ section Relation -/-- The associated language `Set α` for a relation `α → β → Prop`. -/ -def Function.language {α β} (rel : α → β → Prop) : Set α := - {stmt | ∃ wit, rel stmt wit} +/-- The associated language `Set α` for a relation `Set (α × β)`. 
-/ +@[reducible] +def Set.language {α β} (rel : Set (α × β)) : Set α := + Prod.fst '' rel + +@[simp] +theorem Set.mem_language_iff {α β} (rel : Set (α × β)) (stmt : α) : + stmt ∈ rel.language ↔ ∃ wit, (stmt, wit) ∈ rel := by + simp [language] + +@[simp] +theorem Set.not_mem_language_iff {α β} (rel : Set (α × β)) (stmt : α) : + stmt ∉ rel.language ↔ ∀ wit, (stmt, wit) ∉ rel := by + simp [language] /-- The trivial relation on Boolean statement and unit witness, which outputs the Boolean (i.e. accepts or rejects). -/ -def acceptRejectRel : Bool → Unit → Prop := fun b _ => b +def acceptRejectRel : Set (Bool × Unit) := + { (true, ()) } /-- The trivial relation on Boolean statement, no oracle statements, and unit witness. -/ -def acceptRejectOracleRel : Bool × (∀ _ : Empty, Unit) → Unit → Prop := fun ⟨b, _⟩ _ => b +def acceptRejectOracleRel : Set ((Bool × (∀ _ : Empty, Unit)) × Unit) := + { ((true, isEmptyElim), ()) } @[simp] theorem acceptRejectRel_language : acceptRejectRel.language = { true } := by - unfold Function.language acceptRejectRel; simp + unfold Set.language acceptRejectRel; simp @[simp] theorem acceptRejectOracleRel_language : - acceptRejectOracleRel.language = { ⟨true, isEmptyElim⟩ } := by - unfold Function.language acceptRejectOracleRel; simp; ext; aesop + acceptRejectOracleRel.language = { (true, isEmptyElim) } := by + unfold Set.language acceptRejectOracleRel; simp end Relation diff --git a/ArkLib/OracleReduction/ProtocolSpec.lean b/ArkLib/OracleReduction/ProtocolSpec.lean new file mode 100644 index 000000000..2624513d0 --- /dev/null +++ b/ArkLib/OracleReduction/ProtocolSpec.lean @@ -0,0 +1,482 @@ +/- +Copyright (c) 2024 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.Data.Fin.Basic +import ArkLib.OracleReduction.Prelude +import ArkLib.OracleReduction.OracleInterface + +/-! 
+# Protocol Specifications for (Oracle) Reductions + +This file defines the `ProtocolSpec` type, which is used to specify the protocol between the prover +and the verifier. +-/ + +open OracleComp OracleSpec + +/-- Type signature for an interactive protocol, with `n` messages exchanged. -/ +@[reducible] +def ProtocolSpec (n : ℕ) := Fin n → Direction × Type + +variable {n : ℕ} + +namespace ProtocolSpec + +@[simp] +abbrev getDir (pSpec : ProtocolSpec n) (i : Fin n) := pSpec i |>.1 + +@[simp] +abbrev getType (pSpec : ProtocolSpec n) (i : Fin n) := pSpec i |>.2 + +/-- Subtype of `Fin n` for the indices corresponding to messages in a protocol specification -/ +@[reducible, simp] +def MessageIdx (pSpec : ProtocolSpec n) := + {i : Fin n // (pSpec i).1 = Direction.P_to_V} + +/-- Subtype of `Fin n` for the indices corresponding to challenges in a protocol specification -/ +@[reducible, simp] +def ChallengeIdx (pSpec : ProtocolSpec n) := + {i : Fin n // (pSpec i).1 = Direction.V_to_P} + +/-- Subtype of `Fin k` for the indices corresponding to messages in a protocol specification up to + round `k` -/ +@[reducible, simp] +def MessageIdxUpTo (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := + {i : Fin k // (pSpec <| i.castLE (by omega)).1 = Direction.P_to_V} + +/-- Subtype of `Fin k` for the indices corresponding to challenges in a protocol specification up to + round `k` -/ +@[reducible, simp] +def ChallengeIdxUpTo (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := + {i : Fin k // (pSpec <| i.castLE (by omega)).1 = Direction.V_to_P} + +instance {pSpec : ProtocolSpec n} : CoeHead (MessageIdx pSpec) (Fin n) where + coe := fun i => i.1 +instance {pSpec : ProtocolSpec n} : CoeHead (ChallengeIdx pSpec) (Fin n) where + coe := fun i => i.1 + +/-- The type of the `i`-th message in a protocol specification. + +This does not distinguish between messages received in full or as an oracle. 
-/ +@[reducible, inline, specialize, simp] +def Message (pSpec : ProtocolSpec n) (i : MessageIdx pSpec) := (pSpec i.val).2 + +/-- The type of the `i`-th challenge in a protocol specification -/ +@[reducible, inline, specialize, simp] +def Challenge (pSpec : ProtocolSpec n) (i : ChallengeIdx pSpec) := (pSpec i.val).2 + +/-- The type of all messages in a protocol specification. Uncurried version of `Message`. -/ +@[reducible, inline, specialize] +def Messages (pSpec : ProtocolSpec n) : Type := ∀ i, pSpec.Message i + +/-- The type of all challenges in a protocol specification -/ +@[reducible, inline, specialize] +def Challenges (pSpec : ProtocolSpec n) : Type := ∀ i, pSpec.Challenge i + +/-- The indexed family of messages from the prover up to round `k`. + +Note that by definition, `MessagesUpTo (Fin.last n)` is definitionally equal to `Messages`. -/ +@[reducible, inline, specialize] +def MessagesUpTo (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := + (i : pSpec.MessageIdxUpTo k) → pSpec.Message ⟨i.val.castLE (by omega), i.property⟩ + +/-- The indexed family of challenges from the verifier up to round `k`. + +Note that by definition, `ChallengesUpTo (Fin.last n)` is definitionally equal to `Challenges`. 
-/ +@[reducible, inline, specialize] +def ChallengesUpTo (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := + (i : pSpec.ChallengeIdxUpTo k) → pSpec.Challenge ⟨i.val.castLE (by omega), i.property⟩ + +section Instances + +/-- There is only one protocol specification with 0 messages (the empty one) -/ +instance : Unique (ProtocolSpec 0) := inferInstance + +-- Two different ways to write the empty protocol specification: `![]` and `default` + +instance : ∀ i, VCVCompatible (Challenge ![] i) := fun ⟨i, _⟩ => Fin.elim0 i +instance : ∀ i, OracleInterface (Message ![] i) := fun ⟨i, _⟩ => Fin.elim0 i + +instance : ∀ i, VCVCompatible ((default : ProtocolSpec 0).Challenge i) := fun ⟨i, _⟩ => Fin.elim0 i +instance : ∀ i, OracleInterface ((default : ProtocolSpec 0).Message i) := fun ⟨i, _⟩ => Fin.elim0 i + +variable {Msg Chal : Type} + +instance : IsEmpty (ChallengeIdx ![(.P_to_V, Msg)]) := by + simp [ChallengeIdx] + infer_instance + +instance : Unique (MessageIdx ![(.P_to_V, Msg)]) where + default := ⟨0, by simp⟩ + uniq := fun i => by ext; simp + +instance [inst : OracleInterface Msg] : ∀ i, OracleInterface (Message ![(.P_to_V, Msg)] i) + | ⟨0, _⟩ => inst + +instance : ∀ i, VCVCompatible (Challenge ![(.P_to_V, Msg)] i) + | ⟨0, h⟩ => nomatch h + +instance : IsEmpty (MessageIdx ![(.V_to_P, Chal)]) := by + simp [MessageIdx] + infer_instance + +instance : Unique (ChallengeIdx ![(.V_to_P, Chal)]) where + default := ⟨0, by simp⟩ + uniq := fun i => by ext; simp + +instance : ∀ i, OracleInterface (Message ![(.V_to_P, Chal)] i) + | ⟨0, h⟩ => nomatch h + +instance [inst : VCVCompatible Chal] : ∀ i, VCVCompatible (Challenge ![(.V_to_P, Chal)] i) + | ⟨0, _⟩ => inst + +end Instances + +variable {pSpec : ProtocolSpec n} + +namespace Messages + +/-- Take the messages up to round `k` -/ +def take (k : Fin (n + 1)) (messages : Messages pSpec) : MessagesUpTo k pSpec := + fun i => messages ⟨i.val.castLE (by omega), i.property⟩ + +end Messages + +namespace Challenges + +/-- Take the challenges up to 
round `k` -/ +def take (k : Fin (n + 1)) (challenges : Challenges pSpec) : ChallengesUpTo k pSpec := + fun i => challenges ⟨i.val.castLE (by omega), i.property⟩ + +end Challenges + +namespace MessagesUpTo + +/-- There is only one transcript for the empty protocol, + represented as `default : ProtocolSpec 0` -/ +instance {k : Fin 1} : Unique (MessagesUpTo k (default : ProtocolSpec 0)) where + default := fun i => () + uniq := by solve_by_elim + +/-- There is only one transcript for the empty protocol, represented as `![]` -/ +instance {k : Fin 1} : Unique (MessagesUpTo k ![]) where + default := fun ⟨⟨i, h⟩, _⟩ => by + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + uniq := fun _ => by + ext ⟨⟨i, h⟩, _⟩ + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + +/-- There is only one transcript for any protocol specification with cutoff index 0 -/ +instance : Unique (MessagesUpTo 0 pSpec) where + default := fun ⟨i, _⟩ => Fin.elim0 i + uniq := fun T => by ext ⟨i, _⟩; exact Fin.elim0 i + +/-- Concatenate the `k`-th message to the end of the tuple of messages up to round `k`, assuming + round `k` is a message round. -/ +def concat {k : Fin n} (messages : MessagesUpTo k.castSucc pSpec) + (h : (pSpec k).1 = .P_to_V) (msg : pSpec.Message ⟨k, h⟩) : MessagesUpTo k.succ pSpec := + fun i => if hi : i.1.1 < k then messages ⟨⟨i.1.1, hi⟩, i.property⟩ else + (by simp [Fin.eq_last_of_not_lt hi]; exact msg) + +/-- Extend the tuple of messages up to round `k` to up to round `k + 1`, assuming round `k` is a + challenge round (so no message from the prover is sent). 
-/ +def extend {k : Fin n} (messages : MessagesUpTo k.castSucc pSpec) + (h : (pSpec k).1 = .V_to_P) : MessagesUpTo k.succ pSpec := + fun i => if hi : i.1.1 < k then messages ⟨⟨i.1.1, hi⟩, i.property⟩ else + -- contradiction proof + (by + haveI := Fin.eq_last_of_not_lt hi + haveI := i.property + simp_all [Fin.castLE]) + +end MessagesUpTo + +namespace ChallengesUpTo + +/-- There is only one transcript for the empty protocol, + represented as `default : ProtocolSpec 0` -/ +instance {k : Fin 1} : Unique (ChallengesUpTo k (default : ProtocolSpec 0)) where + default := fun i => () + uniq := by solve_by_elim + +/-- There is only one transcript for the empty protocol, represented as `![]` -/ +instance {k : Fin 1} : Unique (ChallengesUpTo k ![]) where + default := fun ⟨⟨i, h⟩, _⟩ => by + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + uniq := fun _ => by + ext ⟨⟨i, h⟩, _⟩ + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + +/-- There is only one transcript for any protocol specification with cutoff index 0 -/ +instance : Unique (ChallengesUpTo 0 pSpec) where + default := fun ⟨i, _⟩ => Fin.elim0 i + uniq := fun T => by ext ⟨i, _⟩; exact Fin.elim0 i + +/-- Concatenate the `k`-th challenge to the end of the tuple of challenges up to round `k`, assuming + round `k` is a challenge round. -/ +def concat {k : Fin n} (challenges : ChallengesUpTo k.castSucc pSpec) + (h : (pSpec k).1 = .V_to_P) (chal : pSpec.Challenge ⟨k, h⟩) : ChallengesUpTo k.succ pSpec := + fun i => if hi : i.1.1 < k then challenges ⟨⟨i.1.1, hi⟩, i.property⟩ else + (by simp [Fin.eq_last_of_not_lt hi]; exact chal) + +/-- Extend the tuple of challenges up to round `k` to up to round `k + 1`, assuming round `k` is a + message round (so no challenge from the verifier is sent). 
-/ +def extend {k : Fin n} (challenges : ChallengesUpTo k.castSucc pSpec) + (h : (pSpec k).1 = .P_to_V) : ChallengesUpTo k.succ pSpec := + fun i => if hi : i.1.1 < k then challenges ⟨⟨i.1.1, hi⟩, i.property⟩ else + -- contradiction proof + (by + haveI := Fin.eq_last_of_not_lt hi + haveI := i.property + simp_all [Fin.castLE]) + +end ChallengesUpTo + +/-- A (partial) transcript of a protocol specification, indexed by some `k : Fin (n + 1)`, is a +list of messages from the protocol for all indices `i` less than `k`. + +Note that by definition, `Transcript (Fin.last n) pSpec` is definitionally equal to +`FullTranscript pSpec`. -/ +@[reducible, inline, specialize] +def Transcript (k : Fin (n + 1)) (pSpec : ProtocolSpec n) := + (i : Fin k) → pSpec.getType (Fin.castLE (by omega) i) + +/-- The full transcript of an interactive protocol, which is a list of messages and challenges. + +Note that this is definitionally equal to `Transcript (Fin.last n) pSpec`. -/ +@[reducible, inline, specialize] +def FullTranscript (pSpec : ProtocolSpec n) := (i : Fin n) → pSpec.getType i + +namespace FullTranscript + +@[reducible, inline, specialize] +def messages (transcript : FullTranscript pSpec) (i : MessageIdx pSpec) := + transcript i.val + +@[reducible, inline, specialize] +def challenges (transcript : FullTranscript pSpec) (i : ChallengeIdx pSpec) := + transcript i.val + +/-- There is only one full transcript (the empty one) for an empty protocol -/ +instance : Unique (FullTranscript (default : ProtocolSpec 0)) := inferInstance + +end FullTranscript + +/-! 
### Restriction of Protocol Specifications & Transcripts -/ + +section Restrict + +variable {n : ℕ} + +/-- Take the first `m ≤ n` rounds of a `ProtocolSpec n` -/ +abbrev take (m : ℕ) (h : m ≤ n) (pSpec : ProtocolSpec n) := Fin.take m h pSpec + +/-- Take the last `m ≤ n` rounds of a `ProtocolSpec n` -/ +abbrev rtake (m : ℕ) (h : m ≤ n) (pSpec : ProtocolSpec n) := Fin.rtake m h pSpec + +/-- Take the first `m ≤ n` rounds of a (full) transcript for a protocol specification `pSpec` -/ +abbrev FullTranscript.take {pSpec : ProtocolSpec n} (m : ℕ) (h : m ≤ n) + (transcript : FullTranscript pSpec) : FullTranscript (pSpec.take m h) := + Fin.take m h transcript + +/-- Take the last `m ≤ n` rounds of a (full) transcript for a protocol specification `pSpec` -/ +abbrev FullTranscript.rtake {pSpec : ProtocolSpec n} (m : ℕ) (h : m ≤ n) + (transcript : FullTranscript pSpec) : FullTranscript (pSpec.rtake m h) := + Fin.rtake m h transcript + +end Restrict + +namespace Transcript + +/-- There is only one transcript for the empty protocol -/ +instance {k : Fin 1} : Unique (Transcript k (default : ProtocolSpec 0)) where + default := fun i => () + uniq := by solve_by_elim + +/-- There is only one transcript for the empty protocol, represented as `![]` -/ +instance {k : Fin 1} : Unique (Transcript k ![]) where + default := fun ⟨i, h⟩ => by + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + uniq := fun _ => by + ext ⟨i, h⟩ + have : k = 0 := Fin.fin_one_eq_zero k + subst this; simp at h + +/-- There is only one transcript for any protocol with cutoff index 0 -/ +instance : Unique (Transcript 0 pSpec) where + default := fun i => Fin.elim0 i + uniq := fun T => by ext i; exact Fin.elim0 i + +-- Potential natural re-indexing of messages and challenges. +-- Not needed for now, but could be useful. 
+ +-- instance instFinEnumMessageIdx : FinEnum pSpec.MessageIdx := +-- FinEnum.Subtype.finEnum fun x ↦ pSpec.getDir x = Direction.P_to_V +-- instance instFinEnumChallengeIdx : FinEnum pSpec.ChallengeIdx := +-- FinEnum.Subtype.finEnum fun x ↦ pSpec.getDir x = Direction.V_to_P + +/-- Concatenate a message to the end of a partial transcript. This is definitionally equivalent to + `Fin.snoc`. -/ +@[inline] +abbrev concat {m : Fin n} (msg : pSpec.getType m) + (T : Transcript m.castSucc pSpec) : Transcript m.succ pSpec := Fin.snoc T msg + +-- Define conversions to and from `Transcript` with `MessagesUpTo` and `ChallengesUpTo` + +variable {k : Fin (n + 1)} + +/-- Extract messages from a transcript up to round `k` -/ +def toMessagesUpTo (transcript : Transcript k pSpec) : MessagesUpTo k pSpec := + fun ⟨i, _⟩ => transcript i + +/-- Extract challenges from a transcript up to round `k` -/ +def toChallengesUpTo (transcript : Transcript k pSpec) : ChallengesUpTo k pSpec := + fun ⟨i, _⟩ => transcript i + +def toMessagesChallenges (transcript : Transcript k pSpec) : + MessagesUpTo k pSpec × ChallengesUpTo k pSpec := + (transcript.toMessagesUpTo, transcript.toChallengesUpTo) + +def ofMessagesChallenges (messages : MessagesUpTo k pSpec) + (challenges : ChallengesUpTo k pSpec) : Transcript k pSpec := + fun i => match h : pSpec.getDir (i.castLE (by omega)) with + | Direction.P_to_V => messages ⟨i.castLE (by omega), h⟩ + | Direction.V_to_P => challenges ⟨i.castLE (by omega), h⟩ + +/-- An equivalence between transcripts up to round `k` and the tuple of messages and challenges up + to round `k`. -/ +@[simps!] 
+def equivMessagesChallenges : + Transcript k pSpec ≃ (MessagesUpTo k pSpec × ChallengesUpTo k pSpec) where + toFun := toMessagesChallenges + invFun := ofMessagesChallenges.uncurry + left_inv := fun T => by + ext i + simp [ofMessagesChallenges, toMessagesChallenges, toMessagesUpTo, toChallengesUpTo] + split <;> simp + right_inv := fun ⟨messages, challenges⟩ => by + ext i + · have : (pSpec <| i.val.castLE (by omega)).1 = Direction.P_to_V := i.property + simp [ofMessagesChallenges, toMessagesChallenges, toMessagesUpTo] + split <;> aesop + · have : (pSpec <| i.val.castLE (by omega)).1 = Direction.V_to_P := i.property + simp [ofMessagesChallenges, toMessagesChallenges, toChallengesUpTo] + split <;> aesop + +-- TODO: state theorem that `Transcript.concat` is equivalent to `MessagesUpTo.{concat/extend}` with +-- `ChallengesUpTo.{extend/concat}`, depending on the direction of the round + +end Transcript + +/-- The specification of whether each message in a protocol specification is available in full + (`None`) or received as an oracle (`Some (instOracleInterface (pSpec.Message i))`). + + This is defined as a type class for notational convenience. 
-/ +class OracleInterfaces (pSpec : ProtocolSpec n) where + oracleInterfaces : ∀ i, Option (OracleInterface (pSpec.Message i)) + +section OracleInterfaces + +variable (pSpec : ProtocolSpec n) [inst : OracleInterfaces pSpec] + +/-- Subtype of `pSpec.MessageIdx` for messages that are received as oracles -/ +@[reducible, inline, specialize] +def OracleMessageIdx := {i : pSpec.MessageIdx // (inst.oracleInterfaces i).isSome } + +/-- The oracle interface instances for messages that are received as oracles -/ +instance {i : OracleMessageIdx pSpec} : OracleInterface (pSpec.Message i) := + (inst.oracleInterfaces i).get i.2 + +/-- Subtype of `pSpec.MessageIdx` for messages that are received in full -/ +@[reducible, inline, specialize] +def PlainMessageIdx := {i : pSpec.MessageIdx // (inst.oracleInterfaces i).isNone } + +/-- The type of messages that are received in full -/ +@[reducible, inline, specialize] +def PlainMessage (i : pSpec.PlainMessageIdx) := pSpec.Message i.1 + +/-- The type of messages that are received as oracles -/ +@[reducible, inline, specialize] +def OracleMessage (i : pSpec.OracleMessageIdx) := pSpec.Message i.1 + +def PlainMessages (pSpec : ProtocolSpec n) [OracleInterfaces pSpec] : Type := + ∀ i, pSpec.PlainMessage i + +def OracleMessages (pSpec : ProtocolSpec n) [OracleInterfaces pSpec] : Type := + ∀ i, pSpec.OracleMessage i + +-- TODO: re-define `OracleReduction` to depend on these oracle interfaces, since currently we +-- assume that _all_ messages are available as oracles in an oracle reduction + +-- Alternatively, we can define a `HybridReduction` structure, where the oracle interface for each +-- message is optional, that can be specialized to `OracleReduction` and `Reduction` + +end OracleInterfaces + +/-- Turn each verifier's challenge into an oracle, where querying a unit type gives back the + challenge -/ +@[reducible, inline, specialize] +instance instChallengeOracleInterface {pSpec : ProtocolSpec n} {i : pSpec.ChallengeIdx} : + 
OracleInterface (pSpec.Challenge i) where + Query := Unit + Response := pSpec.Challenge i + oracle := fun c _ => c + +/-- Query a verifier's challenge for a given challenge round `i`, given the "trivial" challenge +oracle interface -/ +@[reducible, inline, specialize] +def getChallenge (pSpec : ProtocolSpec n) (i : pSpec.ChallengeIdx) : + OracleComp [pSpec.Challenge]ₒ (pSpec.Challenge i) := + (query i () : OracleQuery [pSpec.Challenge]ₒ (pSpec.Challenge i)) + +/-- Turn each verifier's challenge into an oracle, where one needs to query + with an input statement and prior messages up to that round to get a challenge -/ +@[reducible, inline, specialize] +def instChallengeOracleInterfaceFiatShamir {pSpec : ProtocolSpec n} {i : pSpec.ChallengeIdx} + {StmtIn : Type} : OracleInterface (pSpec.Challenge i) where + Query := StmtIn × pSpec.MessagesUpTo i.1.castSucc + Response := pSpec.Challenge i + oracle := fun c _ => c + +/-- The oracle interface for Fiat-Shamir. + +This is the (inefficient) version where we hash the input statement and the entire transcript up to +the point of deriving a new challenge. + +Some variants of Fiat-Shamir takes in a salt each round. We assume that such salts are included in +the input statement (i.e. we can always transform a given reduction into one where every round has a +random salt). 
-/ +@[inline, reducible] +def srChallengeOracle (Statement : Type) {n : ℕ} (pSpec : ProtocolSpec n) : + OracleSpec pSpec.ChallengeIdx := + fun i => (Statement × pSpec.MessagesUpTo i.1.castSucc, pSpec.Challenge i) + +alias fiatShamirSpec := srChallengeOracle + +instance {pSpec : ProtocolSpec n} {Statement : Type} [∀ i, VCVCompatible (pSpec.Challenge i)] : + OracleSpec.FiniteRange (srChallengeOracle Statement pSpec) where + range_inhabited' := fun i => by simp [OracleSpec.range]; infer_instance + range_fintype' := fun i => by simp [OracleSpec.range]; infer_instance + +instance {pSpec : ProtocolSpec n} {Statement : Type} [∀ i, VCVCompatible (pSpec.Challenge i)] : + OracleSpec.FiniteRange (fiatShamirSpec Statement pSpec) := + inferInstanceAs (OracleSpec.FiniteRange (srChallengeOracle Statement pSpec)) + +end ProtocolSpec + +-- Notation for the type signature of an interactive protocol +notation "𝒫——⟦" term "⟧⟶𝒱" => (Direction.P_to_V, term) +notation "𝒫⟵⟦" term "⟧——𝒱" => (Direction.V_to_P, term) + +-- Test notation +def pSpecNotationTest : ProtocolSpec 2 := + ![ 𝒫——⟦ Polynomial (ZMod 101) ⟧⟶𝒱, + 𝒫⟵⟦ ZMod 101 ⟧——𝒱] diff --git a/ArkLib/OracleReduction/Security/Basic.lean b/ArkLib/OracleReduction/Security/Basic.lean index c460336e2..c608e2f2c 100644 --- a/ArkLib/OracleReduction/Security/Basic.lean +++ b/ArkLib/OracleReduction/Security/Basic.lean @@ -1,5 +1,5 @@ /- -Copyright (c) 2024 ArkLib Contributors. All rights reserved. +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ @@ -9,20 +9,19 @@ import ArkLib.OracleReduction.Execution /-! 
# Security Definitions for (Oracle) Reductions - We define the following security properties for (oracle) reductions: + This file defines basic security notions for (oracle) reductions: - (Perfect) Completeness - - (Knowledge) Soundness: We define many variants of soundness and knowledge soundness, including - - (Standard) soundness - - State-restoration soundness - - Round-by-round soundness - All definitions are in the adaptive prover setting. + - (Straightline) (Knowledge) Soundness - - Zero-knowledge: This will be defined in the future + - (Honest-verifier) Zero-knowledge For each security notion, we provide a typeclass for it, so that security can be synthesized automatically with verified transformations. + + See other files in the same directory for more refined soundness notions (i.e. state-restoration, + round-by-round, rewinding, etc.) -/ noncomputable section @@ -30,68 +29,54 @@ noncomputable section open OracleComp OracleSpec ProtocolSpec open scoped NNReal -section find_home - -universe u w - --- #print probEvent - --- #print probEvent_def - --- theorem probEvent_not {ι : Type u} {spec : OracleSpec ι} {α : Type w} {oa : OracleComp spec α} --- [spec.FiniteRange] {p : α → Prop} : [p | oa] ≤ 1 - [fun a => ¬ p a | oa] := by --- simp - -end find_home - -variable {n : ℕ} {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] - {StmtIn WitIn StmtOut WitOut : Type} +variable {ι : Type} {oSpec : OracleSpec ι} [oSpec.FiniteRange] namespace Reduction section Completeness -/-- - A reduction satisfies **completeness** with error `completenessError ≥ 0` and with respect to - input relation `relIn` and output relation `relOut`, if for all valid statement-witness pair - `(stmtIn, witIn)` for `relIn`, the execution between the honest prover and the honest verifier - will result in a tuple `((prvStmtOut, witOut), stmtOut)` such that +variable {StmtIn WitIn StmtOut WitOut : Type} + {n : ℕ} {pSpec : 
ProtocolSpec n} [∀ i, VCVCompatible (pSpec.Challenge i)] + +/-- A reduction satisfies **completeness** with error `completenessError ≥ 0` and with respect to + input relation `relIn` and output relation `relOut` (represented as sets), if for all valid + statement-witness pair `(stmtIn, witIn) ∈ relIn`, the execution between the honest prover + and the honest verifier will result in a tuple `((prvStmtOut, witOut), stmtOut)` such that - - `relOut stmtOut witOut = True`, (the output statement-witness pair is valid) and + - `(stmtOut, witOut) ∈ relOut`, (the output statement-witness pair is valid) and - `prvStmtOut = stmtOut`, (the output statements are the same from both prover and verifier) except with probability `completenessError`. -/ -def completeness (relIn : StmtIn → WitIn → Prop) - (relOut : StmtOut → WitOut → Prop) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) +def completeness (relIn : Set (StmtIn × WitIn)) + (relOut : Set (StmtOut × WitOut)) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) (completenessError : ℝ≥0) : Prop := ∀ stmtIn : StmtIn, ∀ witIn : WitIn, - relIn stmtIn witIn → - [fun ⟨(prvStmtOut, witOut), stmtOut, _⟩ => relOut stmtOut witOut ∧ prvStmtOut = stmtOut + (stmtIn, witIn) ∈ relIn → + [fun ⟨(prvStmtOut, witOut), stmtOut, _⟩ => (stmtOut, witOut) ∈ relOut ∧ prvStmtOut = stmtOut | reduction.run stmtIn witIn] ≥ 1 - completenessError /-- A reduction satisfies **perfect completeness** if it satisfies completeness with error `0`. 
-/ -def perfectCompleteness (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : Prop := +def perfectCompleteness (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) : Prop := completeness relIn relOut reduction 0 /-- Type class for completeness for a reduction -/ -class IsComplete (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) +class IsComplete (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) where completenessError : ℝ≥0 is_complete : completeness relIn relOut reduction completenessError /-- Type class for perfect completeness for a reduction -/ -class IsPerfectComplete (reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) - (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) where +class IsPerfectComplete (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec) where is_perfect_complete : perfectCompleteness relIn relOut reduction -variable {relIn : StmtIn → WitIn → Prop} {relOut : StmtOut → WitOut → Prop} - {reduction : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut} +variable {relIn : Set (StmtIn × WitIn)} {relOut : Set (StmtOut × WitOut)} + {reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut pSpec} instance [reduction.IsPerfectComplete relIn relOut] : IsComplete relIn relOut reduction where completenessError := 0 @@ -102,8 +87,8 @@ instance [reduction.IsPerfectComplete relIn relOut] : IsComplete relIn relOut re @[simp] theorem perfectCompleteness_eq_prob_one : reduction.perfectCompleteness relIn relOut ↔ - ∀ stmtIn witIn, relIn stmtIn witIn → - [fun ⟨(prvStmtOut, witOut), stmtOut, _⟩ => relOut stmtOut witOut ∧ prvStmtOut = stmtOut 
+ ∀ stmtIn witIn, (stmtIn, witIn) ∈ relIn → + [fun ⟨(prvStmtOut, witOut), stmtOut, _⟩ => (stmtOut, witOut) ∈ relOut ∧ prvStmtOut = stmtOut | reduction.run stmtIn witIn] = 1 := by refine forall_congr' fun stmtIn => forall_congr' fun stmtOut => forall_congr' fun _ => ?_ rw [ENNReal.coe_zero, tsub_zero, ge_iff_le, OracleComp.one_le_probEvent_iff, @@ -122,74 +107,26 @@ end Reduction section Soundness -/- We define 3 variants each of soundness and knowledge soundness, all in the adaptive setting: (our - definitions are automatically in the adaptive setting, since there is no `crs`?) +/-! We define 3 variants each of soundness and knowledge soundness: 1. (Plain) soundness 2. Knowledge soundness - 3. State-restoration soundness - 4. State-restoration knowledge soundness - 5. Round-by-round soundness - 6. Round-by-round knowledge soundness --/ -section Prover + For adaptivity, we may want to seed the definition with a term + `chooseStmtIn : OracleComp oSpec StmtIn` + (though this is essentially the same as quantifying over all `stmtIn : StmtIn`). -/-! Note: all soundness definitions are really defined for the **verifier** only. The (honest) + Note: all soundness definitions are really defined for the **verifier** only. The (honest) prover does not feature into the definitions. +-/ -TODO: first define soundness in the `(Oracle)Verifier` namespace, then soundness for -`(Oracle)Reduction` should just be a wrapper over the verifier's definitions. -/ - -/-- It's not clear whether we need the stronger `AdaptiveProver` type, since the soundness notions - are stated with regards to an arbitrary statement anyway (for plain soundness, the statement is - arbitrary among the ones that are not in the language). 
-/ -structure AdaptiveProver (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) extends Prover pSpec oSpec StmtIn WitIn StmtOut WitOut - where - chooseStmtIn : OracleComp oSpec StmtIn - --- /-- Version of `challengeOracle` that requires querying with the statement and prior messages. - --- This is a stepping stone toward the Fiat-Shamir transform. -/ -def srChallengeOracle (pSpec : ProtocolSpec n) (Statement : Type) : - OracleSpec (pSpec.ChallengeIdx) := - fun i => (Statement × pSpec.Transcript i.1, pSpec.Challenge i) - -/-- A **state-restoration** prover in a reduction is a modified prover that has query access to - challenge oracles that can return the `i`-th challenge, for all `i : pSpec.ChallengeIdx`, given - the input statement and the transcript up to that point. - - It further takes in the input statement and witness, and outputs a full transcript of interaction, - along with the output statement and witness. -/ -structure SRProver (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) where - srProve : StmtIn → WitIn → - OracleComp (oSpec ++ₒ (srChallengeOracle pSpec StmtIn)) - (pSpec.FullTranscript × StmtOut × WitOut) - --- /-- Running a state-restoration prover -/ --- def SRProver.run --- (prover : SRProver pSpec oSpec StmtIn WitIn StmtOut WitOut) --- (stmtIn : StmtIn) (witIn : WitIn) : --- OracleComp (oSpec ++ₒ challengeOracle' pSpec StmtIn) --- (StmtOut × WitOut × pSpec.FullTranscript × --- QueryLog (oSpec ++ₒ challengeOracle' pSpec StmtIn)) --- := do --- let ⟨state, stmt, transcript⟩ ← prover.stateRestorationQuery stmtIn --- return ⟨transcript, state⟩ - -end Prover - -section Extractor +namespace Extractor /- We define different types of extractors here -/ -variable {n : ℕ} (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) - (StmtIn WitIn WitOut : Type) +variable (oSpec : OracleSpec ι) (StmtIn WitIn WitOut : Type) {n : ℕ} (pSpec : ProtocolSpec n) -/-- - A straightline, 
deterministic, non-oracle-querying extractor takes in the output witness, the +/-- A straightline, deterministic, non-oracle-querying extractor takes in the output witness, the initial statement, the IOR transcript, and the query logs from the prover and verifier, and returns a corresponding initial witness. @@ -199,59 +136,24 @@ variable {n : ℕ} (pSpec : ProtocolSpec n) {ι : Type} (oSpec : OracleSpec ι) This form of extractor suffices for proving knowledge soundness of most hash-based IOPs. -/ -def StraightlineExtractor := - WitOut → -- output witness +def Straightline := StmtIn → -- input statement + WitOut → -- output witness FullTranscript pSpec → -- reduction transcript QueryLog oSpec → -- prover's query log QueryLog oSpec → -- verifier's query log OracleComp oSpec WitIn -- input witness -/-- A round-by-round extractor with index `m` is given the input statement, a partial transcript - of length `m`, the prover's query log, and returns a witness to the statement. - - Note that the RBR extractor does not need to take in the output statement or witness. -/ -def RBRExtractor := (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → QueryLog oSpec → WitIn - -section Rewinding - -/-! 
TODO: under development -/ - -/-- The oracle interface to call the prover as a black box -/ -def OracleSpec.proverOracle (pSpec : ProtocolSpec n) (StmtIn : Type) : - OracleSpec pSpec.MessageIdx := fun i => (StmtIn × pSpec.Transcript i, pSpec.Message i) - --- def SimOracle.proverImpl (P : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : --- SimOracle.Stateless (OracleSpec.proverOracle pSpec StmtIn) oSpec := sorry - -structure RewindingExtractor (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) - (StmtIn StmtOut WitIn WitOut : Type) where - /-- The state of the extractor -/ - ExtState : Type - /-- Simulate challenge queries for the prover -/ - simChallenge : SimOracle.Stateful [pSpec.Challenge]ₒ [pSpec.Challenge]ₒ ExtState - /-- Simulate oracle queries for the prover -/ - simOracle : SimOracle.Stateful oSpec oSpec ExtState - /-- Run the extractor with the prover's oracle interface, allowing for calling the prover multiple - times -/ - runExt : StmtOut → WitOut → StmtIn → - StateT ExtState (OracleComp (OracleSpec.proverOracle pSpec StmtIn)) WitIn - --- Challenge: need environment to update & maintain the prover's states after each extractor query - --- def RewindingExtractor.run --- (P : AdaptiveProver pSpec oSpec StmtIn WitIn StmtOut WitOut) --- (E : RewindingExtractor pSpec oSpec StmtIn StmtOut WitIn WitOut) : --- OracleComp oSpec WitIn := sorry - -end Rewinding - end Extractor namespace Verifier -/-- - A reduction satisfies **soundness** with error `soundnessError ≥ 0` and with respect to input +variable {oSpec : OracleSpec ι} + {StmtIn WitIn StmtOut WitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + +/-- A reduction satisfies **soundness** with error `soundnessError ≥ 0` and with respect to input language `langIn : Set StmtIn` and output language `langOut : Set StmtOut` if: - for all (malicious) provers with arbitrary types for `WitIn`, `WitOut`, - for all arbitrary `witIn`, @@ -264,11 +166,11 @@ 
namespace Verifier `stmtOut ∉ langOut` with probability at least `1 - soundnessError`) -/ def soundness (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) (soundnessError : ℝ≥0) : Prop := ∀ WitIn WitOut : Type, ∀ witIn : WitIn, - ∀ prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut, + ∀ prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec, ∀ stmtIn ∉ langIn, letI reduction := Reduction.mk prover verifier [fun ⟨_, stmtOut, _⟩ => stmtOut ∈ langOut @@ -276,7 +178,7 @@ def soundness (langIn : Set StmtIn) (langOut : Set StmtOut) /-- Type class for soundness for a verifier -/ class IsSound (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) where + (verifier : Verifier oSpec StmtIn StmtOut pSpec) where soundnessError : ℝ≥0 is_sound : soundness langIn langOut verifier soundnessError @@ -284,8 +186,7 @@ class IsSound (langIn : Set StmtIn) (langOut : Set StmtOut) -- functions (receive challenges and send messages), and be able to observe & simulate the prover's -- oracle queries -/-- - A reduction satisfies **(straightline) knowledge soundness** with error `knowledgeError ≥ 0` and +/-- A reduction satisfies **(straightline) knowledge soundness** with error `knowledgeError ≥ 0` and with respect to input relation `relIn` and output relation `relOut` if: - there exists a straightline extractor `E`, such that - for all input statement `stmtIn`, witness `witIn`, and (malicious) prover `prover`, @@ -295,260 +196,47 @@ class IsSound (langIn : Set StmtIn) (langOut : Set StmtOut) then the probability that `(stmtIn, witIn')` is not valid and yet `(stmtOut, witOut)` is valid is at most `knowledgeError`. 
-/ -def knowledgeSoundness (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) (knowledgeError : ℝ≥0) : Prop := - ∃ extractor : StraightlineExtractor pSpec oSpec StmtIn WitIn WitOut, +def knowledgeSoundness (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) (knowledgeError : ℝ≥0) : Prop := + ∃ extractor : Extractor.Straightline oSpec StmtIn WitIn WitOut pSpec, ∀ stmtIn : StmtIn, ∀ witIn : WitIn, - ∀ prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut, + ∀ prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec, letI reduction := Reduction.mk prover verifier [fun ⟨stmtIn, witIn, stmtOut, witOut⟩ => - ¬ relIn stmtIn witIn ∧ relOut stmtOut witOut + (stmtIn, witIn) ∉ relIn ∧ (stmtOut, witOut) ∈ relOut | do let ⟨(_, witOut), stmtOut, transcript, proveQueryLog, verifyQueryLog⟩ ← reduction.runWithLog stmtIn witIn let extractedWitIn ← - liftComp (extractor witOut stmtIn transcript proveQueryLog.fst verifyQueryLog) _ + liftComp (extractor stmtIn witOut transcript proveQueryLog.fst verifyQueryLog) _ return (stmtIn, extractedWitIn, stmtOut, witOut)] ≤ knowledgeError /-- Type class for knowledge soundness for a verifier -/ -class IsKnowledgeSound (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) where +class IsKnowledgeSound (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) where knowledgeError : ℝ≥0 is_knowledge_sound : knowledgeSoundness relIn relOut verifier knowledgeError /-- An extractor is **monotone** if its success probability on a given query log is the same as the success probability on any extension of that query log. 
-/ -class StraightlineExtractor.IsMonotone [oSpec.FiniteRange] - (E : StraightlineExtractor pSpec oSpec StmtIn WitIn WitOut) - (relIn : StmtIn → WitIn → Prop) where +class Extractor.Straightline.IsMonotone + (relIn : Set (StmtIn × WitIn)) + (E : Extractor.Straightline oSpec StmtIn WitIn WitOut pSpec) + [oSpec.FiniteRange] + where is_monotone : ∀ witOut stmtIn transcript, ∀ proveQueryLog₁ proveQueryLog₂ : oSpec.QueryLog, ∀ verifyQueryLog₁ verifyQueryLog₂ : oSpec.QueryLog, proveQueryLog₁.Sublist proveQueryLog₂ → verifyQueryLog₁.Sublist verifyQueryLog₂ → -- Placeholder probability for now, probably need to consider the whole game - [fun witIn => relIn stmtIn witIn | E witOut stmtIn transcript proveQueryLog₁ verifyQueryLog₁] ≤ - [fun witIn => relIn stmtIn witIn | E witOut stmtIn transcript proveQueryLog₂ verifyQueryLog₂] + [fun witIn => (stmtIn, witIn) ∈ relIn | + E stmtIn witOut transcript proveQueryLog₁ verifyQueryLog₁] ≤ + [fun witIn => (stmtIn, witIn) ∈ relIn | + E stmtIn witOut transcript proveQueryLog₂ verifyQueryLog₂] -- Pr[extraction game succeeds on proveQueryLog₁, verifyQueryLog₁] -- ≤ Pr[extraction game succeeds on proveQueryLog₂, verifyQueryLog₂] -section StateRestoration - --- /-- State-restoration soundness -/ --- def srSoundness (verifier : Verifier pSpec oSpec StmtIn StmtOut) --- (langIn : Set StmtIn) (langOut : Set StmtOut) (SRSoundnessError : ENNReal) : Prop := --- ∀ stmtIn ∉ langIn, --- ∀ witIn : WitIn, --- ∀ SRProver : SRProver pSpec oSpec StmtIn WitIn StmtOut WitOut, --- let ⟨_, witOut, transcript, queryLog⟩ ← (simulateQ ... 
(SRProver.run stmtIn witIn)).run --- let stmtOut ← verifier.run stmtIn transcript --- return stmtOut ∉ langOut - --- State-restoration knowledge soundness (w/ straightline extractor) - -end StateRestoration - -section RoundByRound - -instance : Fintype (pSpec.ChallengeIdx) := Subtype.fintype (fun i => pSpec.getDir i = .V_to_P) - -/-- A (deterministic) state function for a verifier, with respect to input language `langIn` and - output language `langOut`. This is used to define round-by-round (knowledge) soundness. -/ -structure StateFunction (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) [oSpec.FiniteRange] - (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - where - toFun : (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → Prop - /-- For all input statement not in the language, the state function is false for the empty - transcript -/ - toFun_empty : ∀ stmt ∉ langIn, toFun 0 stmt default = False - /-- If the state function is false for a partial transcript, and the next message is from the - prover to the verifier, then the state function is also false for the new partial transcript - regardless of the message -/ - toFun_next : ∀ m, pSpec.getDir m = .P_to_V → ∀ stmt tr, toFun m.castSucc stmt tr = False → - ∀ msg, toFun m.succ stmt (tr.snoc msg) = False - /-- If the state function is false for a full transcript, the verifier will not output a statement - in the output language -/ - toFun_full : ∀ stmt tr, toFun (.last n) stmt tr = False → - [(· ∈ langOut) | verifier.run stmt tr] = 0 - -instance {langIn : Set StmtIn} {langOut : Set StmtOut} - {verifier : Verifier pSpec oSpec StmtIn StmtOut} : - CoeFun (verifier.StateFunction pSpec oSpec langIn langOut) - (fun _ => (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → Prop) := ⟨fun f => f.toFun⟩ - -/-- - A protocol with `verifier` satisfies round-by-round soundness with respect to input language - `langIn`, output language `langOut`, and error `rbrSoundnessError` if: - - - 
there exists a state function `stateFunction` for the verifier and the input/output languages, - such that - - for all initial statement `stmtIn` not in `langIn`, - - for all initial witness `witIn`, - - for all provers `prover`, - - for all `i : Fin n` that is a round corresponding to a challenge, - - the probability that: - - the state function is false for the partial transcript output by the prover - - the state function is true for the partial transcript appended by next challenge (chosen - randomly) - - is at most `rbrSoundnessError i`. --/ -def rbrSoundness (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - ∃ stateFunction : verifier.StateFunction pSpec oSpec langIn langOut, - ∀ stmtIn ∉ langIn, - ∀ WitIn WitOut : Type, - ∀ witIn : WitIn, - ∀ prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut, - ∀ i : pSpec.ChallengeIdx, - let ex : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) _ := do - return (← prover.runToRound i.1.castSucc stmtIn witIn, ← pSpec.getChallenge i) - [fun ⟨⟨transcript, _⟩, challenge⟩ => - ¬ stateFunction i.1.castSucc stmtIn transcript ∧ - stateFunction i.1.succ stmtIn (transcript.snoc challenge) - | ex] ≤ - rbrSoundnessError i - -/-- Type class for round-by-round soundness for a verifier - -Note that we put the error as a field in the type class to make it easier for synthesization -(often the rbr error will need additional simplification / proof) -/ -class IsRBRSound (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) where - rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0 - is_rbr_sound : rbrSoundness langIn langOut verifier rbrSoundnessError - -/-- - A protocol with `verifier` satisfies round-by-round knowledge soundness with respect to input - relation `relIn`, output relation `relOut`, and error `rbrKnowledgeError` if: - - - there exists a state function `stateFunction` for the verifier and 
the languages of the - input/output relations, such that - - for all initial statement `stmtIn` not in the language of `relIn`, - - for all initial witness `witIn`, - - for all provers `prover`, - - for all `i : Fin n` that is a round corresponding to a challenge, - - the probability that: - - the state function is false for the partial transcript output by the prover - - the state function is true for the partial transcript appended by next challenge (chosen - randomly) - - is at most `rbrKnowledgeError i`. --/ -def rbrKnowledgeSoundness (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - ∃ stateFunction : verifier.StateFunction pSpec oSpec relIn.language relOut.language, - ∃ extractor : RBRExtractor pSpec oSpec StmtIn WitIn, - ∀ stmtIn : StmtIn, - ∀ witIn : WitIn, - ∀ prover : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut, - ∀ i : pSpec.ChallengeIdx, - let ex : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) _ := (do - let result ← prover.runWithLogToRound i.1.castSucc stmtIn witIn - let chal ← pSpec.getChallenge i - return (result, chal)) - [fun ⟨⟨transcript, _, proveQueryLog⟩, challenge⟩ => - letI extractedWitIn := extractor i.1.castSucc stmtIn transcript proveQueryLog.fst - ¬ relIn stmtIn extractedWitIn ∧ - ¬ stateFunction i.1.castSucc stmtIn transcript ∧ - stateFunction i.1.succ stmtIn (transcript.snoc challenge) - | ex] ≤ rbrKnowledgeError i - -/-- Type class for round-by-round knowledge soundness for a verifier - -Note that we put the error as a field in the type class to make it easier for synthesization -(often the rbr error will need additional simplification / proof) --/ -class IsRBRKnowledgeSound (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) where - rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0 - is_rbr_knowledge_sound : rbrKnowledgeSoundness relIn relOut 
verifier rbrKnowledgeError - -/-- A round-by-round extractor is **monotone** if its success probability on a given query log - is the same as the success probability on any extension of that query log. -/ -class RBRExtractor.IsMonotone (E : RBRExtractor pSpec oSpec StmtIn WitIn) - (relIn : StmtIn → WitIn → Prop) where - is_monotone : ∀ roundIdx stmtIn transcript, - ∀ proveQueryLog₁ proveQueryLog₂ : oSpec.QueryLog, - -- ∀ verifyQueryLog₁ verifyQueryLog₂ : oSpec.QueryLog, - proveQueryLog₁.Sublist proveQueryLog₂ → - -- verifyQueryLog₁.Sublist verifyQueryLog₂ → - -- Placeholder condition for now, will need to consider the whole game w/ probabilities - relIn stmtIn (E roundIdx stmtIn transcript proveQueryLog₁) → - relIn stmtIn (E roundIdx stmtIn transcript proveQueryLog₂) - -end RoundByRound - -section Implications - -/- TODO: add the following results -- `knowledgeSoundness` implies `soundness` -- `roundByRoundSoundness` implies `soundness` -- `roundByRoundKnowledgeSoundness` implies `roundByRoundSoundness` -- `roundByRoundKnowledgeSoundness` implies `knowledgeSoundness` - -In other words, we have a lattice of security notions, with `knowledge` and `roundByRound` being -two strengthenings of soundness. --/ - -/-- Knowledge soundness with knowledge error `knowledgeError < 1` implies soundness with the same -soundness error `knowledgeError`, and for the corresponding input and output languages. -/ -theorem knowledgeSoundness_implies_soundness (relIn : StmtIn → WitIn → Prop) - (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (knowledgeError : ℝ≥0) (hLt : knowledgeError < 1) : - knowledgeSoundness relIn relOut verifier knowledgeError → - soundness relIn.language relOut.language verifier knowledgeError := by - simp [knowledgeSoundness, soundness, Function.language, Function.uncurry] - intro extractor hKS WitIn' WitOut' witIn' prover stmtIn hStmtIn - sorry - -- have hKS' := hKS stmtIn witIn' prover - -- clear hKS - -- contrapose! 
hKS' - -- constructor - -- · convert hKS'; rename_i result - -- obtain ⟨transcript, queryLog, stmtOut, witOut⟩ := result - -- simp - -- sorry - -- · simp only [Function.language, Set.mem_setOf_eq, not_exists] at hStmtIn - -- simp only [Functor.map, Seq.seq, PMF.bind_bind, Function.comp_apply, PMF.pure_bind, hStmtIn, - -- PMF.bind_const, PMF.pure_apply, eq_iff_iff, iff_false, not_true_eq_false, ↓reduceIte, - -- zero_add, ENNReal.coe_lt_one_iff, hLt] - -/-- Round-by-round soundness with error `rbrSoundnessError` implies soundness with error -`∑ i, rbrSoundnessError i`, where the sum is over all rounds `i`. -/ -theorem rbrSoundness_implies_soundness (langIn : Set StmtIn) (langOut : Set StmtOut) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : - rbrSoundness langIn langOut verifier rbrSoundnessError → - soundness langIn langOut verifier (∑ i, rbrSoundnessError i) := by sorry - -/-- Round-by-round knowledge soundness with error `rbrKnowledgeError` implies round-by-round -soundness with the same error `rbrKnowledgeError`. -/ -theorem rbrKnowledgeSoundness_implies_rbrSoundness (relIn : StmtIn → WitIn → Prop) - (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : - rbrKnowledgeSoundness relIn relOut verifier rbrKnowledgeError → - rbrSoundness relIn.language relOut.language verifier rbrKnowledgeError := by - sorry - -/-- Round-by-round knowledge soundness with error `rbrKnowledgeError` implies knowledge soundness -with error `∑ i, rbrKnowledgeError i`, where the sum is over all rounds `i`. 
-/ -theorem rbrKnowledgeSoundness_implies_knowledgeSoundness - (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (verifier : Verifier pSpec oSpec StmtIn StmtOut) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : - rbrKnowledgeSoundness relIn relOut verifier rbrKnowledgeError → - knowledgeSoundness relIn relOut verifier (∑ i, rbrKnowledgeError i) := by sorry - -end Implications - end Verifier end Soundness @@ -557,10 +245,14 @@ namespace Reduction section ZeroKnowledge --- The simulator should have programming access to the shared oracles `oSpec` -structure Simulator (SimState : Type) where +/-- A simulator for a reduction needs to produce the same transcript as the prover (but potentially + all at once, instead of sequentially). We also grant the simulator the power to program the shared + oracles `oSpec` -/ +structure Simulator {ι : Type} (oSpec : OracleSpec ι) (StmtIn : Type) + {n : ℕ} (pSpec : ProtocolSpec n) where + SimState : Type oracleSim : SimOracle.Stateful oSpec oSpec SimState - proverSim : StmtIn → SimState → PMF (pSpec.FullTranscript × SimState) + proverSim : StmtIn → StateT SimState (OracleComp oSpec) pSpec.FullTranscript /- We define honest-verifier zero-knowledge as follows: @@ -584,33 +276,35 @@ end ZeroKnowledge end Reduction - /-! Completeness and soundness are the same as for non-oracle reductions. 
Only zero-knowledge is different (but we haven't defined it yet) -/ open Reduction -variable {n : ℕ} {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - [∀ i, OracleInterface (pSpec.Message i)] [∀ i, VCVCompatible (pSpec.Challenge i)] - {StmtIn WitIn StmtOut WitOut : Type} - {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} [∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} [oSpec.FiniteRange] +section OracleProtocol + +variable + {StmtIn : Type} {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} {WitIn : Type} + {StmtOut : Type} {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} {WitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] + [∀ i, OracleInterface (pSpec.Message i)] [∀ i, VCVCompatible (pSpec.Challenge i)] namespace OracleReduction /-- Completeness of an oracle reduction is the same as for non-oracle reductions. -/ def completeness - (relIn : (StmtIn × ∀ i, OStmtIn i) → WitIn → Prop) - (relOut : (StmtOut × ∀ i, OStmtOut i) → WitOut → Prop) - (oracleReduction : OracleReduction pSpec oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut) + (relIn : Set ((StmtIn × ∀ i, OStmtIn i) × WitIn)) + (relOut : Set ((StmtOut × ∀ i, OStmtOut i) × WitOut)) + (oracleReduction : OracleReduction oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut pSpec) (completenessError : ℝ≥0) : Prop := Reduction.completeness relIn relOut oracleReduction.toReduction completenessError /-- Perfect completeness of an oracle reduction is the same as for non-oracle reductions. 
-/ def perfectCompleteness - (relIn : (StmtIn × ∀ i, OStmtIn i) → WitIn → Prop) - (relOut : (StmtOut × ∀ i, OStmtOut i) → WitOut → Prop) - (oracleReduction : OracleReduction pSpec oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut) : + (relIn : Set ((StmtIn × ∀ i, OStmtIn i) × WitIn)) + (relOut : Set ((StmtOut × ∀ i, OStmtOut i) × WitOut)) + (oracleReduction : OracleReduction oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut pSpec) : Prop := Reduction.perfectCompleteness relIn relOut oracleReduction.toReduction @@ -622,44 +316,27 @@ namespace OracleVerifier def soundness (langIn : Set (StmtIn × ∀ i, OStmtIn i)) (langOut : Set (StmtOut × ∀ i, OStmtOut i)) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) (soundnessError : ℝ≥0) : Prop := verifier.toVerifier.soundness langIn langOut soundnessError /-- Knowledge soundness of an oracle reduction is the same as for non-oracle reductions. -/ def knowledgeSoundness - (relIn : (StmtIn × ∀ i, OStmtIn i) → WitIn → Prop) - (relOut : (StmtOut × ∀ i, OStmtOut i) → WitOut → Prop) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) + (relIn : Set ((StmtIn × ∀ i, OStmtIn i) × WitIn)) + (relOut : Set ((StmtOut × ∀ i, OStmtOut i) × WitOut)) + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) (knowledgeError : ℝ≥0) : Prop := verifier.toVerifier.knowledgeSoundness relIn relOut knowledgeError -@[reducible, simp] -def StateFunction (pSpec : ProtocolSpec n) (oSpec : OracleSpec ι) - [∀ i, OracleInterface (pSpec.Message i)] [oSpec.FiniteRange] - (langIn : Set (StmtIn × ∀ i, OStmtIn i)) - (langOut : Set (StmtOut × ∀ i, OStmtOut i)) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) := - verifier.toVerifier.StateFunction pSpec oSpec langIn langOut +end OracleVerifier -/-- Round-by-round soundness of an oracle reduction is the same as for non-oracle reductions. 
-/ -def rbrSoundness - (langIn : Set (StmtIn × ∀ i, OStmtIn i)) - (langOut : Set (StmtOut × ∀ i, OStmtOut i)) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) - (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.toVerifier.rbrSoundness langIn langOut rbrSoundnessError - -/-- Round-by-round knowledge soundness of an oracle reduction is the same as for non-oracle -reductions. -/ -def rbrKnowledgeSoundness - (relIn : (StmtIn × ∀ i, OStmtIn i) → WitIn → Prop) - (relOut : (StmtOut × ∀ i, OStmtOut i) → WitOut → Prop) - (verifier : OracleVerifier pSpec oSpec StmtIn StmtOut OStmtIn OStmtOut) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.toVerifier.rbrKnowledgeSoundness relIn relOut rbrKnowledgeError +end OracleProtocol -end OracleVerifier +variable {Statement : Type} {ιₛ : Type} {OStatement : ιₛ → Type} {Witness : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [∀ i, OracleInterface (OStatement i)] + [∀ i, OracleInterface (pSpec.Message i)] + [∀ i, VCVCompatible (pSpec.Challenge i)] namespace Proof @@ -668,65 +345,47 @@ namespace Proof open Reduction -variable {Statement Witness : Type} - @[reducible, simp] -def completeness (relation : Statement → Witness → Prop) (completenessError : ℝ≥0) - (proof : Proof pSpec oSpec Statement Witness) : Prop := +def completeness (relation : Set (Statement × Witness)) (completenessError : ℝ≥0) + (proof : Proof oSpec Statement Witness pSpec) : Prop := Reduction.completeness relation acceptRejectRel proof completenessError @[reducible, simp] -def perfectCompleteness (relation : Statement → Witness → Prop) - (proof : Proof pSpec oSpec Statement Witness) : Prop := +def perfectCompleteness (relation : Set (Statement × Witness)) + (proof : Proof oSpec Statement Witness pSpec) : Prop := Reduction.perfectCompleteness relation acceptRejectRel proof @[reducible, simp] def soundness (langIn : Set Statement) - (verifier : Verifier pSpec oSpec Statement Bool) + (verifier : Verifier oSpec 
Statement Bool pSpec) (soundnessError : ℝ≥0) : Prop := verifier.soundness langIn acceptRejectRel.language soundnessError @[reducible, simp] -def knowledgeSoundness (relation : Statement → Bool → Prop) - (verifier : Verifier pSpec oSpec Statement Bool) +def knowledgeSoundness (relation : Set (Statement × Bool)) + (verifier : Verifier oSpec Statement Bool pSpec) (knowledgeError : ℝ≥0) : Prop := verifier.knowledgeSoundness relation acceptRejectRel knowledgeError -@[reducible, simp] -def rbrSoundness (langIn : Set Statement) - (verifier : Verifier pSpec oSpec Statement Bool) - (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.rbrSoundness langIn acceptRejectRel.language rbrSoundnessError - -@[reducible, simp] -def rbrKnowledgeSoundness (relation : Statement → Bool → Prop) - (verifier : Verifier pSpec oSpec Statement Bool) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.rbrKnowledgeSoundness relation acceptRejectRel rbrKnowledgeError - end Proof namespace OracleProof open OracleReduction -variable {Statement Witness : Type} {ιₛ : Type} {OStatement : ιₛ → Type} - [∀ i, OracleInterface (pSpec.Message i)] - [∀ i, OracleInterface (OStatement i)] - /-- Completeness of an oracle reduction is the same as for non-oracle reductions. -/ @[reducible, simp] def completeness - (relation : (Statement × ∀ i, OStatement i) → Witness → Prop) - (oracleProof : OracleProof pSpec oSpec Statement Witness OStatement) + (relation : Set ((Statement × ∀ i, OStatement i) × Witness)) + (oracleProof : OracleProof oSpec Statement OStatement Witness pSpec) (completenessError : ℝ≥0) : Prop := OracleReduction.completeness relation acceptRejectOracleRel oracleProof completenessError /-- Perfect completeness of an oracle reduction is the same as for non-oracle reductions. 
-/ @[reducible, simp] def perfectCompleteness - (relation : (Statement × ∀ i, OStatement i) → Witness → Prop) - (oracleProof : OracleProof pSpec oSpec Statement Witness OStatement) : + (relation : Set ((Statement × ∀ i, OStatement i) × Witness)) + (oracleProof : OracleProof oSpec Statement OStatement Witness pSpec) : Prop := OracleReduction.perfectCompleteness relation acceptRejectOracleRel oracleProof @@ -734,33 +393,17 @@ def perfectCompleteness @[reducible, simp] def soundness (langIn : Set (Statement × ∀ i, OStatement i)) - (verifier : OracleVerifier pSpec oSpec Statement Bool OStatement (fun _ : Empty => Unit)) + (verifier : OracleVerifier oSpec Statement OStatement Bool (fun _ : Empty => Unit) pSpec) (soundnessError : ℝ≥0) : Prop := - verifier.soundness langIn acceptRejectOracleRel.language soundnessError + verifier.toVerifier.soundness langIn acceptRejectOracleRel.language soundnessError /-- Knowledge soundness of an oracle reduction is the same as for non-oracle reductions. -/ @[reducible, simp] def knowledgeSoundness - (relation : (Statement × ∀ i, OStatement i) → Witness → Prop) - (verifier : OracleVerifier pSpec oSpec Statement Bool OStatement (fun _ : Empty => Unit)) + (relation : Set ((Statement × ∀ i, OStatement i) × Witness)) + (verifier : OracleVerifier oSpec Statement OStatement Bool (fun _ : Empty => Unit) pSpec) (knowledgeError : ℝ≥0) : Prop := - verifier.knowledgeSoundness relation acceptRejectOracleRel knowledgeError - -/-- Round-by-round soundness of an oracle reduction is the same as for non-oracle reductions. -/ -@[reducible, simp] -def rbrSoundness - (langIn : Set (Statement × ∀ i, OStatement i)) - (verifier : OracleVerifier pSpec oSpec Statement Bool OStatement (fun _ : Empty => Unit)) - (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.rbrSoundness langIn acceptRejectOracleRel.language rbrSoundnessError - -/-- Round-by-round knowledge soundness of an oracle reduction is the same as for non-oracle -reductions. 
-/ -def rbrKnowledgeSoundness - (relIn : (Statement × ∀ i, OStatement i) → Witness → Prop) - (verifier : OracleVerifier pSpec oSpec Statement Bool OStatement (fun _ : Empty => Unit)) - (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - verifier.rbrKnowledgeSoundness relIn acceptRejectOracleRel rbrKnowledgeError + verifier.toVerifier.knowledgeSoundness relation acceptRejectOracleRel knowledgeError end OracleProof diff --git a/ArkLib/OracleReduction/Security/Implications.lean b/ArkLib/OracleReduction/Security/Implications.lean new file mode 100644 index 000000000..d80249047 --- /dev/null +++ b/ArkLib/OracleReduction/Security/Implications.lean @@ -0,0 +1,86 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +-- import ArkLib.OracleReduction.Security.Basic +import ArkLib.OracleReduction.Security.RoundByRound + +noncomputable section + +open OracleComp OracleSpec ProtocolSpec +open scoped NNReal + +variable {ι : Type} {oSpec : OracleSpec ι} [oSpec.FiniteRange] +variable {StmtIn WitIn StmtOut WitOut : Type} {n : ℕ} {pSpec : ProtocolSpec n} +variable [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + +namespace Verifier + +section Implications + +/- TODO: add the following results +- `knowledgeSoundness` implies `soundness` +- `roundByRoundSoundness` implies `soundness` +- `roundByRoundKnowledgeSoundness` implies `roundByRoundSoundness` +- `roundByRoundKnowledgeSoundness` implies `knowledgeSoundness` + +In other words, we have a lattice of security notions, with `knowledge` and `roundByRound` being +two strengthenings of soundness. +-/ + +/-- Knowledge soundness with knowledge error `knowledgeError < 1` implies soundness with the same +soundness error `knowledgeError`, and for the corresponding input and output languages. 
-/ +theorem knowledgeSoundness_implies_soundness (relIn : Set (StmtIn × WitIn)) + (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (knowledgeError : ℝ≥0) (hLt : knowledgeError < 1) : + knowledgeSoundness relIn relOut verifier knowledgeError → + soundness relIn.language relOut.language verifier knowledgeError := by + simp [knowledgeSoundness, soundness, Set.language, Function.uncurry] + intro extractor hKS WitIn' WitOut' witIn' prover stmtIn hStmtIn + sorry + -- have hKS' := hKS stmtIn witIn' prover + -- clear hKS + -- contrapose! hKS' + -- constructor + -- · convert hKS'; rename_i result + -- obtain ⟨transcript, queryLog, stmtOut, witOut⟩ := result + -- simp + -- sorry + -- · simp only [Set.language, Set.mem_setOf_eq, not_exists] at hStmtIn + -- simp only [Functor.map, Seq.seq, PMF.bind_bind, Function.comp_apply, PMF.pure_bind, hStmtIn, + -- PMF.bind_const, PMF.pure_apply, eq_iff_iff, iff_false, not_true_eq_false, ↓reduceIte, + -- zero_add, ENNReal.coe_lt_one_iff, hLt] + +/-- Round-by-round soundness with error `rbrSoundnessError` implies soundness with error +`∑ i, rbrSoundnessError i`, where the sum is over all rounds `i`. -/ +theorem rbrSoundness_implies_soundness (langIn : Set StmtIn) (langOut : Set StmtOut) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : + rbrSoundness langIn langOut verifier rbrSoundnessError → + soundness langIn langOut verifier (∑ i, rbrSoundnessError i) := by sorry + +/-- Round-by-round knowledge soundness with error `rbrKnowledgeError` implies round-by-round +soundness with the same error `rbrKnowledgeError`. 
-/ +theorem rbrKnowledgeSoundness_implies_rbrSoundness + (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : + rbrKnowledgeSoundness relIn relOut verifier rbrKnowledgeError → + rbrSoundness relIn.language relOut.language verifier rbrKnowledgeError := by + sorry + +/-- Round-by-round knowledge soundness with error `rbrKnowledgeError` implies knowledge soundness +with error `∑ i, rbrKnowledgeError i`, where the sum is over all rounds `i`. -/ +theorem rbrKnowledgeSoundness_implies_knowledgeSoundness + (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : + rbrKnowledgeSoundness relIn relOut verifier rbrKnowledgeError → + knowledgeSoundness relIn relOut verifier (∑ i, rbrKnowledgeError i) := by sorry + +end Implications + +end Verifier diff --git a/ArkLib/OracleReduction/Security/Rewinding.lean b/ArkLib/OracleReduction/Security/Rewinding.lean new file mode 100644 index 000000000..514e43f31 --- /dev/null +++ b/ArkLib/OracleReduction/Security/Rewinding.lean @@ -0,0 +1,59 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! + # Rewinding Knowledge Soundness + + This file defines rewinding knowledge soundness for (oracle) reductions. +-/ + +noncomputable section + +open OracleComp OracleSpec ProtocolSpec +open scoped NNReal + +variable {ι : Type} {oSpec : OracleSpec ι} [oSpec.FiniteRange] + +namespace Extractor + +section Rewinding + +/-! 
TODO: under development -/ + +/-- The oracle interface to call the prover as a black box -/ +def OracleSpec.proverOracle (StmtIn : Type) {n : ℕ} (pSpec : ProtocolSpec n) : + OracleSpec pSpec.MessageIdx := + fun i => (StmtIn × pSpec.Transcript i.val.castSucc, pSpec.Message i) + +-- def SimOracle.proverImpl (P : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : +-- SimOracle.Stateless (OracleSpec.proverOracle pSpec StmtIn) oSpec := sorry + +structure Rewinding (oSpec : OracleSpec ι) + (StmtIn StmtOut WitIn WitOut : Type) {n : ℕ} (pSpec : ProtocolSpec n) where + /-- The state of the extractor -/ + ExtState : Type + /-- Simulate challenge queries for the prover -/ + simChallenge : SimOracle.Stateful [pSpec.Challenge]ₒ [pSpec.Challenge]ₒ ExtState + /-- Simulate oracle queries for the prover -/ + simOracle : SimOracle.Stateful oSpec oSpec ExtState + /-- Run the extractor with the prover's oracle interface, allowing for calling the prover multiple + times -/ + runExt : StmtOut → WitOut → StmtIn → + StateT ExtState (OracleComp (OracleSpec.proverOracle StmtIn pSpec)) WitIn + +-- Challenge: need environment to update & maintain the prover's states after each extractor query +-- This will hopefully go away after the refactor of prover's type to be an iterated monad + +-- def Rewinding.run +-- (P : Prover.Adaptive pSpec oSpec StmtIn WitIn StmtOut WitOut) +-- (E : Extractor.Rewinding pSpec oSpec StmtIn StmtOut WitIn WitOut) : +-- OracleComp oSpec WitIn := sorry + +end Rewinding + +end Extractor diff --git a/ArkLib/OracleReduction/Security/RoundByRound.lean b/ArkLib/OracleReduction/Security/RoundByRound.lean new file mode 100644 index 000000000..74e70bc7d --- /dev/null +++ b/ArkLib/OracleReduction/Security/RoundByRound.lean @@ -0,0 +1,328 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! 
+ # Round-by-Round Security Definitions + + This file defines round-by-round security notions for (oracle) reductions. +-/ + +noncomputable section + +open OracleComp OracleSpec ProtocolSpec +open scoped NNReal + +variable {ι : Type} {oSpec : OracleSpec ι} [oSpec.FiniteRange] +variable {StmtIn WitIn StmtOut WitOut : Type} {n : ℕ} {pSpec : ProtocolSpec n} +variable [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + +namespace Extractor + +/-- A round-by-round extractor with index `m` is given the input statement, a partial transcript + of length `m`, the prover's query log, and returns a witness to the statement. + + Note that the RBR extractor does not need to take in the output statement or witness. -/ +def RoundByRound (oSpec : OracleSpec ι) (StmtIn WitIn : Type) {n : ℕ} (pSpec : ProtocolSpec n) := + (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → QueryLog oSpec → WitIn + +/-- A round-by-round extractor is **monotone** if its success probability on a given query log + is the same as the success probability on any extension of that query log. -/ +class RoundByRound.IsMonotone (E : RoundByRound oSpec StmtIn WitIn pSpec) + (relIn : Set (StmtIn × WitIn)) where + is_monotone : ∀ roundIdx stmtIn transcript, + ∀ proveQueryLog₁ proveQueryLog₂ : oSpec.QueryLog, + -- ∀ verifyQueryLog₁ verifyQueryLog₂ : oSpec.QueryLog, + proveQueryLog₁.Sublist proveQueryLog₂ → + -- verifyQueryLog₁.Sublist verifyQueryLog₂ → + -- Placeholder condition for now, will need to consider the whole game w/ probabilities + (stmtIn, E roundIdx stmtIn transcript proveQueryLog₁) ∈ relIn → + (stmtIn, E roundIdx stmtIn transcript proveQueryLog₂) ∈ relIn + +end Extractor + +namespace Verifier + +section RoundByRound + +instance : Fintype (pSpec.ChallengeIdx) := Subtype.fintype (fun i => pSpec.getDir i = .V_to_P) + +/-- A (deterministic) state function for a verifier, with respect to input language `langIn` and + output language `langOut`. This is used to define round-by-round soundness. 
-/ +structure StateFunction + (langIn : Set StmtIn) (langOut : Set StmtOut) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + where + toFun : (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → Prop + /-- For all input statement not in the language, the state function is false for the empty + transcript -/ + toFun_empty : ∀ stmt ∉ langIn, ¬ toFun 0 stmt default + /-- If the state function is false for a partial transcript, and the next message is from the + prover to the verifier, then the state function is also false for the new partial transcript + regardless of the message -/ + toFun_next : ∀ m, pSpec.getDir m = .P_to_V → + ∀ stmt tr, ¬ toFun m.castSucc stmt tr → + ∀ msg, ¬ toFun m.succ stmt (tr.concat msg) + /-- If the state function is false for a full transcript, the verifier will not output a statement + in the output language -/ + toFun_full : ∀ stmt tr, ¬ toFun (.last n) stmt tr → + [(· ∈ langOut) | verifier.run stmt tr] = 0 + +/-- A knowledge state function for a verifier, with respect to input relation `relIn`, output + relation `relOut`, and intermediate witness types `WitMid`. This is used to define + round-by-round knowledge soundness. -/ +structure KnowledgeStateFunction + (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (WitMid : Fin (n + 1) → Type) + where + /-- The knowledge state function: takes in round index, input statement, transcript up to that + round, and intermediate witness of that round, and returns True/False. -/ + toFun : (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → WitMid m → Prop + /-- For all input statement such that for all input witness, the statement and witness is not + in the input relation, the state function is false for the empty transcript and any witness. 
-/ + toFun_empty : ∀ stmtIn, stmtIn ∉ relIn.language → + ∀ witMid, ¬ toFun 0 stmtIn default witMid + /-- If the state function is false for a partial transcript, and the next message is from the + prover to the verifier, then the state function is also false for the new partial transcript + regardless of the message and the next intermediate witness. -/ + toFun_next : ∀ m, pSpec.getDir m = .P_to_V → + ∀ stmtIn tr, (∀ witMid, ¬ toFun m.castSucc stmtIn tr witMid) → + ∀ msg, (∀ witMid', ¬ toFun m.succ stmtIn (tr.concat msg) witMid') + toFun_full : ∀ stmtIn tr, (∀ witMid, ¬ toFun (.last n) stmtIn tr witMid) → + [fun stmtOut => stmtOut ∈ relOut.language | verifier.run stmtIn tr ] = 0 + +/-- A knowledge state function gives rise to a state function -/ +def KnowledgeStateFunction.toStateFunction + {relIn : Set (StmtIn × WitIn)} {relOut : Set (StmtOut × WitOut)} + {verifier : Verifier oSpec StmtIn StmtOut pSpec} {WitMid : Fin (n + 1) → Type} + (kSF : KnowledgeStateFunction relIn relOut verifier WitMid) : + verifier.StateFunction relIn.language relOut.language where + toFun := fun m stmtIn tr => ∃ witMid, kSF.toFun m stmtIn tr witMid + toFun_empty := fun stmtIn hStmtIn => by + simp; exact kSF.toFun_empty stmtIn (by simpa [Set.language] using hStmtIn) + toFun_next := fun m hDir stmtIn tr hToFunNext msg => by + simp; exact kSF.toFun_next m hDir stmtIn tr (by simpa [Set.language] using hToFunNext) msg + toFun_full := fun stmtIn tr hToFunFull => by + exact kSF.toFun_full stmtIn tr (by simpa [Set.language] using hToFunFull) + +/-- A round-by-round extractor basically goes backwards, extracting witnesses round-by-round in +opposite to the prover. -/ +structure NewExtractor.RoundByRound (WitMid : Fin (n + 1) → Type) where + -- what if, just one function? 
+ -- extract : (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → WitMid m → QueryLog oSpec → WitIn + extractIn : WitMid 0 → WitIn + extractMid : (m : Fin n) → StmtIn → Transcript m.succ pSpec → + WitMid m.succ → QueryLog oSpec → WitMid m.castSucc + extractOut : WitOut → WitMid (.last n) + +instance {langIn : Set StmtIn} {langOut : Set StmtOut} + {verifier : Verifier oSpec StmtIn StmtOut pSpec} : + CoeFun (verifier.StateFunction langIn langOut) + (fun _ => (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → Prop) := ⟨fun f => f.toFun⟩ + +instance {relIn : Set (StmtIn × WitIn)} {relOut : Set (StmtOut × WitOut)} + {verifier : Verifier oSpec StmtIn StmtOut pSpec} {WitMid : Fin (n + 1) → Type} : + CoeFun (verifier.KnowledgeStateFunction relIn relOut WitMid) + (fun _ => (m : Fin (n + 1)) → StmtIn → Transcript m pSpec → WitMid m → Prop) := + ⟨fun f => f.toFun⟩ + +/-- A protocol with `verifier` satisfies round-by-round soundness with respect to input language + `langIn`, output language `langOut`, and error `rbrSoundnessError` if: + + - there exists a state function `stateFunction` for the verifier and the input/output languages, + such that + - for all initial statement `stmtIn` not in `langIn`, + - for all initial witness `witIn`, + - for all provers `prover`, + - for all `i : Fin n` that is a round corresponding to a challenge, + + the probability that: + - the state function is false for the partial transcript output by the prover + - the state function is true for the partial transcript appended by next challenge (chosen + randomly) + + is at most `rbrSoundnessError i`. 
+-/
+def rbrSoundness (langIn : Set StmtIn) (langOut : Set StmtOut)
+    (verifier : Verifier oSpec StmtIn StmtOut pSpec)
+    (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop :=
+  ∃ stateFunction : verifier.StateFunction langIn langOut,
+  ∀ stmtIn ∉ langIn,
+  ∀ WitIn WitOut : Type,
+  ∀ witIn : WitIn,
+  ∀ prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec,
+  ∀ i : pSpec.ChallengeIdx,
+    let ex : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) _ := do
+      return (← prover.runToRound i.1.castSucc stmtIn witIn, ← pSpec.getChallenge i)
+    [fun ⟨⟨transcript, _⟩, challenge⟩ =>
+      ¬ stateFunction i.1.castSucc stmtIn transcript ∧
+        stateFunction i.1.succ stmtIn (transcript.concat challenge)
+    | ex] ≤
+      rbrSoundnessError i
+
+/-- Type class for round-by-round soundness for a verifier
+
+Note that we put the error as a field in the type class to make it easier for synthesization
+(often the rbr error will need additional simplification / proof) -/
+class IsRBRSound (langIn : Set StmtIn) (langOut : Set StmtOut)
+    (verifier : Verifier oSpec StmtIn StmtOut pSpec) where
+  rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0
+  is_rbr_sound : rbrSoundness langIn langOut verifier rbrSoundnessError
+
+/-- A protocol with `verifier` satisfies round-by-round knowledge soundness with respect to input
+  relation `relIn`, output relation `relOut`, and error `rbrKnowledgeError` if:
+
+  - there exist a state function `stateFunction` and a round-by-round extractor for the verifier
+    and the languages of the input/output relations, such that
+  - for all initial statements `stmtIn` (with no restriction to the language of `relIn`),
+  - for all initial witness `witIn`,
+  - for all provers `prover`,
+  - for all `i : Fin n` that is a round corresponding to a challenge,
+
+  the probability that:
+  - the extracted witness is invalid, i.e. `(stmtIn, extractedWitIn) ∉ relIn`, and
+  - the state function is false for the partial transcript output by the prover, but true for
+    the partial transcript appended by the next challenge (chosen randomly)
+
+  is at most `rbrKnowledgeError i`.
+-/ +def rbrKnowledgeSoundness (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + ∃ stateFunction : verifier.StateFunction relIn.language relOut.language, + ∃ extractor : Extractor.RoundByRound oSpec StmtIn WitIn pSpec, + ∀ stmtIn : StmtIn, + ∀ witIn : WitIn, + ∀ prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec, + ∀ i : pSpec.ChallengeIdx, + let ex : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) _ := (do + let result ← prover.runWithLogToRound i.1.castSucc stmtIn witIn + let chal ← pSpec.getChallenge i + return (result, chal)) + [fun ⟨⟨transcript, _, proveQueryLog⟩, challenge⟩ => + letI extractedWitIn := extractor i.1.castSucc stmtIn transcript proveQueryLog.fst + (stmtIn, extractedWitIn) ∉ relIn ∧ + ¬ stateFunction i.1.castSucc stmtIn transcript ∧ + stateFunction i.1.succ stmtIn (transcript.concat challenge) + | ex] ≤ rbrKnowledgeError i + +-- Tentative new definition of rbr knowledge soundness, using the knowledge state function +def newRbrKnowledgeSoundness (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + ∃ WitMid : Fin (n + 1) → Type, + ∃ kSF : verifier.KnowledgeStateFunction relIn relOut WitMid, + ∃ extractor : NewExtractor.RoundByRound WitMid (WitIn := WitIn) (WitOut := WitOut), + ∀ stmtIn : StmtIn, + ∀ witIn : WitIn, + ∀ prover : Prover oSpec StmtIn WitIn StmtOut WitOut pSpec, + ∀ i : pSpec.ChallengeIdx, + let ex : OracleComp (oSpec ++ₒ [pSpec.Challenge]ₒ) _ := (do + let result ← prover.runWithLogToRound i.1.castSucc stmtIn witIn + let chal ← pSpec.getChallenge i + return (result, chal)) + [fun ⟨⟨transcript, _, proveQueryLog⟩, challenge⟩ => + ∃ witMid, + ¬ kSF i.1.castSucc stmtIn transcript + (extractor.extractMid i.1 stmtIn (transcript.concat challenge) witMid proveQueryLog) ∧ + kSF i.1.succ stmtIn 
(transcript.concat challenge) witMid + | ex] ≤ rbrKnowledgeError i + +/-- Type class for round-by-round knowledge soundness for a verifier + +Note that we put the error as a field in the type class to make it easier for synthesization +(often the rbr error will need additional simplification / proof) +-/ +class IsRBRKnowledgeSound (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + (verifier : Verifier oSpec StmtIn StmtOut pSpec) where + rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0 + is_rbr_knowledge_sound : rbrKnowledgeSoundness relIn relOut verifier rbrKnowledgeError + +end RoundByRound + +end Verifier + +open Verifier + +section OracleProtocol + +variable + {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} + {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} + [Oₛᵢ : ∀ i, OracleInterface (OStmtIn i)] + [∀ i, OracleInterface (pSpec.Message i)] + +namespace OracleVerifier + +@[reducible, simp] +def StateFunction + (langIn : Set (StmtIn × ∀ i, OStmtIn i)) + (langOut : Set (StmtOut × ∀ i, OStmtOut i)) + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) := + verifier.toVerifier.StateFunction langIn langOut + +/-- Round-by-round soundness of an oracle reduction is the same as for non-oracle reductions. -/ +def rbrSoundness + (langIn : Set (StmtIn × ∀ i, OStmtIn i)) + (langOut : Set (StmtOut × ∀ i, OStmtOut i)) + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) + (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.toVerifier.rbrSoundness langIn langOut rbrSoundnessError + +/-- Round-by-round knowledge soundness of an oracle reduction is the same as for non-oracle +reductions. 
-/ +def rbrKnowledgeSoundness + (relIn : Set ((StmtIn × ∀ i, OStmtIn i) × WitIn)) + (relOut : Set ((StmtOut × ∀ i, OStmtOut i) × WitOut)) + (verifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.toVerifier.rbrKnowledgeSoundness relIn relOut rbrKnowledgeError + +end OracleVerifier + +end OracleProtocol + +variable {Statement : Type} {ιₛ : Type} {OStatement : ιₛ → Type} {Witness : Type} + [∀ i, OracleInterface (OStatement i)] + [∀ i, OracleInterface (pSpec.Message i)] + +namespace Proof + +@[reducible, simp] +def rbrSoundness (langIn : Set Statement) + (verifier : Verifier oSpec Statement Bool pSpec) + (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.rbrSoundness langIn acceptRejectRel.language rbrSoundnessError + +@[reducible, simp] +def rbrKnowledgeSoundness (relation : Set (Statement × Bool)) + (verifier : Verifier oSpec Statement Bool pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.rbrKnowledgeSoundness relation acceptRejectRel rbrKnowledgeError + +end Proof + +namespace OracleProof + +/-- Round-by-round soundness of an oracle reduction is the same as for non-oracle reductions. -/ +@[reducible, simp] +def rbrSoundness + (langIn : Set (Statement × ∀ i, OStatement i)) + (verifier : OracleVerifier oSpec Statement OStatement Bool (fun _ : Empty => Unit) pSpec) + (rbrSoundnessError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.rbrSoundness langIn acceptRejectOracleRel.language rbrSoundnessError + +/-- Round-by-round knowledge soundness of an oracle reduction is the same as for non-oracle +reductions. 
-/ +def rbrKnowledgeSoundness + (relIn : Set ((Statement × ∀ i, OStatement i) × Witness)) + (verifier : OracleVerifier oSpec Statement OStatement Bool (fun _ : Empty => Unit) pSpec) + (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := + verifier.rbrKnowledgeSoundness relIn acceptRejectOracleRel rbrKnowledgeError + +end OracleProof diff --git a/ArkLib/OracleReduction/Security/StateRestoration.lean b/ArkLib/OracleReduction/Security/StateRestoration.lean new file mode 100644 index 000000000..a30b6aff1 --- /dev/null +++ b/ArkLib/OracleReduction/Security/StateRestoration.lean @@ -0,0 +1,68 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! + # State-Restoration Security Definitions + + This file defines state-restoration security notions for (oracle) reductions. +-/ + +noncomputable section + +open OracleComp OracleSpec ProtocolSpec +open scoped NNReal + +variable {ι : Type} {oSpec : OracleSpec ι} [oSpec.FiniteRange] + +namespace Prover + +/-- A **state-restoration** prover in a reduction is a modified prover that has query access to + challenge oracles that can return the `i`-th challenge, for all `i : pSpec.ChallengeIdx`, given + the input statement and the transcript up to that point. + + It further takes in the input statement and witness, and outputs a full transcript of interaction, + along with the output statement and witness. 
-/ +def StateRestoration (oSpec : OracleSpec ι) + (StmtIn WitIn StmtOut WitOut : Type) {n : ℕ} (pSpec : ProtocolSpec n) := + StmtIn → WitIn → OracleComp (oSpec ++ₒ (srChallengeOracle StmtIn pSpec)) + ((StmtOut × WitOut) × pSpec.FullTranscript) + +end Prover + +namespace Verifier + +variable {oSpec : OracleSpec ι} + {StmtIn WitIn StmtOut WitOut : Type} + {n : ℕ} {pSpec : ProtocolSpec n} + [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] + +namespace StateRestoration + +-- /-- State-restoration soundness -/ +-- def srSoundness (verifier : Verifier pSpec oSpec StmtIn StmtOut) +-- (langIn : Set StmtIn) (langOut : Set StmtOut) (SRSoundnessError : ENNReal) : Prop := +-- ∀ stmtIn ∉ langIn, +-- ∀ witIn : WitIn, +-- ∀ Prover.StateRestoration : Prover.StateRestoration pSpec oSpec StmtIn WitIn StmtOut WitOut, +-- let ⟨_, witOut, transcript, queryLog⟩ ← (simulateQ ... (Prover.StateRestoration.run stmtIn witIn)).run +-- let stmtOut ← verifier.run stmtIn transcript +-- return stmtOut ∉ langOut + +-- State-restoration knowledge soundness (w/ straightline extractor) + +end StateRestoration + +end Verifier + +namespace OracleVerifier + + + +end OracleVerifier + +end diff --git a/ArkLib/OracleReduction/Transform/FiatShamir.lean b/ArkLib/OracleReduction/Transform/FiatShamir.lean deleted file mode 100644 index 2c4b1835e..000000000 --- a/ArkLib/OracleReduction/Transform/FiatShamir.lean +++ /dev/null @@ -1,136 +0,0 @@ -/- -Copyright (c) 2024 ArkLib Contributors. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Authors: Quang Dao --/ - -import ArkLib.OracleReduction.Composition.Sequential.General - -/-! - # The Fiat-Shamir Transformation - - This file defines the Fiat-Shamir transformation. This transformation takes in a (public-coin) - interactive reduction (IR) `R` and transforms it into a non-interactive reduction via removing - interaction from `R`. 
In particular, in the transformed reduction, queries to the verifier's - challenges are replaced by queries to a random oracle. - - We will show that the transformation satisfies security properties as follows: - - - Completeness is preserved - - State-restoration (knowledge) soundness implies (knowledge) soundness - - Honest-verifier zero-knowledge implies zero-knowledge - - ## Notes - - Our formalization mostly follows the treatment in the Chiesa-Yogev textbook. - - There are many variants of Fiat-Shamir. The most theoretical one is that one hashes the entire - statement and transcript up to the point of deriving a new challenge. This is inefficient and not - actually how it is implemented in practice. - - In contrast, most implementations today use the hash function as a **cryptographic sponge**, which - permits adding in bitstrings (serialized from the messages) and squeezing out new bitstrings (that - are then de-serialized into challenges). --/ - -open ProtocolSpec - -variable {n : ℕ} - -section Instances - -variable {Message : Type} - -instance : IsEmpty (ChallengeIdx ![(.P_to_V, Message)]) := by - simp [ChallengeIdx] - infer_instance - -instance : Unique (MessageIdx ![(.P_to_V, Message)]) where - default := ⟨0, by simp⟩ - uniq := fun i => by ext; simp - -instance : ∀ i, VCVCompatible (Challenge ![(.P_to_V, Message)] i) := - fun i => isEmptyElim i - -end Instances - -/-- Turn each verifier's challenge into an oracle, where one needs to query - with an input statement - and a prior transcript to get a challenge (useful for Fiat-Shamir) -/ -@[reducible, inline, specialize] -def instChallengeOracleInterfaceFiatShamir {pSpec : ProtocolSpec n} {i : pSpec.ChallengeIdx} - {StmtIn : Type} : OracleInterface (pSpec.Challenge i) where - Query := StmtIn × Transcript i.1.castSucc pSpec - Response := pSpec.Challenge i - oracle := fun c _ => c - -/-- The oracle interface for Fiat-Shamir. 
- -This is the (inefficient) version where we hash the input statement and the entire transcript up to -the point of deriving a new challenge. - -Some variants of Fiat-Shamir takes in a salt each round. We assume that such salts are included in -the input statement (i.e. we can always transform a given reduction into one where every round has a -random salt). -/ -def fiatShamirSpec (pSpec : ProtocolSpec n) (StmtIn : Type) : OracleSpec pSpec.ChallengeIdx := - fun i => (StmtIn × Transcript i.1.succ pSpec, pSpec.Challenge i) - -instance {pSpec : ProtocolSpec n} {StmtIn : Type} [∀ i, VCVCompatible (pSpec.Challenge i)] : - OracleSpec.FiniteRange (fiatShamirSpec pSpec StmtIn) where - range_inhabited' := fun i => by simp [fiatShamirSpec, OracleSpec.range]; infer_instance - range_fintype' := fun i => by simp [fiatShamirSpec, OracleSpec.range]; infer_instance - --- TODO: define a more efficient version where one hashes the most recent challenge along with the --- messages since that challenge (the first challenge is obtained by hashing the statement & the --- messages so far) - -variable {pSpec : ProtocolSpec n} {ι : Type} {oSpec : OracleSpec ι} - {StmtIn WitIn StmtOut WitOut : Type} [VCVCompatible StmtIn] [∀ i, VCVCompatible (pSpec i).2] - (Salt : Type) [VCVCompatible Salt] - -def Prover.fiatShamir (P : Prover pSpec oSpec StmtIn WitIn StmtOut WitOut) : - NonInteractiveProver (oSpec ++ₒ fiatShamirSpec pSpec StmtIn) - StmtIn WitIn StmtOut WitOut (∀ i, pSpec.Message i) where - PrvState := fun i => match i with - | 0 => P.PrvState 0 - | _ => P.PrvState (Fin.last n) - input := P.input - -- - sendMessage := fun idx state => by - simp [Unique.uniq _ idx, default] at state ⊢ - sorry - -- This function is never invoked so we apply the elimination principle - receiveChallenge := fun idx _ _ => isEmptyElim idx - output := P.output - -def Verifier.fiatShamir (V : Verifier pSpec oSpec StmtIn StmtOut) : - NonInteractiveVerifier (oSpec ++ₒ fiatShamirSpec pSpec StmtIn) - StmtIn StmtOut (∀ 
i, pSpec.Message i) where - verify := fun stmtIn messages => - let transcript := sorry - V.verify stmtIn transcript - -/-- TODO: change `unifSpec` to something more appropriate (maybe a `CryptographicSponge`) -/ -def Reduction.fiatShamir (R : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : - NonInteractiveReduction (oSpec ++ₒ fiatShamirSpec pSpec StmtIn) - StmtIn WitIn StmtOut WitOut (∀ i, pSpec.Message i) := - ⟨R.prover.fiatShamir, R.verifier.fiatShamir⟩ - -section Security - -noncomputable section - -open scoped NNReal - -variable [DecidableEq ι] [oSpec.FiniteRange] [∀ i, VCVCompatible (pSpec.Challenge i)] - -theorem fiatShamir_completeness (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) - (completenessError : ℝ≥0) (R : Reduction pSpec oSpec StmtIn WitIn StmtOut WitOut) : - R.completeness relIn relOut completenessError → - (R.fiatShamir).completeness relIn relOut completenessError := sorry - --- TODO: state-restoration (knowledge) soundness implies (knowledge) soundness after Fiat-Shamir - -end - -end Security diff --git a/ArkLib/OracleReduction/VectorIOR.lean b/ArkLib/OracleReduction/VectorIOR.lean index eeddbf7fd..5190d9b4e 100644 --- a/ArkLib/OracleReduction/VectorIOR.lean +++ b/ArkLib/OracleReduction/VectorIOR.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.OracleReduction.Security.Basic +import ArkLib.OracleReduction.Security.RoundByRound /-! # Vector Interactive Oracle Reductions @@ -92,11 +92,12 @@ variable {n : ℕ} {ι : Type} statements are vectors over some alphabet. We do _not_ require the (oracle) statements and witnesses to be vectors as well, though this can be done if needed. 
-/ @[reducible] -def VectorIOR (vPSpec : ProtocolSpec.VectorSpec n) (A : Type) (oSpec : OracleSpec ι) - (StmtIn WitIn StmtOut WitOut : Type) - {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) [∀ i, OracleInterface (OStmtIn i)] - {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) := - OracleReduction (vPSpec.toProtocolSpec A) oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut +def VectorIOR (oSpec : OracleSpec ι) + (StmtIn : Type) {ιₛᵢ : Type} (OStmtIn : ιₛᵢ → Type) (WitIn : Type) + (StmtOut : Type) {ιₛₒ : Type} (OStmtOut : ιₛₒ → Type) (WitOut : Type) + (vPSpec : ProtocolSpec.VectorSpec n) (A : Type) + [∀ i, OracleInterface (OStmtIn i)] := + OracleReduction oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut (vPSpec.toProtocolSpec A) /-- Vector Interactive Oracle Proofs @@ -104,10 +105,11 @@ This is just a specialization of `OracleProof` to the case where all messages an are vectors over some alphabet. We do _not_ require the (oracle) statements and witnesses to be vectors as well, though this can be done if needed. -/ @[reducible] -def VectorIOP (vPSpec : ProtocolSpec.VectorSpec n) (A : Type) (oSpec : OracleSpec ι) - (Statement Witness : Type) - {ιₛ : Type} (OStatement : ιₛ → Type) [∀ i, OracleInterface (OStatement i)] := - OracleProof (vPSpec.toProtocolSpec A) oSpec Statement Witness OStatement +def VectorIOP (oSpec : OracleSpec ι) + (Statement : Type) {ιₛ : Type} (OStatement : ιₛ → Type) (Witness : Type) + (vPSpec : ProtocolSpec.VectorSpec n) (A : Type) + [∀ i, OracleInterface (OStatement i)] := + OracleProof oSpec Statement OStatement Witness (vPSpec.toProtocolSpec A) variable {n : ℕ} {ι : Type} {vPSpec : ProtocolSpec.VectorSpec n} {A : Type} {oSpec : OracleSpec ι} [VCVCompatible A] [oSpec.FiniteRange] @@ -123,10 +125,10 @@ variable {StmtIn WitIn StmtOut WitOut : Type} {ιₛᵢ : Type} {OStmtIn : ιₛ round-by-round knowledge error `ε_rbr` if it satisfies (perfect) completeness and round-by-round knowledge soundness with respect to `relIn`, `relOut`, and `ε_rbr`. 
-/ class IsSecure - (relIn : (StmtIn × ∀ i, OStmtIn i) → WitIn → Prop) - (relOut : (StmtOut × ∀ i, OStmtOut i) → WitOut → Prop) + (relIn : Set ((StmtIn × ∀ i, OStmtIn i) × WitIn)) + (relOut : Set ((StmtOut × ∀ i, OStmtOut i) × WitOut)) (ε_rbr : vPSpec.ChallengeIdx → ℝ≥0) - (vectorIOR : VectorIOR vPSpec A oSpec StmtIn WitIn StmtOut WitOut OStmtIn OStmtOut) where + (vectorIOR : VectorIOR oSpec StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut vPSpec A) where /-- The reduction is perfectly complete. -/ is_complete : vectorIOR.perfectCompleteness relIn relOut @@ -149,9 +151,9 @@ variable {Statement Witness : Type} {ιₛ : Type} {OStatement : ιₛ → Type} error `ε_rbr` if it satisfies (perfect) completeness and round-by-round knowledge soundness with respect to `relation` and `ε_rbr`. -/ class IsSecure - (relation : (Statement × ∀ i, OStatement i) → Witness → Prop) + (relation : Set ((Statement × ∀ i, OStatement i) × Witness)) (ε_rbr : vPSpec.ChallengeIdx → ℝ≥0) - (vectorIOP : VectorIOP vPSpec A oSpec Statement Witness OStatement) where + (vectorIOP : VectorIOP oSpec Statement OStatement Witness vPSpec A) where /-- The reduction is perfectly complete. -/ is_complete : vectorIOP.perfectCompleteness relation @@ -167,10 +169,10 @@ class IsSecure - (perfect) completeness with respect to `completeRelation`, - round-by-round knowledge soundness with respect to `soundRelation` and `ε_rbr`. -/ class IsSecureWithGap - (completeRelation : (Statement × ∀ i, OStatement i) → Witness → Prop) - (soundRelation : (Statement × ∀ i, OStatement i) → Witness → Prop) + (completeRelation : Set ((Statement × ∀ i, OStatement i) × Witness)) + (soundRelation : Set ((Statement × ∀ i, OStatement i) × Witness)) (ε_rbr : vPSpec.ChallengeIdx → ℝ≥0) - (vectorIOP : VectorIOP vPSpec A oSpec Statement Witness OStatement) where + (vectorIOP : VectorIOP oSpec Statement OStatement Witness vPSpec A) where /-- The reduction is perfectly complete. 
-/ is_complete : vectorIOP.perfectCompleteness completeRelation diff --git a/ArkLib/ProofSystem/Component/CheckClaim.lean b/ArkLib/ProofSystem/Component/CheckClaim.lean new file mode 100644 index 000000000..32b4d1e4f --- /dev/null +++ b/ArkLib/ProofSystem/Component/CheckClaim.lean @@ -0,0 +1,127 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! + # Simple (Oracle) Reduction: Check if a predicate / claim on a statement is satisfied + + This is a zero-round (oracle) reduction. There is no witness. + + 1. Reduction version: the input relation becomes a predicate on the statement. Verifier checks + this predicate, and returns the same statement if successful. + + 2. Oracle reduction version: the input relation becomes an oracle computation having as oracles + the oracle statements, and taking in the (non-oracle) statement as an input (i.e. via + `ReaderT`), and returning a `Prop`. Verifier performs this oracle computation, and returns the + same statement & oracle statement if successful. + + In both cases, the output relation is trivial (since the input relation has been checked by the + verifier). +-/ + +open OracleComp OracleInterface ProtocolSpec Function + +namespace CheckClaim + +variable {ι : Type} (oSpec : OracleSpec ι) (Statement : Type) + +section Reduction + +/-- The prover for the `CheckClaim` reduction. -/ +@[inline, specialize] +def prover : Prover oSpec Statement Unit Statement Unit ![] where + PrvState := fun _ => Statement + input := Prod.fst + sendMessage := fun i => nomatch i + receiveChallenge := fun i => nomatch i + output := fun stmt => (stmt, ()) + +variable (pred : Statement → Prop) [DecidablePred pred] + +/-- The verifier for the `CheckClaim` reduction. 
-/ +@[inline, specialize] +def verifier : Verifier oSpec Statement Statement ![] where + verify := fun stmt _ => do guard (pred stmt); return stmt + +/-- The reduction for the `CheckClaim` reduction. -/ +@[inline, specialize] +def reduction : Reduction oSpec Statement Unit Statement Unit ![] where + prover := prover oSpec Statement + verifier := verifier oSpec Statement pred + +variable [oSpec.FiniteRange] + +/-- The `CheckClaim` reduction satisfies perfect completeness with respect to the predicate as the + input relation, and the output relation being always true. -/ +@[simp] +theorem reduction_completeness : + (reduction oSpec Statement pred).perfectCompleteness { ⟨stmt, _⟩ | pred stmt } + Set.univ := by + simp [reduction, Reduction.run, Prover.run, Prover.runToRound, Prover.processRound, Verifier.run, + prover, verifier] + +/-- The `CheckClaim` reduction satisfies perfect round-by-round knowledge soundness. -/ +theorem reduction_rbr_knowledge_soundness : True := sorry + +end Reduction + +section OracleReduction + +variable {ιₛ : Type} (OStatement : ιₛ → Type) [∀ i, OracleInterface (OStatement i)] + +/-- The oracle prover for the `CheckClaim` oracle reduction. -/ +@[inline, specialize] +def oracleProver : OracleProver oSpec + Statement OStatement Unit Statement OStatement Unit ![] where + PrvState := fun _ => Statement × (∀ i, OStatement i) + input := Prod.fst + sendMessage := fun i => nomatch i + receiveChallenge := fun i => nomatch i + output := fun stmt => (stmt, ()) + +variable (pred : ReaderT Statement (OracleComp [OStatement]ₒ) Prop) + (hPred : ∀ stmt, (pred stmt).neverFails) + +/-- The oracle verifier for the `CheckClaim` oracle reduction. -/ +@[inline, specialize] +def oracleVerifier : OracleVerifier oSpec + Statement OStatement Statement OStatement ![] where + verify := fun stmt _ => do let _ ← pred stmt; return stmt + embed := Embedding.inl + hEq := by intro i; simp + +/-- The oracle reduction for the `CheckClaim` oracle reduction. 
-/ +@[inline, specialize] +def oracleReduction : OracleReduction oSpec + Statement OStatement Unit Statement OStatement Unit ![] where + prover := oracleProver oSpec Statement OStatement + verifier := oracleVerifier oSpec Statement OStatement pred + +variable {Statement} {OStatement} + +def toRelInput : Set ((Statement × (∀ i, OStatement i)) × Unit) := + { ⟨⟨stmt, oStmt⟩, _⟩ | simulateQ' (toOracleImpl OStatement oStmt) (pred stmt) (hPred stmt) } + +-- theorem oracleProver_run + +variable [oSpec.FiniteRange] + +/-- The `CheckClaim` reduction satisfies perfect completeness. -/ +@[simp] +theorem oracleReduction_completeness : + (oracleReduction oSpec Statement OStatement pred).perfectCompleteness (toRelInput pred hPred) + Set.univ := by + simp [OracleReduction.perfectCompleteness, OracleReduction.toReduction, OracleVerifier.toVerifier, + oracleReduction, oracleProver, oracleVerifier, toRelInput] + simp [Reduction.run, Prover.run, Verifier.run, simOracle2] + sorry + +theorem oracleReduction_rbr_knowledge_soundness : True := sorry + +end OracleReduction + +end CheckClaim diff --git a/ArkLib/ProofSystem/Component/CheckPred.lean b/ArkLib/ProofSystem/Component/CheckPred.lean deleted file mode 100644 index 50326bfb9..000000000 --- a/ArkLib/ProofSystem/Component/CheckPred.lean +++ /dev/null @@ -1,24 +0,0 @@ -/- -Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Authors: Quang Dao --/ - -import ArkLib.OracleReduction.Security.Basic - -/-! - # Simple (Oracle) Reduction: Check if a predicate on a statement is satisfied - - This is a zero-round (oracle) reduction. - - 1. Reduction version: the input relation is constant on witness, and becomes a predicate on the - statement. Verifier checks this statement, and returns yes / no. - - Verifier may decide to keep the statement or not (send it to Unit)? - - 2. 
Oracle reduction version: the input relation is constant on witness, and there is an oracle - computation having as oracles the oracle statements, and taking in the (non-oracle) statement as - an input (i.e. via `ReaderT`), and returning a `Prop`. - - Verifier performs this oracle computation, and may decide to keep the (oracle) statement or not --/ diff --git a/ArkLib/ProofSystem/Component/DoNothing.lean b/ArkLib/ProofSystem/Component/DoNothing.lean new file mode 100644 index 000000000..1554a7752 --- /dev/null +++ b/ArkLib/ProofSystem/Component/DoNothing.lean @@ -0,0 +1,102 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! + # The Trivial (Oracle) Reduction: Do Nothing! + + This is a zero-round (oracle) reduction. Both the (oracle) prover and the (oracle) verifier simply + pass on their inputs unchanged. + + We still define this as the base for realizing other zero-round reductions, via lens / lifting. + + NOTE: we have already defined these as trivial (oracle) reductions +-/ + +namespace DoNothing + +variable {ι : Type} (oSpec : OracleSpec ι) (Statement Witness : Type) + +section Reduction + +/-- The prover for the `DoNothing` reduction. -/ +@[inline, specialize, simp] +def prover : Prover oSpec Statement Witness Statement Witness ![] := Prover.id + +/-- The verifier for the `DoNothing` reduction. -/ +@[inline, specialize, simp] +def verifier : Verifier oSpec Statement Statement ![] := Verifier.id + +/-- The reduction for the `DoNothing` reduction. + - Prover simply returns the statement and witness. + - Verifier simply returns the statement. 
+-/ +@[inline, specialize, simp] +def reduction : Reduction oSpec Statement Witness Statement Witness ![] := Reduction.id + +variable [oSpec.FiniteRange] (rel : Set (Statement × Witness)) + +/-- The `DoNothing` reduction satisfies perfect completeness for any relation. -/ +@[simp] +theorem reduction_completeness : + (reduction oSpec Statement Witness).perfectCompleteness rel rel := by + simp [Reduction.run, Prover.run, Prover.runToRound, Prover.processRound, Verifier.run, + reduction, prover, verifier, Reduction.id, Prover.id, Verifier.id] + +-- theorem reduction_rbr_knowledge_soundness : +-- (reduction oSpec Statement Witness).rbrKnowledgeSoundness rel rel := by + -- simp [Reduction.run, Prover.run, Prover.runToRound, Prover.processRound, Verifier.run, + -- reduction, prover, verifier] + +end Reduction + +section OracleReduction + +variable {ιₛ : Type} (OStatement : ιₛ → Type) [∀ i, OracleInterface (OStatement i)] + +/-- The oracle prover for the `DoNothing` oracle reduction. -/ +@[inline, specialize, simp] +def oracleProver : OracleProver oSpec + Statement OStatement Witness Statement OStatement Witness ![] := OracleProver.id + +/-- The oracle verifier for the `DoNothing` oracle reduction. -/ +@[inline, specialize, simp] +def oracleVerifier : OracleVerifier oSpec Statement OStatement Statement OStatement ![] := + OracleVerifier.id + +/-- The oracle reduction for the `DoNothing` oracle reduction. + - Prover simply returns the (non-oracle and oracle) statement and witness. + - Verifier simply returns the (non-oracle and oracle) statement. +-/ +@[inline, specialize, simp] +def oracleReduction : OracleReduction oSpec + Statement OStatement Witness Statement OStatement Witness ![] := OracleReduction.id + +variable [oSpec.FiniteRange] (rel : Set ((Statement × (∀ i, OStatement i)) × Witness)) + +/-- The `DoNothing` oracle reduction satisfies perfect completeness for any relation. 
-/ +@[simp] +theorem oracleReduction_completeness : + (oracleReduction oSpec Statement Witness OStatement).perfectCompleteness rel rel := by + simp [OracleReduction.perfectCompleteness, OracleReduction.toReduction, OracleVerifier.toVerifier, + oracleReduction, oracleProver, oracleVerifier] + -- Need to simp the below separately, otherwise we get timeout + simp [Reduction.run, Prover.run, Verifier.run, OracleReduction.id, OracleProver.id, + OracleVerifier.id, Prover.id, Verifier.id] + aesop + +-- theorem oracleReduction_rbr_knowledge_soundness : +-- (oracleReduction oSpec Statement Witness OStatement).rbrKnowledgeSoundness rel rel := by +-- simp [OracleReduction.rbrKnowledgeSoundness, OracleReduction.toReduction, +-- OracleVerifier.toVerifier, oracleReduction, oracleProver, oracleVerifier] +-- -- Need to simp the below separately, otherwise we get timeout +-- simp [Reduction.run, Prover.run, Verifier.run] +-- aesop + +end OracleReduction + +end DoNothing diff --git a/ArkLib/ProofSystem/Component/RandomQuery.lean b/ArkLib/ProofSystem/Component/RandomQuery.lean index 7384aefd1..311cc8282 100644 --- a/ArkLib/ProofSystem/Component/RandomQuery.lean +++ b/ArkLib/ProofSystem/Component/RandomQuery.lean @@ -3,7 +3,7 @@ Copyright (c) 2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.OracleReduction.LiftContext.Basic +import ArkLib.OracleReduction.LiftContext.Reduction /-! # Simple Oracle Reduction: Random Query @@ -47,39 +47,36 @@ def WitOut := Unit /-- The input relation is that the two oracles are equal. -/ @[reducible, simp] -def relIn : (StmtIn × ∀ i, OStmtIn OStatement i) → WitIn → Prop := fun ⟨(), oracles⟩ () => - oracles 0 = oracles 1 +def relIn : Set ((StmtIn × ∀ i, OStmtIn OStatement i) × WitIn) := + { ⟨⟨(), oracles⟩, ()⟩ | oracles 0 = oracles 1 } /-- The output relation states that if the verifier's single query was `q`, then `a` and `b` agree on that `q`, i.e. 
`oracle a q = oracle b q`. -/ @[reducible, simp] -def relOut : (StmtOut OStatement × ∀ i, OStmtOut OStatement i) → WitOut → Prop := - fun ⟨q, oStmt⟩ () => oracle (oStmt 0) q = oracle (oStmt 1) q +def relOut : Set ((StmtOut OStatement × ∀ i, OStmtOut OStatement i) × WitOut) := + { ⟨⟨q, oStmt⟩, ()⟩ | oracle (oStmt 0) q = oracle (oStmt 1) q } @[reducible] def pSpec : ProtocolSpec 1 := ![(.V_to_P, Query OStatement)] -instance : ∀ i, OracleInterface ((pSpec OStatement).Message i) | ⟨0, h⟩ => nomatch h -@[reducible, simp] instance : ∀ i, VCVCompatible ((pSpec OStatement).Challenge i) - | ⟨0, _⟩ => by dsimp [pSpec, ProtocolSpec.Challenge]; exact inst - /-- The prover is trivial: it has no messages to send. It only receives the verifier's challenge `q`, and outputs the same `q`. We keep track of `(a, b)` in the prover's state, along with the single random query `q`. -/ -def prover : OracleProver (pSpec OStatement) oSpec Unit Unit - (Query OStatement) Unit - (fun _ : Fin 2 => OStatement) (fun _ : Fin 2 => OStatement) where +@[inline, specialize] +def oracleProver : OracleProver oSpec + Unit (fun _ : Fin 2 => OStatement) Unit + (Query OStatement) (fun _ : Fin 2 => OStatement) Unit (pSpec OStatement) where PrvState | 0 => ∀ _ : Fin 2, OStatement | 1 => (∀ _ : Fin 2, OStatement) × (Query OStatement) - input := fun ⟨(), oracles⟩ () => oracles + input := fun x => x.1.2 sendMessage | ⟨0, h⟩ => nomatch h @@ -90,8 +87,10 @@ def prover : OracleProver (pSpec OStatement) oSpec Unit Unit /-- The oracle verifier simply returns the challenge, and performs no checks. 
-/ -def verifier : OracleVerifier (pSpec OStatement) oSpec Unit (Query OStatement) - (fun _ : Fin 2 => OStatement) (fun _ : Fin 2 => OStatement) where +@[inline, specialize] +def oracleVerifier : OracleVerifier oSpec + Unit (fun _ : Fin 2 => OStatement) + (Query OStatement) (fun _ : Fin 2 => OStatement) (pSpec OStatement) where verify := fun _ chal => do let q : Query OStatement := chal ⟨0, rfl⟩ @@ -106,24 +105,24 @@ Combine the trivial prover and this verifier to form the `RandomQuery` oracle re the input oracles are `(a, b)`, and the output oracles are the same `(a, b)` its output statement also contains the challenge `q`. -/ +@[inline, specialize] def oracleReduction : - OracleReduction (pSpec OStatement) oSpec Unit Unit - (Query OStatement) Unit - (fun _ : Fin 2 => OStatement) (fun _ : Fin 2 => OStatement) where - prover := prover oSpec OStatement - verifier := verifier oSpec OStatement + OracleReduction oSpec Unit (fun _ : Fin 2 => OStatement) Unit + (Query OStatement) (fun _ : Fin 2 => OStatement) Unit (pSpec OStatement) where + prover := oracleProver oSpec OStatement + verifier := oracleVerifier oSpec OStatement variable [oSpec.FiniteRange] /-- The `RandomQuery` oracle reduction is perfectly complete. 
-/ @[simp] -theorem completeness : (oracleReduction oSpec OStatement).perfectCompleteness +theorem oracleReduction_completeness : (oracleReduction oSpec OStatement).perfectCompleteness (relIn OStatement) (relOut OStatement) := by simp [OracleReduction.perfectCompleteness, oracleReduction, relIn, relOut] intro _ oStmt _ hOStmt simp [Reduction.run, Prover.run, Verifier.run, Prover.runToRound, Prover.processRound, - OracleReduction.toReduction, OracleVerifier.toVerifier, verifier, prover, - Transcript.snoc, FullTranscript.challenges] + OracleReduction.toReduction, OracleVerifier.toVerifier, oracleVerifier, oracleProver, + Transcript.concat, FullTranscript.challenges] intro q oStmt' q' oStmt'' transcript h1 h2 h3 h4 apply congrFun at h3 simp_all [Fin.snoc] @@ -135,24 +134,24 @@ theorem completeness : (oracleReduction oSpec OStatement).perfectCompleteness -- def langOut : Set ((Query OStatement) × (∀ _ : Fin 2, OStatement)) := setOf fun ⟨q, oracles⟩ => -- OracleInterface.oracle (oracles 0) q = OracleInterface.oracle (oracles 1) q -def stateFunction : (verifier oSpec OStatement).StateFunction (pSpec OStatement) oSpec +def stateFunction : (oracleVerifier oSpec OStatement).StateFunction (relIn OStatement).language (relOut OStatement).language where toFun | 0 => fun ⟨_, oracles⟩ _ => oracles 0 = oracles 1 | 1 => fun ⟨_, oracles⟩ chal => let q : Query OStatement := by simpa [pSpec] using chal ⟨0, by aesop⟩ OracleInterface.oracle (oracles 0) q = OracleInterface.oracle (oracles 1) q - toFun_empty := fun stmt hStmt => by simp_all [relIn, Function.language] + toFun_empty := fun stmt hStmt => by simp_all [relIn, Set.language] toFun_next := fun i hDir ⟨stmt, oStmt⟩ tr h => by simp_all toFun_full := fun ⟨stmt, oStmt⟩ tr h => by - simp_all [relOut, Function.language] + simp_all [relOut, Set.language] intro a b hSupp - simp [Verifier.run, OracleVerifier.toVerifier, verifier] at hSupp + simp [OracleVerifier.toVerifier, Verifier.run, oracleVerifier] at hSupp simp [hSupp.1, hSupp.2, h] 
/-- The extractor is trivial since the output witness is `Unit`. -/ -def extractor : RBRExtractor (pSpec OStatement) oSpec - (Unit × (∀ _ : Fin 2, OStatement)) Unit := +def extractor : Extractor.RoundByRound oSpec + (Unit × (∀ _ : Fin 2, OStatement)) Unit (pSpec OStatement) := fun _ _ _ _ => () /-! @@ -171,7 +170,7 @@ open NNReal /-- The `RandomQuery` oracle reduction is knowledge sound. -/ @[simp] theorem rbr_knowledge_soundness {d : ℕ} (h : OracleInterface.distanceLE OStatement d) : - (verifier oSpec OStatement).rbrKnowledgeSoundness + (oracleVerifier oSpec OStatement).rbrKnowledgeSoundness (relIn OStatement) (relOut OStatement) (fun _ => (d : ℝ≥0) / (Fintype.card (Query OStatement) : ℝ≥0)) := by @@ -184,7 +183,7 @@ theorem rbr_knowledge_soundness {d : ℕ} (h : OracleInterface.distanceLE OState simp [Prover.runWithLogToRound, Prover.runToRound, stateFunction] classical unfold Function.comp - simp [probEvent_liftM_eq_mul_inv, ProtocolSpec.Transcript.snoc, Fin.snoc, default] + simp [probEvent_liftM_eq_mul_inv, ProtocolSpec.Transcript.concat, Fin.snoc, default] rw [div_eq_mul_inv] gcongr simp [Finset.filter_and] @@ -238,7 +237,7 @@ instance : ∀ i, OracleInterface ((pSpec OStatement).Message i) | ⟨0, h⟩ => -- Perhaps it's time to test out the liftContext infrastructure --- instance : OracleContextLens +-- instance : OracleContext.Lens -- RandomQuery.StmtIn (RandomQuery.StmtOut OStatement) -- StmtIn (StmtOut OStatement) -- (RandomQuery.OStmtIn OStatement) (RandomQuery.OStmtOut OStatement) diff --git a/ArkLib/ProofSystem/Component/ReduceClaim.lean b/ArkLib/ProofSystem/Component/ReduceClaim.lean new file mode 100644 index 000000000..13eaf5330 --- /dev/null +++ b/ArkLib/ProofSystem/Component/ReduceClaim.lean @@ -0,0 +1,117 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic + +/-! 
+ # Simple (Oracle) Reduction: Locally / non-interactively reduce a claim + + This is a zero-round (oracle) reduction. + + 1. Reduction version: there are mappings between `StmtIn → StmtOut` and `WitIn → WitOut`. The + prover and verifier applies these mappings to the input statement and witness, and returns the + output statement and witness. + + This reduction is secure via pull-backs on relations. In other words, the outputs of the reduction + satisfies some relation `relOut` if and only if the inputs satisfy the relation + `relIn := relOut (mapStmt ·) (mapWit ·)`. + + 2. Oracle reduction version: same as above, but with the extra mapping `OStmtIn → OStmtOut`, + defined as an oracle simulation / embedding. + + This oracle reduction is secure via pull-backs on relations. In other words, the outputs of the + reduction satisfies some relation `relOut` if and only if the inputs satisfy the relation + `relIn := relOut ((mapStmt ·) ⊗ (mapOStmt ·)) (mapWit ·)`. +-/ + +namespace ReduceClaim + +variable {ι : Type} (oSpec : OracleSpec ι) + {StmtIn : Type} {ιₛᵢ : Type} {OStmtIn : ιₛᵢ → Type} {WitIn : Type} + {StmtOut : Type} {ιₛₒ : Type} {OStmtOut : ιₛₒ → Type} {WitOut : Type} + [∀ i, OracleInterface (OStmtIn i)] + (mapStmt : StmtIn → StmtOut) (mapWit : WitIn → WitOut) + +section Reduction + +/-- The prover for the `ReduceClaim` reduction. -/ +def prover : Prover oSpec StmtIn WitIn StmtOut WitOut ![] where + PrvState | 0 => StmtIn × WitIn + input := id + sendMessage := fun i => nomatch i + receiveChallenge := fun i => nomatch i + output := fun ⟨stmt, wit⟩ => (mapStmt stmt, mapWit wit) + +/-- The verifier for the `ReduceClaim` reduction. -/ +def verifier : Verifier oSpec StmtIn StmtOut ![] where + verify := fun stmt _ => pure (mapStmt stmt) + +/-- The reduction for the `ReduceClaim` reduction. 
-/ +def reduction : Reduction oSpec StmtIn WitIn StmtOut WitOut ![] where + prover := prover oSpec mapStmt mapWit + verifier := verifier oSpec mapStmt + +variable [oSpec.FiniteRange] (relIn : Set (StmtIn × WitIn)) (relOut : Set (StmtOut × WitOut)) + +/-- The `ReduceClaim` reduction satisfies perfect completeness for any relation. -/ +@[simp] +theorem reduction_completeness (hRel : ∀ stmtIn witIn, (stmtIn, witIn) ∈ relIn ↔ + (mapStmt stmtIn, mapWit witIn) ∈ relOut) : + (reduction oSpec mapStmt mapWit).perfectCompleteness + relIn relOut := by + simp [reduction, Reduction.run, Prover.run, Prover.runToRound, Prover.processRound, Verifier.run, + prover, verifier, hRel] + +-- TODO: round-by-round knowledge soundness + +end Reduction + +section OracleReduction + +variable + -- Require map on indices to go the other way + (embedIdx : ιₛₒ ↪ ιₛᵢ) (hEq : ∀ i, OStmtIn (embedIdx i) = OStmtOut i) + +/-- The oracle prover for the `ReduceClaim` oracle reduction. -/ +def oracleProver : OracleProver oSpec + StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut ![] where + PrvState := fun _ => (StmtIn × (∀ i, OStmtIn i)) × WitIn + input := id + sendMessage := fun i => nomatch i + receiveChallenge := fun i => nomatch i + output := fun ⟨⟨stmt, oStmt⟩, wit⟩ => + ((mapStmt stmt, fun i => (hEq i) ▸ oStmt (embedIdx i)), mapWit wit) + +/-- The oracle verifier for the `ReduceClaim` oracle reduction. -/ +def oracleVerifier : OracleVerifier oSpec StmtIn OStmtIn StmtOut OStmtOut ![] where + verify := fun stmt _ => pure (mapStmt stmt) + embed := .trans embedIdx .inl + hEq := by intro i; simp [hEq] + +/-- The oracle reduction for the `ReduceClaim` oracle reduction. 
-/ +def oracleReduction : OracleReduction oSpec + StmtIn OStmtIn WitIn StmtOut OStmtOut WitOut ![] where + prover := oracleProver oSpec mapStmt mapWit embedIdx hEq + verifier := oracleVerifier oSpec mapStmt embedIdx hEq + +variable [oSpec.FiniteRange] + (relIn : Set ((StmtIn × (∀ i, OStmtIn i)) × WitIn)) + (relOut : Set ((StmtOut × (∀ i, OStmtOut i)) × WitOut)) + +/-- The `ReduceClaim` oracle reduction satisfies perfect completeness for any relation. -/ +@[simp] +theorem oracleReduction_completeness (hRel : ∀ stmtIn oStmtIn witIn, + ((stmtIn, oStmtIn), witIn) ∈ relIn ↔ + ((mapStmt stmtIn, fun i => (hEq i) ▸ oStmtIn (embedIdx i)), mapWit witIn) ∈ relOut) : + (oracleReduction oSpec mapStmt mapWit embedIdx hEq).perfectCompleteness + relIn relOut := by + sorry + +-- TODO: round-by-round knowledge soundness + +end OracleReduction + +end ReduceClaim diff --git a/ArkLib/ProofSystem/Component/SendClaim.lean b/ArkLib/ProofSystem/Component/SendClaim.lean index 91f435c03..5281ee00f 100644 --- a/ArkLib/ProofSystem/Component/SendClaim.lean +++ b/ArkLib/ProofSystem/Component/SendClaim.lean @@ -32,18 +32,16 @@ variable {ι : Type} (oSpec : OracleSpec ι) (Statement : Type) @[reducible] def pSpec : ProtocolSpec 1 := ![(.P_to_V, OStatement default)] -instance : ∀ i, OracleInterface ((pSpec OStatement).Message i) - | ⟨0, _⟩ => by dsimp; infer_instance -instance : ∀ i, VCVCompatible ((pSpec OStatement).Challenge i) | ⟨0, h⟩ => nomatch h - /-- The prover takes in the old oracle statement as input, and sends it as the protocol message. 
-/ -def prover : OracleProver (pSpec OStatement) oSpec Statement Unit Unit Unit - OStatement (OStatement ⊕ᵥ OStatement) where - PrvState | 0 => OStatement default | 1 => OStatement default +def prover : OracleProver oSpec + Statement OStatement Unit + Unit (OStatement ⊕ᵥ OStatement) Unit + (pSpec OStatement) where + PrvState := fun _ => OStatement default - input := fun ⟨_, oStmt⟩ () => oStmt default + input := fun ⟨⟨_, oStmt⟩, _⟩ => oStmt default sendMessage | ⟨0, _⟩ => fun st => pure (st, st) @@ -55,7 +53,7 @@ def prover : OracleProver (pSpec OStatement) oSpec Statement Unit Unit Unit | .inr default => by simpa [Unique.uniq] using st⟩, ()) -variable (rel : Statement × (∀ i, OStatement i) → Prop) +variable (rel : Set ((Statement × (∀ i, OStatement i)) × Unit)) (relComp : Statement → OracleComp [OStatement]ₒ Unit) -- (rel_eq : ∀ stmt oStmt, rel stmt oStmt ↔ -- (OracleInterface.simOracle []ₒ (OracleInterface.oracle oStmt)).run = oStmt) @@ -64,8 +62,8 @@ variable (rel : Statement × (∀ i, OStatement i) → Prop) The verifier checks that the relationship `rel oldStmt newStmt` holds. It has access to the original and new `OStatement` via their oracle indices. -/ -def verifier : OracleVerifier (pSpec OStatement) oSpec Statement Unit - OStatement (OStatement ⊕ᵥ OStatement) where +def verifier : OracleVerifier oSpec Statement OStatement Unit (OStatement ⊕ᵥ OStatement) + (pSpec OStatement) where verify := fun stmt _ => relComp stmt @@ -78,14 +76,14 @@ Combine the prover and verifier into an oracle reduction. The input has no statement or witness, but one `OStatement`. The output is also no statement or witness, but two `OStatement`s. 
-/ -def oracleReduction : - OracleReduction (pSpec OStatement) oSpec Statement Unit Unit Unit - OStatement (OStatement ⊕ᵥ OStatement) where +def oracleReduction : OracleReduction oSpec + Statement OStatement Unit + Unit (OStatement ⊕ᵥ OStatement) Unit (pSpec OStatement) where prover := prover oSpec Statement OStatement verifier := verifier oSpec Statement OStatement relComp -def relOut : Unit × (∀ i, (OStatement ⊕ᵥ OStatement) i) → Unit → Prop := - fun ⟨(), oracles⟩ () => oracles (.inl default) = oracles (.inr default) +def relOut : Set ((Unit × (∀ i, (Sum.elim OStatement OStatement) i)) × Unit) := + setOf (fun ⟨⟨(), oracles⟩, _⟩ => oracles (.inl default) = oracles (.inr default)) variable [oSpec.FiniteRange] @@ -94,7 +92,7 @@ Proof of perfect completeness: if `rel old new` holds in the real setting, it also holds in the ideal setting, etc. -/ theorem completeness : (oracleReduction oSpec Statement OStatement relComp).perfectCompleteness - (fun x _ => rel x) (relOut OStatement) := by + rel (relOut OStatement) := by sorry end SendClaim diff --git a/ArkLib/ProofSystem/Component/SendWitness.lean b/ArkLib/ProofSystem/Component/SendWitness.lean index a5a8b0404..6e4460509 100644 --- a/ArkLib/ProofSystem/Component/SendWitness.lean +++ b/ArkLib/ProofSystem/Component/SendWitness.lean @@ -4,71 +4,258 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ import ArkLib.OracleReduction.Security.Basic +import Mathlib.Data.FinEnum /-! # Simple Oracle Reduction - SendWitness -- The prover sends an oracle of type `WitEquiv` (supposed to be the witness up to equivalence) to - the verifier. - - The verifier does nothing. - - The output data has the same `Statement`, have output `OStatement` be the previous `OStatement` - plus an oracle for `EncWit`, and have no witness. - - The new relation is `rel` but replacing `Witness` with `EncWit`. 
+This file contains the (oracle) reduction for the trivial one-message protocol where the prover +sends the (entire) witness to the verifier. There are two variants: -This is usually used as the first step of an oracle reduction, to replace the witness with an -oracle statement. +1. For oracle reduction: the witness is an indexed family of types, and sent in a single oracle + message to the verifier (using the derived indexed product instance for oracle interface). + + We also define a simpler variant where one sends a single witness (converted to be indexed by + `Fin 1`). + +2. For reduction: the witness is a type, and sent as a statement to the verifier. -/ -open OracleSpec OracleComp OracleQuery +open OracleSpec OracleComp OracleQuery ProtocolSpec Function Equiv + +variable {ι : Type} (oSpec : OracleSpec ι) (Statement : Type) namespace SendWitness -variable {ι : Type} (oSpec : OracleSpec ι) (Statement Witness : Type) - {ιₛᵢ : Type} (OStatement : ιₛᵢ → Type) [∀ i, OracleInterface (OStatement i)] - (WitEquiv : Type) [OracleInterface WitEquiv] (equiv : Witness ≃ WitEquiv) +/-! 
+ First, the reduction version (no oracle statements) +-/ + +section Reduction + +variable (Witness : Type) -@[reducible] -def pSpec : ProtocolSpec 1 := ![(.P_to_V, WitEquiv)] +@[reducible, simp] +def pSpec : ProtocolSpec 1 := ![(.P_to_V, Witness)] --- TODO: figure out why `OracleInterface` & `VCVCompatible` instances cannot be automatically --- synthesized -instance : ∀ i, OracleInterface ((pSpec WitEquiv).Message i) - | ⟨0, _⟩ => by dsimp; infer_instance -instance : ∀ i, VCVCompatible ((pSpec WitEquiv).Challenge i) | ⟨0, h⟩ => nomatch h +instance : ∀ i, VCVCompatible ((pSpec Witness).Challenge i) | ⟨0, h⟩ => nomatch h -def prover : OracleProver (pSpec WitEquiv) oSpec Statement Witness Statement Unit - OStatement (OStatement ⊕ᵥ (fun _ : Unit => WitEquiv)) where +@[inline, specialize] +def prover : Prover oSpec Statement Witness (Statement × Witness) Unit (pSpec Witness) where PrvState - | 0 => Statement × (∀ i, OStatement i) × Witness - | 1 => Statement × (∀ i, OStatement i) × WitEquiv - input := fun ⟨stmt, oStmt⟩ wit => ⟨stmt, oStmt, wit⟩ - sendMessage | ⟨0, _⟩ => fun ⟨stmt, oStmt, wit⟩ => pure (equiv wit, ⟨stmt, oStmt, equiv wit⟩) + | 0 => Statement × Witness + | 1 => Statement × Witness + input := id + sendMessage | ⟨0, _⟩ => fun ⟨stmt, wit⟩ => pure (wit, ⟨stmt, wit⟩) receiveChallenge | ⟨0, h⟩ => nomatch h - output := fun ⟨stmt, oStmt, encWit⟩ => (⟨stmt, (Sum.rec oStmt (fun _ => encWit))⟩, ()) + output := fun ⟨stmt, wit⟩ => (⟨stmt, wit⟩, ()) -def verifier : OracleVerifier (pSpec WitEquiv) oSpec Statement Statement - OStatement (OStatement ⊕ᵥ (fun _ : Unit => WitEquiv)) where - verify := fun stmt _ => pure stmt - embed := by simp [pSpec, ProtocolSpec.MessageIdx]; sorry - hEq := sorry +@[inline, specialize] +def verifier : Verifier oSpec Statement (Statement × Witness) (pSpec Witness) where + verify := fun stmt transcript => pure ⟨stmt, transcript 0⟩ + +@[inline, specialize] +def reduction : Reduction oSpec Statement Witness (Statement × Witness) Unit (pSpec 
Witness) where + prover := prover oSpec Statement Witness + verifier := verifier oSpec Statement Witness + +variable {Statement} {Witness} [oSpec.FiniteRange] (relIn : Set (Statement × Witness)) + +@[reducible, simp] +def toRelOut : Set ((Statement × Witness) × Unit) := + Prod.fst ⁻¹' relIn + +/-- The `SendWitness` reduction satisfies perfect completeness. -/ +@[simp] +theorem reduction_completeness : + (reduction oSpec Statement Witness).perfectCompleteness relIn (toRelOut relIn) := by + simp [Reduction.run, Prover.run, Prover.runToRound, Prover.processRound, Verifier.run, + reduction, prover, verifier] + aesop + +theorem reduction_rbr_knowledge_soundness : True := sorry + +end Reduction + +/-! + Now, the oracle reduction version +-/ + +section OracleReduction + +variable {ιₛ : Type} (OStatement : ιₛ → Type) [∀ i, OracleInterface (OStatement i)] + {ιw : Type} [FinEnum ιw] (Witness : ιw → Type) [∀ i, OracleInterface (Witness i)] + +@[reducible, simp] +def oraclePSpec : ProtocolSpec 1 := ![(.P_to_V, ∀ i, Witness i)] + +-- instance : IsEmpty (oraclePSpec Witness).ChallengeIdx where +-- false := by aesop +-- instance : ∀ i, OracleInterface ((oraclePSpec Witness).Message i) +-- | ⟨0, _⟩ => OracleInterface.instForall _ +-- instance : ∀ i, VCVCompatible ((oraclePSpec Witness).Challenge i) +-- | ⟨0, _⟩ => by aesop + +/-- The oracle prover for the `SendWitness` oracle reduction. + +For each round `i : Fin (FinEnum.card ιw)`, the prover sends the witness +`wit (FinEnum.equiv.symm i)` to the verifier. 
+-/ +@[inline, specialize] +def oracleProver : OracleProver oSpec + Statement OStatement (∀ i, Witness i) + Statement (OStatement ⊕ᵥ Witness) Unit + (oraclePSpec Witness) where + PrvState := fun _ => (Statement × (∀ i, OStatement i)) × (∀ i, Witness i) + input := id + sendMessage | ⟨0, _⟩ => fun ⟨stmt, wit⟩ => pure (wit, ⟨stmt, wit⟩) + -- No challenge is sent to the prover + receiveChallenge | ⟨0, h⟩ => nomatch h + output := fun ⟨⟨stmt, oStmt⟩, wit⟩ => (⟨stmt, Sum.rec oStmt wit⟩, ()) + +-- /-- The oracle verifier for the `SendWitness` oracle reduction. -def oracleReduction : OracleReduction (pSpec WitEquiv) oSpec Statement Witness Statement Unit - OStatement (OStatement ⊕ᵥ (fun _ : Unit => WitEquiv)) where - prover := prover oSpec Statement Witness OStatement WitEquiv equiv - verifier := verifier oSpec Statement OStatement WitEquiv +-- It receives the input statement `stmt` and returns it, and also specifying the combination of +-- `OStatement` and `Witness` as the output oracle statements. 
+-- -/ +-- @[inline, specialize] +-- def oracleVerifier : OracleVerifier (oraclePSpec Witness) oSpec +-- Statement Statement OStatement (OStatement ⊕ᵥ Witness) where +-- verify := fun stmt _ => pure stmt +-- -- ιₛ ⊕ ιw ↪ ιₛ ⊕ (oraclePSpec Witness).MessageIdx +-- embed := Embedding.sumMap (.refl _) +-- -- ιw ↪ (oraclePSpec Witness).MessageIdx +-- (Equiv.toEmbedding +-- -- ιw ≃ (oraclePSpec Witness).MessageIdx +-- -- after unfolding : ιw ≃ { i : Fin (FinEnum.card ιw) // True } +-- (.trans FinEnum.equiv -- ιw ≃ Fin (FinEnum.card ιw) +-- <| .symm -- { i : Fin (FinEnum.card ιw) // True } ≃ Fin (FinEnum.card ιw) +-- <| .subtypeUnivEquiv (by simp))) +-- hEq := by intro i; rcases i <;> simp -variable (relIn : Statement × (∀ i, OStatement i) → Witness → Prop) +-- @[inline, specialize] +-- def oracleReduction : OracleReduction (oraclePSpec Witness) oSpec +-- Statement (∀ i, Witness i) Statement Unit +-- OStatement (OStatement ⊕ᵥ Witness) where +-- prover := oracleProver oSpec Statement OStatement Witness +-- verifier := oracleVerifier oSpec Statement OStatement Witness -def toRelOut : Statement × (∀ i, (OStatement ⊕ᵥ (fun _ : Unit => WitEquiv)) i) → Unit → Prop := - fun ⟨stmt, oStmt⟩ _ => relIn ⟨stmt, fun i => oStmt (Sum.inl i)⟩ (equiv.invFun <| oStmt (.inr ())) +-- variable {Statement} {OStatement} {Witness} [oSpec.FiniteRange] +-- (oRelIn : Statement × (∀ i, OStatement i) → (∀ i, Witness i) → Prop) -variable [oSpec.FiniteRange] +-- @[reducible, simp] +-- def toORelOut : Statement × (∀ i, (OStatement ⊕ᵥ Witness) i) → Unit → Prop := +-- fun ⟨stmt, oStmtAndWit⟩ _ => +-- oRelIn ⟨stmt, fun i => oStmtAndWit (Sum.inl i)⟩ (fun i => oStmtAndWit (Sum.inr i)) -theorem completeness : OracleReduction.perfectCompleteness - relIn - (toRelOut Statement Witness OStatement WitEquiv equiv relIn) - (oracleReduction oSpec Statement Witness OStatement WitEquiv equiv) := by - sorry - -- simp [OracleReduction.perfectCompleteness] +-- /-- Running the oracle prover returns the expected result: 
`(stmt, Sum.rec oStmt wit)`. -/ +-- theorem oracleProver_run {stmt : Statement} {oStmt : ∀ i, OStatement i} {wit : ∀ i, Witness i} : +-- (oracleProver oSpec Statement OStatement Witness).run ⟨stmt, oStmt⟩ wit = +-- pure ((stmt, Sum.rec oStmt wit), (), fun i => wit (FinEnum.equiv.symm i)) := by +-- simp [Prover.run, Prover.runToRound, Prover.processRound, oracleProver] +-- sorry + +-- /-- The `SendWitness` oracle reduction satisfies perfect completeness. -/ +-- @[simp] +-- theorem oracleReduction_completeness : +-- (oracleReduction oSpec Statement OStatement Witness).perfectCompleteness oRelIn +-- (toORelOut oRelIn) := by +-- simp [OracleReduction.perfectCompleteness, OracleReduction.toReduction, +-- OracleVerifier.toVerifier] +-- intro stmt oStmt wit hRelIn +-- unfold Reduction.run +-- sorry + +-- theorem oracleReduction_rbr_knowledge_soundness : True := sorry + +end OracleReduction end SendWitness + +namespace SendSingleWitness + +/-! + A special case of `SendWitness` oracle reduction where there is only one witness. We implicitly + convert to `fun _ : Fin 1 => Witness`. +-/ + +variable {ιₛ : Type} (OStatement : ιₛ → Type) [∀ i, OracleInterface (OStatement i)] + (Witness : Type) [OracleInterface Witness] + +@[reducible, simp] +def oraclePSpec : ProtocolSpec 1 := ![(.P_to_V, Witness)] + +/-- The oracle prover for the `SendSingleWitness` oracle reduction. + +The prover sends the witness `wit` to the verifier as the only oracle message. +-/ +@[inline, specialize] +def oracleProver : OracleProver oSpec + Statement OStatement Witness + Statement (OStatement ⊕ᵥ (fun _ : Fin 1 => Witness)) Unit + (oraclePSpec Witness) where + PrvState := fun _ => (Statement × (∀ i, OStatement i)) × Witness + input := id + sendMessage | ⟨0, _⟩ => fun ⟨stmt, wit⟩ => pure (wit, ⟨stmt, wit⟩) + receiveChallenge | ⟨0, h⟩ => nomatch h + output := fun ⟨⟨stmt, oStmt⟩, wit⟩ => (⟨stmt, Sum.rec oStmt (fun _ => wit)⟩, ()) + +/-- The oracle verifier for the `SendSingleWitness` oracle reduction. 
+ +The verifier receives the input statement `stmt` and returns it, and also specifying the oracle +message as the output oracle statement. +-/ +@[inline, specialize] +def oracleVerifier : OracleVerifier oSpec + Statement OStatement Statement (OStatement ⊕ᵥ (fun _ : Fin 1 => Witness)) + (oraclePSpec Witness) where + verify := fun stmt _ => pure stmt + embed := .sumMap (.refl _) + <| Equiv.toEmbedding + <|.symm (subtypeUnivEquiv (by simp)) + hEq := by intro i; rcases i <;> simp + +@[inline, specialize] +def oracleReduction : OracleReduction oSpec + Statement OStatement Witness + Statement (OStatement ⊕ᵥ (fun _ : Fin 1 => Witness)) Unit + (oraclePSpec Witness) where + prover := oracleProver oSpec Statement OStatement Witness + verifier := oracleVerifier oSpec Statement OStatement Witness + +variable {Statement} {OStatement} {Witness} + +omit [(i : ιₛ) → OracleInterface (OStatement i)] [OracleInterface Witness] in +theorem oracleProver_run {stmt : Statement} {oStmt : ∀ i, OStatement i} {wit : Witness}: + (oracleProver oSpec Statement OStatement Witness).run ⟨stmt, oStmt⟩ wit = + pure (⟨⟨stmt, Sum.rec oStmt (fun _ => wit)⟩, ()⟩, fun i => by simpa using wit) := by + simp [Prover.run, Prover.runToRound, Prover.processRound, oracleProver, Transcript.concat] + ext i; fin_cases i; simp [Fin.snoc] + +theorem oracleVerifier_toVerifier_run {stmt : Statement} {oStmt : ∀ i, OStatement i} + {tr : (oraclePSpec Witness).FullTranscript}: + (oracleVerifier oSpec Statement OStatement Witness).toVerifier.run ⟨stmt, oStmt⟩ tr = + pure ⟨stmt, Sum.rec oStmt (fun i => by simpa using tr i)⟩ := by + simp [Verifier.run, OracleVerifier.toVerifier, oracleVerifier] + ext i; rcases i <;> simp + +variable [oSpec.FiniteRange] (oRelIn : Set ((Statement × (∀ i, OStatement i)) × Witness)) + +@[reducible, simp] +def toORelOut : + Set ((Statement × (∀ i, (Sum.elim OStatement fun _ : Fin 1 => Witness) i)) × Unit) := + setOf (fun ⟨⟨stmt, oStmtAndWit⟩, _⟩ => + oRelIn ⟨⟨stmt, fun i => oStmtAndWit (Sum.inl 
i)⟩, (oStmtAndWit (Sum.inr 0))⟩) + +/-- The `SendSingleWitness` oracle reduction satisfies perfect completeness. -/ +@[simp] +theorem oracleReduction_completeness : + (oracleReduction oSpec Statement OStatement Witness).perfectCompleteness oRelIn + (toORelOut oRelIn) := by + simp [OracleReduction.perfectCompleteness, OracleReduction.toReduction] + simp_rw [Reduction.run, oracleReduction, oracleVerifier_toVerifier_run, oracleProver_run] + aesop + +theorem oracleReduction_rbr_knowledge_soundness : True := sorry + +end SendSingleWitness diff --git a/ArkLib/ProofSystem/ConstraintSystem/Plonk.lean b/ArkLib/ProofSystem/ConstraintSystem/Plonk.lean index 535156bd2..dc3c99c45 100644 --- a/ArkLib/ProofSystem/ConstraintSystem/Plonk.lean +++ b/ArkLib/ProofSystem/ConstraintSystem/Plonk.lean @@ -26,7 +26,7 @@ structure WireIndices (numWires : ℕ) where deriving DecidableEq /-- A selector for a Plonk constraint system is a set of coefficients that determine the gate type - -/ +-/ structure Selector (𝓡 : Type*) where /-- left input -/ qL : 𝓡 diff --git a/ArkLib/ProofSystem/ConstraintSystem/R1CS.lean b/ArkLib/ProofSystem/ConstraintSystem/R1CS.lean index b9524764b..6f3c77362 100644 --- a/ArkLib/ProofSystem/ConstraintSystem/R1CS.lean +++ b/ArkLib/ProofSystem/ConstraintSystem/R1CS.lean @@ -1,17 +1,19 @@ /- -Copyright (c) 2024 ArkLib Contributors. All rights reserved. +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import Mathlib.Data.Matrix.Hadamard +import ArkLib.Data.Matrix.Basic /-! -# R1CS - -This file defines the R1CS (Rank-1 Constraint System) relation. It also defines the sparse -representation of a matrix. +# Rank-1 Constraint System (R1CS) +This file defines the R1CS (Rank-1 Constraint System) relation +- The definition is in terms of `Fin` vectors and matrices. 
In the future, we may consider more + efficient representations such as `Vector` and `Vector m (Vector n α)`. +- We define padding (on the right) for R1CS instances, and show that padding preserves the R1CS + relation. -/ namespace R1CS @@ -24,48 +26,67 @@ inductive MatrixIdx where | A | B | C deriving Inhabited, DecidableEq structure Size where m : ℕ -- number of columns - n_x : ℕ -- number of public variables + n : ℕ -- number of rows n_w : ℕ -- number of witness variables + n_w_le_n : n_w ≤ n := by omega -- Number of witness variables must be at most the number of rows + +attribute [simp] Size.n_w_le_n + +variable (sz : Size) + +/-- Number of public `𝕩` variables -/ +abbrev Size.n_x : ℕ := sz.n - sz.n_w + +lemma Size.n_eq_n_x_add_n_w : sz.n = sz.n_x + sz.n_w := by + simp [Size.n_x] -abbrev Size.n (sz : Size) : ℕ := sz.n_x + sz.n_w +@[reducible] +def Statement := Fin sz.n_x → R -def Statement (sz : Size) := Fin sz.n_x → R +@[reducible] +def OracleStatement := fun _ : MatrixIdx => Matrix (Fin sz.m) (Fin sz.n) R -def OracleStatement (sz : Size) := fun _ : MatrixIdx => Matrix (Fin sz.m) (Fin sz.n) R +@[reducible] +def Witness := Fin sz.n_w → R -def Witness (sz : Size) := Fin sz.n_w → R +/-- The vector `𝕫` is the concatenation of the public input and witness variables -/ +@[reducible, inline] +def 𝕫 {R} {sz} (stmt : Statement R sz) (wit : Witness R sz) : Fin sz.n → R := + Fin.append stmt wit ∘ Fin.cast (by simp) --- The R1CS relation -def relation (sz : Size) : +/-- The R1CS relation: `(A *ᵥ 𝕫) * (B *ᵥ 𝕫) = (C *ᵥ 𝕫)`, where `*` is understood to mean + component-wise (Hadamard) vector multiplication. 
-/ +@[reducible] +def relation : (Fin sz.n_x → R) → -- public input `x` (MatrixIdx → Matrix (Fin sz.m) (Fin sz.n) R) → -- matrices `A`, `B`, `C` as oracle inputs (Fin sz.n_w → R) → -- witness input `w` Prop := - fun stmt matrices wit => - let z : Fin (sz.n_x + sz.n_w) → R := Fin.append stmt wit - (matrices .A *ᵥ z) * (matrices .B *ᵥ z) = (matrices .C *ᵥ z) + fun stmt matrix wit => + letI 𝕫 := 𝕫 stmt wit + (matrix .A *ᵥ 𝕫) * (matrix .B *ᵥ 𝕫) = (matrix .C *ᵥ 𝕫) + +/-- Pad an R1CS instance (on the right) from `sz₁` to `sz₂` with zeros. + +Note that this results in truncation if the second size is smaller than the first one. -/ +def pad (sz₁ sz₂ : Size) + (stmt : Statement R sz₁) + (matrices : MatrixIdx → Matrix (Fin sz₁.m) (Fin sz₁.n) R) + (wit : Witness R sz₁) : + Statement R sz₂ × (MatrixIdx → Matrix (Fin sz₂.m) (Fin sz₂.n) R) × Witness R sz₂ := + (Fin.rightpad sz₂.n_x 0 stmt, + fun idx => Matrix.rightpad sz₂.m sz₂.n 0 (matrices idx), + Fin.rightpad sz₂.n_w 0 wit) + +-- padding preserves the R1CS relation +theorem pad_preserves_relation (sz₁ sz₂ : Size) + (stmt : Statement R sz₁) + (matrices : MatrixIdx → Matrix (Fin sz₁.m) (Fin sz₁.n) R) + (wit : Witness R sz₁) : + relation R sz₁ stmt matrices wit = + let (stmt', matrices', wit') := pad R sz₁ sz₂ stmt matrices wit + relation R sz₂ stmt' matrices' wit' := by + simp [pad, relation, rightpad] + sorry end R1CS - -/-- The sparse representation of a matrix `m → n → α` consists of: -- The number of non-zero entries `k : ℕ` -- The row indices `row : Fin k → m` -- The column indices `col : Fin k → n` -- The values `val : Fin k → α` - -This representation is **not** unique. In particular, we may have duplicate `(row, col)` pairs, and -some `val` may be zero. --/ -structure SparseMatrix (m n α : Type*) where - numEntries : ℕ - row : Fin numEntries → m - col : Fin numEntries → n - val : Fin numEntries → α -deriving Inhabited, DecidableEq - -/-- Convert a sparse matrix to a regular (dense) matrix. 
For each entry `(i, j)` of the matrix, we - simply sum over all `k` such that `(row k, col k) = (i, j)`. --/ -def SparseMatrix.toMatrix {m n α : Type*} [DecidableEq m] [DecidableEq n] [AddCommMonoid α] - (A : SparseMatrix m n α) : Matrix m n α := - fun i j => ∑ k : Fin A.numEntries, if A.row k = i ∧ A.col k = j then A.val k else 0 diff --git a/ArkLib/ProofSystem/Spartan/Basic.lean b/ArkLib/ProofSystem/Spartan/Basic.lean index 5ef633bbe..6b8f7de44 100644 --- a/ArkLib/ProofSystem/Spartan/Basic.lean +++ b/ArkLib/ProofSystem/Spartan/Basic.lean @@ -4,11 +4,13 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.OracleReduction.Security.Basic import ArkLib.ProofSystem.ConstraintSystem.R1CS import ArkLib.Data.MvPolynomial.Multilinear -import ArkLib.ProofSystem.Sumcheck.Basic --- import ArkLib.ProofSystem.Components +import ArkLib.ProofSystem.Sumcheck.Spec.General +import ArkLib.ProofSystem.Component.SendWitness +import ArkLib.ProofSystem.Component.RandomQuery +import ArkLib.ProofSystem.Component.SendClaim +import ArkLib.ProofSystem.Component.CheckClaim /-! # The Spartan PIOP (Polynomial Interactive Oracle Proof) @@ -18,14 +20,17 @@ import ArkLib.ProofSystem.Sumcheck.Basic - `R` is the underlying ring, required to be a finite integral domain. - `n := 2 ^ ℓ_n` is the number of variables in the R1CS relation. - `m := 2 ^ ℓ_m` is the number of constraints in the R1CS relation. - - `k := 2 ^ ℓ_k` is the number of witness variables, where `ℓ_k < ℓ_n`. + - `n_w := 2 ^ ℓ_w` is the number of witness variables, where `ℓ_w < ℓ_n`. Note that all dimensions are required to be powers of two. + (Maybe we shouldn't do this? And do the padding explicitly, so we can handle arbitrary + dimensions?) + It is used to prove the correctness of R1CS relations: `(A *ᵥ 𝕫) * (B *ᵥ 𝕫) = (C *ᵥ 𝕫)`, where: - `A, B, C : Matrix (Fin m) (Fin n) R` are the R1CS constraint matrices. - - `𝕩 : Fin (n - k) → R` is the public input. 
- - `𝕨 : Fin k → R` is the private witness. + - `𝕩 : Fin (n - n_w) → R` is the public input. + - `𝕨 : Fin n_w → R` is the private witness. - `𝕫 = 𝕩 ‖ 𝕨` is the concatenation of the public input `𝕩` and the private witness `𝕨`. - `*ᵥ` denotes the standard matrix-vector product, and `*` denotes the component-wise product. @@ -37,50 +42,53 @@ import ArkLib.ProofSystem.Sumcheck.Basic **I. Interaction Phase:** - **Stage 0:** The oracle verifier may optionally receive oracle access to the multilinear - extensions `MLE A, MLE B, MLE C : R[X Fin ℓ_n][X Fin ℓ_m]` of the R1CS matrices `A`, `B`, - and `C`. Otherwise, the oracle verifier may see the matrices `A`, `B`, and `C` directly - (as part of the input statement). + extensions `MLE A, MLE B, MLE C : R[X Fin ℓ_n][X Fin ℓ_m]` of the R1CS matrices `A`, `B`, and + `C`. Otherwise, the oracle verifier may see the matrices `A`, `B`, and `C` directly (as part of + the input statement). - - **Stage 1:** The prover sends the multilinear extension `MLE 𝕨 : R[X Fin ℓ_k]` of the witness - `w` to the verifier. The verifier sends back a challenge `τ : Fin ℓ_m → R`. + - **Stage 1:** The prover sends the multilinear extension `MLE 𝕨 : R[X Fin n_w]` of the witness + `w` to the verifier. The verifier sends back a challenge `τ : Fin ℓ_m → R`. - **Stage 2:** The prover and verifier engage in a sum-check protocol to verify the computation: - `∑ x ∈ {0, 1}^ℓ_m, eqPoly ⸨τ, x⸩ * (A_x ⸨x⸩ * B_x ⸨x⸩ - C_x ⸨x⸩) = 0`, + `∑ x ∈ {0, 1}^ℓ_m, eqPoly ⸨τ, x⸩ * (A𝕫 ⸨x⸩ * B𝕫 ⸨x⸩ - C𝕫 ⸨x⸩) = 0`, - where `A_x ⸨X⸩ = ∑ y ∈ {0, 1}^ℓ_m, (MLE A) ⸨X, y⸩ * (MLE 𝕫) ⸨y⸩`, and similarly for - `B_x` and `C_x`. + where `A𝕫 ⸨X⸩ = ∑ y ∈ {0, 1}^ℓ_m, (MLE A) ⸨X⸩ ⸨y⸩ * (MLE 𝕫) ⸨y⸩`, and similarly for `B𝕫` and + `C𝕫`. The sum-check protocol terminates with random challenges `r_x : Fin ℓ_m → R`, and the purported - evaluation `e_x` of `eqPoly ⸨τ, r_x⸩ * (A_x ⸨r_x⸩ * B_x ⸨r_x⸩ - C_x ⸨r_x⸩)`. + evaluation `e_x` of `eqPoly ⸨τ, r_x⸩ * (A𝕫 ⸨r_x⸩ * B𝕫 ⸨r_x⸩ - C𝕫 ⸨r_x⸩)`. 
- **Stage 3:** The prover sends further evaluation claims to the verifier: - `v_A = A_x ⸨r_x⸩`, `v_B = B_x ⸨r_x⸩`, `v_C = C_x ⸨r_x⸩` + + `v_A = A𝕫 ⸨r_x⸩`, `v_B = B𝕫 ⸨r_x⸩`, `v_C = C𝕫 ⸨r_x⸩` The verifier sends back challenges `r_A, r_B, r_C : R`. - **Stage 4:** The prover and verifier engage in another sum-check protocol to verify the - computation: - `∑ y ∈ {0, 1}^ℓ_n, r_A * (MLE A) ⸨r_x, y⸩ * (MLE 𝕫) ⸨y⸩ + r_B * (MLE B) ⸨r_x, y⸩ * (MLE 𝕫) ⸨y⸩ ` - `+ r_C * (MLE C) ⸨r_x, y⸩ * (MLE 𝕫) ⸨y⸩ = r_A * v_A + r_B * v_B + r_C * v_C` + computation: + + `∑ y ∈ {0, 1}^ℓ_n, r_A * (MLE A) ⸨r_x, y⸩ * (MLE 𝕫) ⸨y⸩ + r_B * (MLE B) ⸨r_x, y⸩ *` + `(MLE 𝕫) ⸨y⸩ + r_C * (MLE C) ⸨r_x, y⸩ * (MLE 𝕫) ⸨y⸩ = r_A * v_A + r_B * v_B + r_C * v_C` The sum-check protocol terminates with random challenges `r_y : Fin ℓ_n → R`, and the purported evaluation `e_y` of - `(r_A * (MLE A) ⸨r_x, r_y⸩ + r_B * (MLE B) ⸨r_x, r_y⸩ + r_C * (MLE C) ⸨r_x, r_y⸩) ` - `* (MLE 𝕫) ⸨r_y⸩`. + + `(r_A * (MLE A) ⸨r_x, r_y⸩ + r_B * (MLE B) ⸨r_x, r_y⸩ + r_C * (MLE C) ⸨r_x, r_y⸩) *` + `(MLE 𝕫) ⸨r_y⸩`. **II. Verification Phase:** - 1. The verifier makes a query to the polynomial oracle `MLE 𝕨` at - `r_y [ℓ_n - ℓ_k :] : Fin ℓ_k → R`, and obtain an evaluation value `v_𝕨 : R`. + 1. The verifier makes a query to the polynomial oracle `MLE 𝕨` at `r_y [ℓ_n - ℓ_k :] : Fin ℓ_k → + R`, and obtain an evaluation value `v_𝕨 : R`. - 2. The verifier makes three queries to the polynomial oracles `MLE A, MLE B, MLE C` at - `r_y ‖ r_x : Fin (ℓ_n + ℓ_m) → R`, and obtain evaluation values `v_1, v_2, v_3 : R`. + 2. The verifier makes three queries to the polynomial oracles `MLE A, MLE B, MLE C` at `r_y ‖ r_x + : Fin (ℓ_n + ℓ_m) → R`, and obtain evaluation values `v_1, v_2, v_3 : R`. - Alternatively, if the verifier does not receive oracle access, then it computes the - evaluation values directly. + Alternatively, if the verifier does not receive oracle access, then it computes the evaluation + values directly. 3. 
The verifier computes `v_𝕫 := 𝕩 *ᵢₚ (⊗ i, (1, r_y i))[: n - k] + (∏ i < ℓ_k, r_y i) * v_𝕨`, - where `*ᵢₚ` denotes the inner product, and `⊗` denotes the tensor product. + where `*ᵢₚ` denotes the inner product, and `⊗` denotes the tensor product. 4. The verifier accepts if and only if both of the following holds: - `e_x = eqPoly ⸨τ, r_x⸩ * (v_A * v_B - v_C)` @@ -88,84 +96,320 @@ import ArkLib.ProofSystem.Sumcheck.Basic -/ -open MvPolynomial +open MvPolynomial Matrix namespace Spartan +-- Note: this is the _padded_ Spartan protocol. The non-padded version will be defined via padding +-- to the nearest power of two + noncomputable section +/-- The public parameters of the (padded) Spartan protocol. Consists of the number of bits of the + R1CS dimensions, and the number of bits of the witness variables. -/ +structure PublicParams where + ℓ_m : ℕ + ℓ_n : ℕ + ℓ_w : ℕ + ℓ_w_le_ℓ_n : ℓ_w ≤ ℓ_n := by omega + +namespace PublicParams + +/-- The R1CS dimensions / sizes are the powers of two of the public parameters. 
-/ +def toSizeR1CS (pp : PublicParams) : R1CS.Size := { + m := 2 ^ pp.ℓ_m + n := 2 ^ pp.ℓ_n + n_w := 2 ^ pp.ℓ_w + n_w_le_n := Nat.pow_le_pow_of_le (by decide) pp.ℓ_w_le_ℓ_n +} + +end PublicParams + namespace Spec +variable (R : Type) [CommRing R] [IsDomain R] [Fintype R] (pp : PublicParams) + +variable {ι : Type} (oSpec : OracleSpec ι) + +section Construction + +/- The input types and relation is just the R1CS relation for the given size -/ + +/-- This unfolds to `𝕩 : Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R` -/ +@[simp] +abbrev Statement := R1CS.Statement R pp.toSizeR1CS + +/-- This unfolds to `A, B, C : Matrix (Fin 2 ^ ℓ_m) (Fin 2 ^ ℓ_n) R` -/ +@[simp] +abbrev OracleStatement := R1CS.OracleStatement R pp.toSizeR1CS + +/-- This unfolds to `𝕨 : Fin 2 ^ ℓ_w → R` -/ +@[simp] +abbrev Witness := R1CS.Witness R pp.toSizeR1CS + +/-- This unfolds to `(A *ᵥ 𝕫) * (B *ᵥ 𝕫) = (C *ᵥ 𝕫)`, where `𝕫 = 𝕩 ‖ 𝕨` -/ +@[simp] +abbrev relation := R1CS.relation R pp.toSizeR1CS + +/-- The oracle interface for the input statement is the polynomial evaluation oracle of its + multilinear extension. -/ +-- For the input oracle statement, we define its oracle interface to be the polynomial evaluation +-- oracle of its multilinear extension. + +instance : ∀ i, OracleInterface (OracleStatement R pp i) := + fun i => { + Query := (Fin pp.ℓ_m → R) × (Fin pp.ℓ_n → R) + Response := R + oracle := fun matrix ⟨x, y⟩ => matrix.toMLE ⸨C ∘ x⸩ ⸨y⸩ + } + +-- For the input witness, we define its oracle interface to be the polynomial evaluation oracle of +-- its multilinear extension. + +-- TODO: define an `OracleInterface.ofEquiv` definition that transfers the oracle interface across +-- an equivalence of types. +instance : OracleInterface (Witness R pp) where + Query := Fin pp.ℓ_w → R + Response := R + oracle := fun 𝕨 evalPoint => (MLE (𝕨 ∘ finFunctionFinEquiv)) ⸨evalPoint⸩ + /-! ## First message - Prover sends `MLE 𝕨 : R⦃≤ 1⦄[X Fin ℓ_k]`. 
+ We invoke the protocol `SendSingleWitness` to send the witness `𝕨` to the verifier. -/ -structure PublicParams where - ℓ_n : ℕ - ℓ_m : ℕ - ℓ_k : ℕ +/-- Unfolds to `𝕩 : Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R` -/ +@[simp] +abbrev Statement.AfterFirstMessage : Type := Statement R pp -def sizeR1CS (pp : PublicParams) : R1CS.Size := { - m := 2 ^ pp.ℓ_m - n_x := 2 ^ pp.ℓ_n - 2 ^ pp.ℓ_k - n_w := 2 ^ pp.ℓ_k -} +/-- Unfolds to `A, B, C : Matrix (Fin 2 ^ ℓ_m) (Fin 2 ^ ℓ_n) R` and `𝕨 : Fin 2 ^ ℓ_w → R` -/ +@[simp] +abbrev OracleStatement.AfterFirstMessage : R1CS.MatrixIdx ⊕ Fin 1 → Type := + (OracleStatement R pp) ⊕ᵥ (fun _ => Witness R pp) -variable (R : Type) [CommSemiring R] [IsDomain R] [Fintype R] (pp : PublicParams) +/-- Unfolds to `() : Unit` -/ +@[simp] +abbrev Witness.AfterFirstMessage : Type := Unit -@[reducible] -def WitnessMLE (pp : PublicParams) : Type := R⦃≤ 1⦄[X Fin pp.ℓ_k] +def oracleReduction.firstMessage : + OracleReduction oSpec + (Statement R pp) (OracleStatement R pp) (Witness R pp) + (Statement.AfterFirstMessage R pp) (OracleStatement.AfterFirstMessage R pp) Unit + ![(.P_to_V, Witness R pp)] := + SendSingleWitness.oracleReduction oSpec + (Statement R pp) (OracleStatement R pp) (Witness R pp) -@[reducible] -def pSpecFirstMessage : ProtocolSpec 1 := ![(.P_to_V, WitnessMLE R pp)] +/-! + ## First challenge + We invoke the protocol `RandomQuery` on the "virtual" polynomial: + `𝒢(Z) = ∑_{x} eq ⸨Z, x⸩ * (A𝕫 ⸨x⸩ * B𝕫 ⸨x⸩ - C𝕫 ⸨x⸩)`, which is supposed to be `0`. 
+-/ + +def zeroCheckVirtualPolynomial (𝕩 : Statement.AfterFirstMessage R pp) + -- Recall: `oStmt = (A, B, C, 𝕨)` + (oStmt : ∀ i, OracleStatement.AfterFirstMessage R pp i) : + MvPolynomial (Fin pp.ℓ_m) R := + letI 𝕫 := R1CS.𝕫 𝕩 (oStmt (.inr 0)) + ∑ x : Fin (2 ^ pp.ℓ_m), + (eqPolynomial (finFunctionFinEquiv.symm x : Fin pp.ℓ_m → R)) * + C ((oStmt (.inl .A) *ᵥ 𝕫) x * (oStmt (.inl .B) *ᵥ 𝕫) x - (oStmt (.inl .C) *ᵥ 𝕫) x) + +/-- Unfolds to `τ : Fin ℓ_m → R` -/ +@[simp] +abbrev FirstChallenge : Type := Fin pp.ℓ_m → R + +/-- Unfolds to `(τ, x) : (Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R) × (Fin (2 ^ ℓ_m) → R)` -/ +@[simp] +abbrev Statement.AfterFirstChallenge : Type := + FirstChallenge R pp × Statement.AfterFirstMessage R pp + +/-- Is equivalent to `((A, B, C), 𝕨) :` + `(fun _ => (Matrix (Fin 2 ^ ℓ_m) (Fin 2 ^ ℓ_n) R)) × (Fin 2 ^ ℓ_w → R)` -/ +@[simp] +abbrev OracleStatement.AfterFirstChallenge : R1CS.MatrixIdx ⊕ Fin 1 → Type := + OracleStatement.AfterFirstMessage R pp + +@[simp] +abbrev Witness.AfterFirstChallenge : Type := Unit + +def oracleReduction.firstChallenge : + OracleReduction oSpec + (Statement.AfterFirstMessage R pp) (OracleStatement.AfterFirstMessage R pp) (Witness R pp) + (Statement.AfterFirstChallenge R pp) (OracleStatement.AfterFirstChallenge R pp) Unit + ![(.V_to_P, FirstChallenge R pp)] := + sorry + -- (RandomQuery.oracleReduction oSpec (Statement.AfterFirstMessage R pp)).liftContext sorry + +/-! 
+ ## First sum-check + We invoke the sum-check protocol the "virtual" polynomial: + `ℱ(X) = eq ⸨τ, X⸩ * (A ⸨X⸩ * B ⸨X⸩ - C ⸨X⸩)` +-/ + +-- def firstSumCheckVirtualPolynomial (𝕩 : FirstMessageStatement R pp) +-- (oStmt : ∀ i, FirstMessageOracleStatement R pp i) : MvPolynomial (Fin pp.ℓ_n) R := +-- letI 𝕫 := R1CS.𝕫 𝕩 (oStmt (.inr 0)) +-- ∑ x : Fin (2 ^ pp.ℓ_n), +-- (eqPolynomial (finFunctionFinEquiv.symm x : Fin pp.ℓ_n → R)) * +-- C ((oStmt (.inl .A) *ᵥ 𝕫) x * (oStmt (.inl .B) *ᵥ 𝕫) x - (oStmt (.inl .C) *ᵥ 𝕫) x) + +/-- Unfolds to `r_x : Fin ℓ_m → R` -/ +@[simp] +abbrev FirstSumcheckChallenge : Type := Fin pp.ℓ_m → R + +/-- Unfolds to `(r_x, τ, 𝕩) : (Fin ℓ_m → R) × (Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R) × (Fin ℓ_m → R)` -/ +@[simp] +abbrev Statement.AfterFirstSumcheck : Type := + FirstSumcheckChallenge R pp × Statement.AfterFirstChallenge R pp + +/-- Is equivalent to `((A, B, C), 𝕨) :` + `(fun _ => (Matrix (Fin 2 ^ ℓ_m) (Fin 2 ^ ℓ_n) R)) × (Fin 2 ^ ℓ_w → R)` -/ +@[simp] +abbrev OracleStatement.AfterFirstSumcheck : R1CS.MatrixIdx ⊕ Fin 1 → Type := + OracleStatement.AfterFirstChallenge R pp + +@[simp] +abbrev Witness.AfterFirstSumcheck : Type := Unit + +-- def oracleReduction.firstSumcheck : +-- OracleReduction (Sumcheck.Spec.pSpec R pp.ℓ_m) oSpec +-- (Statement.AfterFirstChallenge R pp) Witness.AfterFirstChallenge +-- (Statement.AfterFirstSumcheck R pp) Witness.AfterFirstSumcheck +-- (OracleStatement.AfterFirstChallenge R pp) (OracleStatement.AfterFirstSumcheck R pp) := + -- Sumcheck.Spec.oracleReduction oSpec + -- (Statement.AfterFirstChallenge R pp) (Witness.AfterFirstChallenge R pp) + -- (Statement.AfterFirstSumcheck R pp) (Witness.AfterFirstSumcheck R pp) + -- (OracleStatement.AfterFirstChallenge R pp) (OracleStatement.AfterFirstSumcheck R pp) + +/-! + ## Send evaluation claims -open ProtocolSpec in -instance : ProverOnly (pSpecFirstMessage R pp) where - prover_first' := by simp [pSpecFirstMessage] + We send the evaluation claims `v_A, v_B, v_C` to the verifier. 
-def relationR1CS := R1CS.relation R (sizeR1CS pp) + (i.e. invoking `SendClaim` on these "virtual" values) +-/ + +@[simp] +abbrev EvalClaim : R1CS.MatrixIdx → Type := fun _ => R + +/-- We equip each evaluation claim with the default oracle interface, which returns the claim upon a + trivial query `() : Unit`. -/ +instance : ∀ i, OracleInterface (EvalClaim R i) := + fun _ => default + +@[simp] +abbrev Statement.AfterSendEvalClaim : Type := Statement.AfterFirstSumcheck R pp -def proverFirstMessage : OracleProver (pSpecFirstMessage R pp) []ₒ - (R1CS.Statement R (sizeR1CS pp)) (R1CS.Witness R (sizeR1CS pp)) - (R1CS.Statement R (sizeR1CS pp)) Unit - (R1CS.OracleStatement R (sizeR1CS pp)) - (Sum.elim (R1CS.OracleStatement R (sizeR1CS pp)) (fun _ : Unit => WitnessMLE R pp)) where +@[simp] +abbrev OracleStatement.AfterSendEvalClaim : R1CS.MatrixIdx ⊕ R1CS.MatrixIdx ⊕ Fin 1 → Type := + EvalClaim R ⊕ᵥ OracleStatement.AfterFirstSumcheck R pp - PrvState - | ⟨0, _⟩ => R1CS.Statement R (sizeR1CS pp) × R1CS.Witness R (sizeR1CS pp) - | ⟨1, _⟩ => Fin pp.ℓ_m → R +@[simp] +abbrev Witness.AfterSendEvalClaim : Type := Unit - input := fun _ => sorry +def oracleReduction.sendEvalClaim : + OracleReduction oSpec + (Statement.AfterFirstSumcheck R pp) (OracleStatement.AfterFirstSumcheck R pp) (Witness R pp) + (Statement.AfterSendEvalClaim R pp) (OracleStatement.AfterSendEvalClaim R pp) Unit + ![(.P_to_V, ∀ i, EvalClaim R i)] := + sorry + -- SendClaim.oracleReduction oSpec + -- (Statement.AfterFirstSumcheck R pp) - sendMessage := fun _ => sorry +/-! + ## Random linear combination challenges - receiveChallenge := fun _ => sorry + The verifier sends back random linear combination challenges `r_A, r_B, r_C : R`. 
+-/ - output := fun _ => sorry +@[simp] +abbrev LinearCombinationChallenge : Type := R1CS.MatrixIdx → R --- def verifierFirstMessage : OracleVerifier (pSpecFirstMessage R pp) []ₒ --- (R1CS.Statement R (sizeR1CS pp)) (R1CS.Statement R (sizeR1CS pp)) --- (R1CS.OracleStatement R (sizeR1CS pp)) --- (Sum.elim (R1CS.OracleStatement R (sizeR1CS pp)) (fun _ : Unit => WitnessMLE R pp)) where +/-- Unfolds to `((r_A, r_B, r_C), r_x, τ, 𝕩) :` + `(R1CS.MatrixIdx → R) × (Fin (2 ^ ℓ_m) → R) × (Fin ℓ_m → R) × (Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R)` -/ +@[simp] +abbrev Statement.AfterLinearCombination : Type := + LinearCombinationChallenge R × Statement.AfterSendEvalClaim R pp --- verify := sorry +@[simp] +abbrev OracleStatement.AfterLinearCombination : R1CS.MatrixIdx ⊕ R1CS.MatrixIdx ⊕ Fin 1 → Type := + EvalClaim R ⊕ᵥ OracleStatement.AfterFirstSumcheck R pp -def pSpecFirstChallenge : ProtocolSpec 1 := ![(.V_to_P, Fin pp.ℓ_m → R)] +@[simp] +abbrev Witness.AfterLinearCombination : Type := Unit --- First sumcheck +instance : ∀ i, OracleInterface (ProtocolSpec.Message ![(.V_to_P, LinearCombinationChallenge R)] i) + | ⟨0, h⟩ => nomatch h --- Send values `v_A, v_B, v_C` +def oracleReduction.linearCombination : + OracleReduction oSpec + (Statement.AfterFirstSumcheck R pp) (OracleStatement.AfterFirstSumcheck R pp) (Witness R pp) + (Statement.AfterLinearCombination R pp) (OracleStatement.AfterLinearCombination R pp) Unit + ![(.V_to_P, LinearCombinationChallenge R)] := + sorry --- Second sumcheck +/-! + ## Second sum-check + We invoke the sum-check protocol the "virtual" polynomial: + `ℳ(Y) = r_A * (MLE A) ⸨r_x, Y⸩ * (MLE 𝕫) ⸨Y⸩ + r_B * (MLE B) ⸨r_x, Y⸩ * (MLE 𝕫) ⸨Y⸩` + `+ r_C * (MLE C) ⸨r_x, Y⸩ * (MLE 𝕫) ⸨Y⸩` +-/ --- Send values ... 
+def secondSumCheckVirtualPolynomial + (stmt : Statement.AfterLinearCombination R pp) + (oStmt : ∀ i, OracleStatement.AfterLinearCombination R pp i) : + MvPolynomial (Fin pp.ℓ_n) R := sorry --- Final check +@[simp] +abbrev SecondSumcheckChallenge : Type := Fin pp.ℓ_n → R + +/-- Unfolds to `(r_y, (r_A, r_B, r_C), r_x, τ, 𝕩) :` + `(Fin ℓ_n → R) × (R1CS.MatrixIdx → R) × (Fin (2 ^ ℓ_m) → R) × (Fin ℓ_m → R) ×` + `(Fin (2 ^ ℓ_n - 2 ^ ℓ_w) → R)` -/ +@[simp] +abbrev Statement.AfterSecondSumcheck : Type := + SecondSumcheckChallenge R pp × Statement.AfterLinearCombination R pp + +@[simp] +abbrev OracleStatement.AfterSecondSumcheck : R1CS.MatrixIdx ⊕ R1CS.MatrixIdx ⊕ Fin 1 → Type := + EvalClaim R ⊕ᵥ OracleStatement.AfterFirstSumcheck R pp + +@[simp] +abbrev Witness.AfterSecondSumcheck : Type := Unit + +-- def oracleReduction.secondSumcheck : +-- OracleReduction (Sumcheck.Spec.pSpec R pp.ℓ_n) oSpec +-- (Statement.AfterLinearCombination R pp) Witness.AfterLinearCombination +-- (Statement.AfterSecondSumcheck R pp) Witness.AfterSecondSumcheck +-- (OracleStatement.AfterLinearCombination R pp) (OracleStatement.AfterSecondSumcheck R pp) := +-- sorry + +/-! + ## Final check + + We invoke the `CheckClaim` protocol to check the two evaluation claims. 
+-/ + +-- Definition of the final relation to be checked +-- def finalCheck := sorry + +-- def oracleReduction.finalCheck : +-- OracleReduction ![] oSpec +-- (Statement.AfterSecondSumcheck R pp) Witness.AfterSecondSumcheck +-- Unit Unit +-- (OracleStatement.AfterSecondSumcheck R pp) (fun _ => Unit) := +-- CheckClaim.oracleReduction oSpec (Statement.AfterSecondSumcheck R pp) +-- (OracleStatement.AfterSecondSumcheck R pp) (sorry) + +end Construction + +section Security + + +end Security end Spec end - end Spartan diff --git a/ArkLib/ProofSystem/Stir.lean b/ArkLib/ProofSystem/Stir.lean new file mode 100644 index 000000000..bb3a65e4e --- /dev/null +++ b/ArkLib/ProofSystem/Stir.lean @@ -0,0 +1,7 @@ +import ArkLib.ProofSystem.Stir.Combine +import ArkLib.ProofSystem.Stir.Folding +import ArkLib.ProofSystem.Stir.MainThm +import ArkLib.ProofSystem.Stir.OutOfDomSmpl +import ArkLib.ProofSystem.Stir.ProximityBound +import ArkLib.ProofSystem.Stir.ProximityGap +import ArkLib.ProofSystem.Stir.Quotienting diff --git a/ArkLib/ProofSystem/Stir/Combine.lean b/ArkLib/ProofSystem/Stir/Combine.lean new file mode 100644 index 000000000..554f6924e --- /dev/null +++ b/ArkLib/ProofSystem/Stir/Combine.lean @@ -0,0 +1,104 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.Probability.Notation +import ArkLib.ProofSystem.Stir.ProximityBound + +/-! Section 4.5 from STIR [ACFY24] -/ + +open BigOperators Finset NNReal + +namespace Combine +variable {m : ℕ} + {F : Type*} [Field F] [Fintype F] [DecidableEq F] + {ι : Type*} [Fintype ι] + +/-- Fact 4.10 + Geometric series formula in a field, for a unit `r : F`. 
-/ +lemma geometric_sum_units {r : Units F} {a : ℕ} : + ∑ j : Fin (a + 1), (r ^ ( j : ℕ ) : F) = + if r = 1 then (a + 1 : F) + else (1 - r ^ (a + 1)) / (1 - r) := by sorry + +/--Coefficients r_i as used in the definition of Combine, + for i < m, r_i := r^{i - 1 + sum_{j < i}(d* - d_j)} -/ +def ri (dstar : ℕ) (degs : Fin m → ℕ) (r : F) : Fin m → F := + fun i => + let ipred := Nat.pred i.val + let exp := ipred + ∑ j < i, (dstar - degs j) + r ^ exp + +/-- Definition 4.11.1 + Combine(d*, r, (f_0, d_0), …, (f_{m-1}, d_{m-1}))(x) + := sum_{i < m} r_i * f_i(x) * ( sum_{l < (d* - d_i + 1)} (r * φ(x))^l ) -/ +def combineInterm + {φ : ι ↪ F} (dstar : ℕ) (r : F) (fs : Fin m → ι → F) (degs : Fin m → ℕ) + (hdegs : ∀ i, degs i ≤ dstar) : ι → F := + fun x => + ∑ i, (ri dstar degs r i) * (fs i x) * (∑ l < (dstar - degs i + 1), (r * (φ x))^l) + +/--if (r * φ(x)) = 1, then (dstar - degree + 1) + else (1 - r * φ(x)^(dstar - degree + 1)) / (1 - r * φ(x))-/ +def conditionalExp + (φ : ι ↪ F) (dstar : ℕ) (degree : ℕ) (r : F) : ι → F := + fun x => + let q := r * (φ x) + if q = 1 then (dstar - degree + 1 : F) + else (1 - q^(dstar - degree + 1)) / (1 - q) + +/-- Definition 4.11.2 + Combine(d*, r, (f_0, d_0), …, (f_{m-1}, d_{m-1}))(x) := + sum_{i < m} r_i * f_i(x) * conditionExp(dstar, degsᵢ, r) -/ +def combineFinal + (φ : ι ↪ F) (dstar : ℕ) (r : F) (fs : Fin m → ι → F) + (degs : Fin m → ℕ) (hdegs : ∀ i, degs i ≤ dstar) : ι → F := + fun x => + ∑ i, (ri dstar degs r i) * (fs i x) * conditionalExp φ dstar (degs i) r x + +/-- Definition 4.12.1 + DegCor(d*, r, f, degree)(x) := f(x) * ( sum_{ l < d* - d + 1 } (r * φ(x))^l ) -/ +def degreeCorrInterm + {φ : ι ↪ F} (dstar degree : ℕ) (r : F) (f : ι → F) (hd : degree ≤ dstar) : ι → F := + fun x => + let geom := ∑ l < (dstar - degree + 1), (r * (φ x)) ^ l + f x * geom + +/-- Definition 4.12.2 + DegCor(d*, r, f, d)(x) := f(x) * conditionalExp(x) -/ +def degreeCorrFinal +{φ : ι ↪ F} (dstar degree : ℕ) (r : F) (f : ι → F) (hd : degree ≤ dstar) : ι → F := 
+ fun x => + f x * conditionalExp φ dstar degree r x + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [Nonempty ι] + +open LinearCode ProbabilityTheory ReedSolomon + +/--Lemma 4.13 + Let `dstar` be the target degree, `f₁,...,f_{m-1} : ι → F`, + `0 < degs₁,...,degs_{m-1} < dstar` be degrees and + `δ ∈ (0, min{(1-BStar(ρ)), (1-ρ-1/|ι|)})` be a distance parameter, then + Pr_{r ← F} [δᵣ(Combine(dstar,r,(f₁,degs₁),...,(fₘ,degsₘ)))] + > err' (dstar, ρ, δ, m * (dstar + 1) - ∑ i degsᵢ) + -/ +lemma combine + {φ : ι ↪ F} {dstar m degree : ℕ} + (fs : Fin m → ι → F) (degs : Fin m → ℕ) (hdegs : ∀ i, degs i ≤ dstar) + (δ : ℝ) (hδPos : δ > 0) + (hδLt : δ < (min (1 - Bstar (rate (code φ degree))) + (1- (rate (code φ degree)) - 1/ Fintype.card ι))) + (hProb : Pr_{ let r ←$ᵖ F }[ δᵣ((combineFinal φ dstar r fs degs hdegs), (code φ dstar)) ≤ δ ] > + ENNReal.ofReal (err' F dstar (rate (code φ degree)) δ (m * (dstar + 1) - ∑ i, degs i))) : + ∃ S : Finset ι, + S.card ≥ (1 - δ) * Fintype.card ι ∧ + ∀ i : Fin m, ∃ u : (ι → F), + u ∈ (code φ (degs i)) ∧ + ∀ x ∈ S, fs i x = u x + := by sorry + +end Combine diff --git a/ArkLib/ProofSystem/Stir/Folding.lean b/ArkLib/ProofSystem/Stir/Folding.lean new file mode 100644 index 000000000..c62c1bb6c --- /dev/null +++ b/ArkLib/ProofSystem/Stir/Folding.lean @@ -0,0 +1,199 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.Probability.Notation +import ArkLib.ProofSystem.Stir.ProximityBound + +import Mathlib.Algebra.MvPolynomial.Basic +import Mathlib.Algebra.MvPolynomial.Degrees +import Mathlib.Probability.ProbabilityMassFunction.Basic +import Mathlib.Probability.Distributions.Uniform +import Mathlib.RingTheory.MvPolynomial.Groebner + +/-! 
Section 4.4, [ACFY24] -/ + +open Polynomial ReedSolomon LinearMap Finset ListDecodable + +namespace Domain + +variable {ι F : Type*} [Field F] [Fintype F] [DecidableEq F] [DecidableEq ι] + +/-- The image of a finite set `S` under the map `x ↦ (φ x)ᵏ` -/ +def indexPow (S : Finset ι) (φ : ι ↪ F) (k : ℕ) : Finset F := + S.image (fun x => (φ x) ^ k) + +/-- The k-th power domain `ιᵏ ↪ F` for a given domain `ι ↪ F`. -/ +def pow (S : Finset ι) (φ : ι ↪ F) (k : ℕ) : indexPow S φ k ↪ F := + Function.Embedding.subtype fun y => y ∈ indexPow S φ k + +/-- The fiber over a point `y` under the map `x ↦ (φ x)ᵏ` -/ +def powFiber (S : Finset ι) (φ : ι ↪ F) (k : ℕ) (y : indexPow S φ k) : Finset ι := + S.filter (fun x => (φ x) ^ k = y) + +/-- The fiber domain `f⁻¹(y) ↪ F` for the surjection `f : ι → ιᵏ, x → xᵏ` and `y ∈ ιᵏ`. -/ +def fiber (S : Finset ι) (φ : ι ↪ F) (k : ℕ) + (y : indexPow S φ k) : powFiber S φ k y ↪ F := + Function.Embedding.mk (fun z => φ z) (φ.injective.comp Subtype.val_injective) + +end Domain + +namespace Folding + +variable {F : Type* } [Field F] [Fintype F] + +/- 𝔽[X,Y] is not an Euclidean Domain, but fixing an order on monomials still allows + to show existance of bivariate polynomials Q', Q ∈ 𝔽[X,Y] such that + P = Q' * P' + Q for all P,P' ∈ 𝔽[X,Y] with P' having an invertible leading coefficient + (which on a field is equivalent to P' not being the zero polynomial). 
+ + This is MonomialOrder.div from Mathlib.RingTheory.MvPolynomial.Groebner + + Using the usual lexicographic order x₀ > x₁ is equal to proposition 6.3 in + https://people.csail.mit.edu/madhu/papers/2005/rspcpp-full.pdf under the + substitution z = x₀ and y = x₁, hence the following definition constructs + Q ∈ 𝔽[Z,Y] with P(z,y) = Q'(z,y) * R(z,y) + Q(z,y) +-/ + +/-- Given `P, P' ∈ 𝔽[Z,Y]`, `P' ≠ 0`, computes `Q ∈ 𝔽[Z,Y]`, +with `P(z,y) = Q'(z,y) * P'(z,y) + Q(z,y)` for some `Q' ∈ 𝔽[Z,Y]` -/ +noncomputable def modBivar (P P' : MvPolynomial (Fin 2) F) + (hlg : IsUnit ((MonomialOrder.lex).leadingCoeff P')) : MvPolynomial (Fin 2) F := + -- Lexicographic order on `Fin 2`. + let ord : MonomialOrder (Fin 2) := MonomialOrder.lex + -- Wrap the single divisor into a family indexed by `Unit`. + let b : Unit → MvPolynomial (Fin 2) F := fun _ => P' + -- Unit leading-coeff proof for every index (there is only one). + have hb : ∀ i : Unit, IsUnit (ord.leadingCoeff (b i)) := by + intro _; simpa [b, ord] using hlg + -- Apply Groebner-basis division: + -- hdiv : ∃ Q', ∃ Q, P = P' * Q' + Q ∧ (side conditions) + have hdiv := ord.div (b := b) hb P + -- Peel off the two nested existentials and return the chosen remainder `r`. 
+ Classical.choose (Classical.choose_spec hdiv) + +/-- maps the univariate polynomial P∈𝔽[Z] to the bivariate polynomial P'∈ 𝔽[Z,Y] with + P'(z,y) = P(z) -/ +noncomputable def uni2bi (p : Polynomial F) : MvPolynomial (Fin 2) F := + Polynomial.eval₂ MvPolynomial.C (MvPolynomial.X 0) p + +/-- Computes Q(z,y) with P(z) = Q'(z,y) * (y- q(z)) + Q(z,y) as in + proposition 6.3 from https://people.csail.mit.edu/madhu/papers/2005/rspcpp-full.pdf -/ +noncomputable def polyQ (P q : Polynomial F) : MvPolynomial (Fin 2) F := + -- Pbi(z,y):= P(z) + let Pbi : MvPolynomial (Fin 2) F := uni2bi P + -- P'(z,y) := (y - q(z)) + let P' : MvPolynomial (Fin 2) F := (MvPolynomial.X 1) - uni2bi q + -- proof that leading coefficient f q is not zero + have h_unit : IsUnit ((MonomialOrder.lex).leadingCoeff P') := sorry + modBivar Pbi P' h_unit + +/-- Helper For Readability: Evaluate a bivariate polynomial Q at (a, b) ∈ F×F -/ +def evalBivar + (Q : MvPolynomial (Fin 2) F) (a b : F) : F := MvPolynomial.eval (Fin.cases a (fun _ ↦ b)) Q + +/-- The STIR paper assumes that the polynomials fPoly(.) and Q(qPoly(.),.) are + fully determined by their evaluations on F. This is not necessarily true + for arbitrary polynomials of degrees larger than |F|. So we include an + assumption in what follows that qPoly has degree < |F| from which the + uniqueness of fPoly and Q can be derived from their evaluation on F. + Alternatively we could use the identity of polynomials + fPoly(.) = Q(qPoly(.), .) instead. + + Below we present Fact 4.6.1 from STIR -/ +lemma exists_unique_bivariate + (qPoly : Polynomial F) (hdeg_q_min : qPoly.natDegree > 0) + (hdeg_q_max : qPoly.natDegree < Fintype.card F) (fPoly : Polynomial F) : + -- Q ∈ 𝔽[X,Y] + ∃! 
Q : MvPolynomial (Fin 2) F, + -- deg_x(Q) = Floor ( deg(fPoly) / deg(qPoly) ) + -- This is natural number division towards zero, which is floor + (MvPolynomial.degreeOf 0 Q = (Polynomial.natDegree fPoly) / (Polynomial.natDegree qPoly)) ∧ + -- deg_y(Q) < deg (q) + (MvPolynomial.degreeOf 1 Q < Polynomial.natDegree qPoly) ∧ + -- point‑wise equality on F: f(z) = Q(q(z), z) + (∀ z : F, Polynomial.eval z fPoly = evalBivar Q (Polynomial.eval z qPoly) z) ∧ + (∀ t : ℕ, fPoly.natDegree < t * qPoly.natDegree → MvPolynomial.degreeOf 0 Q < t) := + /- The proof can follow `def polyQ` using the properties guranteed + from MonomialOrder.div from Mathlib.RingTheory.MvPolynomial.Groebner -/ + by sorry + +/-- Fact 4.6.2 in STIR-/ +lemma degree_bound_bivariate + (qPoly : Polynomial F) + (hdeg_q_min : qPoly.natDegree > 0) + (hdeg_q_max : qPoly.natDegree < Fintype.card F) + {t : ℕ} (Q : MvPolynomial (Fin 2) F) + (hdegX : MvPolynomial.degreeOf 0 Q < t) + (hdegY : MvPolynomial.degreeOf 1 Q < qPoly.natDegree) : + (MvPolynomial.eval₂Hom (Polynomial.C : F →+* Polynomial F) + (fun i : Fin 2 => if i = 0 then qPoly else Polynomial.X) Q).natDegree < t * qPoly.natDegree := + by sorry + +/--Definition 4.7 + `polyFold(f, k, r)` “folds” the polynomial `f` + producing a new polynomial of deree `< degree(f)/k`.-/ +noncomputable def polyFold + [DecidableEq F] (fPoly : Polynomial F) + (k : ℕ) (hk0 : 0 < k) (hkfin : k < Fintype.card F) + (r : F): Polynomial F := + let qPoly : Polynomial F := Polynomial.X ^ k + let hdeg_q_min : qPoly.natDegree > 0 := sorry + let hdeg_q_max : qPoly.natDegree < Fintype.card F := sorry + -- choose the unique bivariate lift Q + let Q : MvPolynomial (Fin 2) F := polyQ fPoly qPoly + MvPolynomial.eval₂Hom + (Polynomial.C : F →+* Polynomial F) + (fun i : Fin 2 => if i = 0 then Polynomial.X else Polynomial.C r) Q + +open Domain + +variable {ι F : Type*} [Field F] [Fintype F] [DecidableEq F] [DecidableEq ι] + +/--Definition 4.8 + For x ∈ ιᵏ, p_x ∈ 𝔽[X] is the degree < k polynomial 
+ where p_x(y) = f(y) for every y ∈ ι such that yᵏ = x.-/ +noncomputable def xPoly + {S : Finset ι} (f : ι → F) (φ : ι ↪ F) (k : ℕ) (x : indexPow S φ k) : Polynomial F := + let dom := powFiber S φ k x + let emb : { y // y ∈ dom } ↪ F := fiber S φ k x + let g : { y // y ∈ dom } → F := fun y => f y.val + Lagrange.interpolate univ emb g + +/--Definition 4.8 + Fold(f,k,α) : ιᵏ → 𝔽 such that Fold(f, k, α)(x) := p_x(α) -/ +noncomputable def fold + {S : Finset ι} (φ : ι ↪ F) (f : ι → F) (k : ℕ) (α : F) : indexPow S φ k → F := + fun x => (xPoly f φ k x).eval α + +/-- min{δᵣ(f, RSC[F, ι, degree]), 1 − B^⋆(ρ)} -/ +noncomputable def foldingDistRange + (degree : ℕ) [Fintype ι] [Nonempty ι] (φ : ι ↪ F) (f : ι → F) : ℝ := + let C : Set (ι → F) := code φ degree + min δᵣ(f, C) (1 - Bstar (LinearCode.rate (code φ degree))) + +open ProbabilityTheory + +variable {ι F : Type} [Field F] [Fintype F] [DecidableEq F] [DecidableEq ι] + +/--Lemma 4.9 + For every function `f : ι → F`, `degree`, folding parameter `k`, and + `δ ∈ (0, foldingDistRange)` + `Pr_{r ← F} [ δᵣ(fold(f, k, α), RS[F, ιᵏ, degree/k)] < δ] ≤ err'(degree/k, ρ, δ, k)` + -/ +lemma folding + [Nonempty ι] {S : Finset ι} [Fintype ι] + (φ : ι ↪ F) (f : ι → F) (k : ℕ) (x : indexPow S φ k) + [Nonempty (indexPow S φ k)] + {degree : ℕ} (δ : ℚ) (hδPos : δ > 0) + (hδLt : δ < foldingDistRange degree φ f) : + let C : Set ((indexPow S φ k) → F) := code (pow S φ k) (degree / k) + Pr_{ let r ←$ᵖ F }[ δᵣ((fold φ f k r), C) ≤ δ] + ≤ ENNReal.ofReal (err' F (degree / k) (LinearCode.rate (code φ degree)) δ k) := +by sorry + +end Folding diff --git a/ArkLib/ProofSystem/Stir/MainThm.lean b/ArkLib/ProofSystem/Stir/MainThm.lean new file mode 100644 index 000000000..a01ebca65 --- /dev/null +++ b/ArkLib/ProofSystem/Stir/MainThm.lean @@ -0,0 +1,228 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.OracleReduction.VectorIOR +import ArkLib.ProofSystem.Stir.ProximityBound + +/-!Section 5 STIR[ACFY24], Theorem 5.1 and Lemma 5.4 -/ + +open BigOperators Finset ListDecodable NNReal ReedSolomon VectorIOP + +namespace StirIOP + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {M : ℕ} (ι : Fin (M + 1) → Type) [∀ i : Fin (M + 1), Fintype (ι i)] + [∀ i : Fin (M + 1), DecidableEq (ι i)] + +/-- **Per‑round protocol parameters:** + For a fixed depth `M`, the reduction runs `M + 1` rounds. + In round `i ∈ {0,…,M}` we fold by a factor `foldingParamᵢ`, + evaluate on the point set `ιᵢ` through the embedding `φᵢ : ιᵢ ↪ F`, + and repeat certain proximity checks `repeatParamᵢ` times. -/ +structure Params (F : Type*) where + deg : ℕ -- initial degree + foldingParam : Fin (M + 1) → ℕ + φ : (i : Fin (M + 1)) → (ι i) ↪ F + repeatParam : Fin (M + 1) → ℕ + +/-- **Degree after `i` folds:** + The starting degree is `deg`; + every fold divides it by `foldingParamⱼ (j P.deg / ∏ j < i, (P.foldingParam j) + +/-- **Conditions that protocol parameters must satisfy.** + - `h_deg` : initial degree `deg` is a power of 2 + - `h_foldingParams` : `∑ i : Fin (M + 1), foldingParamᵢ` is a power of 2 + - `h_deg_ge` : `deg ≥ ∏ i foldingParamᵢ` + - `h_smooth` : each `φᵢ` must embed a smooth evaluation domain + - `h_smooth_le` : `|ιᵢ| ≤ degreeᵢ` + - `h_repeatP_le` : `∀ i : Fin (M + 1), repeatParamᵢ + 1 ≤ degreeᵢ` -/ +structure ParamConditions (P : Params ι F) where + h_deg : ∃ k : ℕ, P.deg = 2^k + h_foldingParams : ∀ i : Fin (M + 1), ∃ k : ℕ, (P.foldingParam i) = 2^k + h_deg_ge : P.deg ≥ ∏ i : Fin (M + 1), (P.foldingParam i) + h_smooth : ∀ i : Fin (M + 1), Smooth (P.φ i) + h_smooth_le : ∀ i : Fin (M + 1), Fintype.card (ι i) ≤ (degree ι P i) + h_repeatP_le : ∀ i : Fin (M + 1), P.repeatParam i + 1 ≤ (degree ι P i) + +/-- Distance and list‑size 
targets per round. -/ +structure Distances (M : ℕ) where + δ : Fin (M + 1) → ℝ≥0 + l : Fin (M + 1) → ℝ≥0 + +/-- Family of Reed–Solomon codes expected by the verifier, we have + `codeᵢ = RS[F, ιᵢ, degreeᵢ]` and for `i ∈ {1,…,M}` + `hlistDecode: codeᵢ` is `(δᵢ,lᵢ)`-list decodable +-/ +structure CodeParams (P : Params ι F) (Dist : Distances M) where + C : ∀ i : Fin (M + 1), Set ((ι i) → F) + h_code : ∀ i : Fin (M + 1), C i = code (P.φ i) (degree ι P i) + h_listDecode : ∀ i : Fin (M + 1), i ≠ 0 → listDecodable (C i) (Dist.δ i) (Dist.l i) + +section MainTheorem + +open OracleComp OracleSpec ProtocolSpec LinearCode + +/-- `OracleStatement` defines the oracle message type for a multi-indexed setting: + given base input type `ι`, and field `F`, the output type at each index + is a function `ι → F` representing an evaluation over `ι`. +-/ +@[reducible] +def OracleStatement (ι F : Type) : Unit → Type := + fun _ => ι → F + +/-- Provides a default OracleInterface instance that leverages + the oracle statement defined above. The oracle simply applies + the function `f : ι → F` to the query input `i : ι`, + producing the response. -/ +instance {ι : Type} : OracleInterface (OracleStatement ι F ()) where + Query := ι + Response := F + oracle := fun f i => f i + +/-- STIR relation: the oracle's output is δᵣ-close to a Reed-Solomon codeword + of degree at most `degree` over domain `φ`, within error `err`. 
+-/ +def stirRelation + {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [Nonempty ι] + (degree : ℕ) (φ : ι ↪ F) (err : ℝ) + : Set ((Unit × ∀ i, (OracleStatement ι F i)) × Unit) := + fun ⟨⟨_, oracle⟩, _⟩ => δᵣ(oracle (), ReedSolomon.code φ degree) ≤ err + +/-- Theorem 5.1 : STIR main theorem + Consider the following ingrediants, + a security parameter `secpar` + a ReedSolomon code `RS[F, ι, degree]` with rate `ρ = degree/ |ι|`, where ι is a smooth domain + a proximity parameter `δ ∈ (0, 1 - 1.05 * √ρ)` + a folding parameter `k ≥ 4`, being a power of 2 + if `|F| ≤ secpar * 2^{secpar * degree² * |ι|^3.5 / log(1/ρ)}`, then + there exists a `vector IOPP π` for `RS` with + - `round by round soundness error ≤ 2 ^ (- secpar)`, + - `M = O(logₖdegree)` + - `proof length = |ι| + Oₖ(log degree)` + - `query complexity to input = secpar / (- log(1-δ))` + - `query complexity to proof strings = Oₖ(log degree + secpar * log(log degree / log(1/ρ)))` +-/ +theorem stir_main + (secpar : ℕ) [VCVCompatible F] + {ι : Type} [Fintype ι] [Nonempty ι] [DecidableEq ι] + {φ : ι ↪ F} {degree : ℕ} [hsmooth : Smooth φ] + {k proofLen qNumtoInput qNumtoProofstr : ℕ} + (hk : ∃ p, k = 2^p) (hkGe : k ≥ 4) + (δ : ℝ≥0) (hδub : δ < 1 - 1.05 * Real.sqrt (degree / Fintype.card ι)) + (hF : Fintype.card F ≤ + secpar * 2 ^ secpar * degree^2 * (Fintype.card ι)^(7/2) / + Real.log (1 / rate (code φ degree))) : + ∃ n : ℕ, + ∃ vPSpec : ProtocolSpec.VectorSpec n, + ∃ ε_rbr : vPSpec.ChallengeIdx → ℝ≥0, + ∃ π : VectorIOP []ₒ Unit (OracleStatement ι F) Unit vPSpec F, + IsSecureWithGap (stirRelation degree φ 0) + (stirRelation degree φ δ) + ε_rbr π + ∧ ∀ i, ε_rbr i ≤ (1 : ℚ≥0) / (2 ^ secpar) + ∧ ∃ c > 0, M ≤ c * (Real.log degree / Real.log k) + ∧ ∃ cₖ : ℕ → ℝ, proofLen ≤ (Fintype.card ι) + (cₖ k) * (Real.log degree) + ∧ qNumtoInput = secpar / (- Real.log (1 - δ)) + ∧ ∃ cₖ : ℕ → ℝ, qNumtoProofstr ≤ + (cₖ k) * ((Real.log degree) + + secpar * (Real.log ((Real.log degree) / Real.log (1/rate (code φ 
degree))))) +:= by sorry + +end MainTheorem + +section RBRSoundness + +open LinearCode + +/-- Lemma 5.4: Round-by-round soundness of the STIR IOPP + Consider parameters: + `ι = {ιᵢ}_{i = 0, ..., M}` be smooth evaluation domains + `P : Params ι F` containing required protocol parameters - + initial degree, folding parameters `foldingParamᵢ`, embedding `φᵢ`, + repetition parameters `repeatParamᵢ` + `hParams : ParamConditions ι P`, stating conditions that parameters of P must satisfy + `degreeᵢ = deg / ∏ j ({ε_fold} ∪ {ε_fin} ∪ univ.image ε_out ∪ univ.image ε_shift).max' (by simp) + (IsSecureWithGap (stirRelation (degree ι P 0) (P.φ 0) 0) + (stirRelation (degree ι P 0) (P.φ 0) (Dist.δ 0)) + ε_rbr π) ∧ + -- `ε_fold ≤ errStar(degree₀/foldingParam₀, ρ₀, δ₀, repeatParam₀)` + ε_fold ≤ err' F (P.deg / P.foldingParam 0) (rate (code (P.φ 0) P.deg)) + (Dist.δ 0) (P.repeatParam 0) + ∧ + -- Note here that `j : Fin M`, so we need to cast into `Fin (M + 1)` for indexing of + -- `Dist.δ` and `P.repeatParam`. To get `j`, we use `.castSucc`, whereas to get `j + 1`, + -- we use `.succ`. + -- Because of the difference in indexing between the paper and the code, we essentially have + -- `j = i - 1` compared to the paper. 
+ -- `ε_out_{j+1} ≤ l_{j+1}²/2 * (degree_{j+1}/ |F| - |ι_{j+1}|)^s` + ∀ {j : Fin M} (hⱼ : j.val ≠ 0), + ε_out j ≤ ((Dist.l j.succ : ℝ) ^ 2 / 2) * + ((degree ι P j.succ : ℝ) / (Fintype.card F - Fintype.card (ι j.succ))) ^ s + ∧ + -- `ε_shift_{j+1} ≤ (1 - δ_j)^repeatParam_j` + -- `+ errStar(degree_{j+1}, ρ_{j+1}, δ_{j+1}, repeatParam_j + s)` + -- `+ errStar(degree_{j+1}/foldingParam_{j+1}, ρ_{j+1}, δ_{j+1}, repeatParam_{j+1})` + ε_shift j ≤ + (1 - Dist.δ j.castSucc) ^ (P.repeatParam j.castSucc) + + -- err'(degreeⱼ, ρ(codeⱼ), δⱼ, repeatParam_j + s), where codeⱼ = code φⱼ degreeⱼ + err' F (degree ι P j.succ) (rate (code (P.φ j.succ) (degree ι P j.succ))) + (Dist.δ j.succ) (P.repeatParam j.castSucc) + s + + -- err'(degreeⱼ / foldingParamⱼ, ρ(codeⱼ), δⱼ, repeatParamⱼ) + err' F ((degree ι P j.succ) / P.foldingParam j.succ) + (rate (code (P.φ j.succ) (degree ι P j.succ))) + (Dist.δ j.succ) (P.repeatParam j.succ) + ∧ + -- `ε_fin ≤ (1 - δ_M)^repeatParam_M` + ε_fin ≤ (1 - Dist.δ (Fin.last M)) ^ (P.repeatParam (Fin.last M)) := +by + sorry + +end RBRSoundness + +end StirIOP diff --git a/ArkLib/ProofSystem/Stir/OutOfDomSmpl.lean b/ArkLib/ProofSystem/Stir/OutOfDomSmpl.lean new file mode 100644 index 000000000..1cc5f1791 --- /dev/null +++ b/ArkLib/ProofSystem/Stir/OutOfDomSmpl.lean @@ -0,0 +1,65 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.Probability.Notation +import Mathlib.Data.Fintype.Basic + +open Finset ListDecodable NNReal Polynomial ProbabilityTheory ReedSolomon +namespace OutOfDomSmpl + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [DecidableEq ι] + +/-! 
Section 4.3 [ACFY24] -/ + +/--returns the domain complement `F \ φ(ι)`-/ +def domainComplement (φ : ι ↪ F) : Finset F := + Finset.univ \ Finset.image φ.toFun Finset.univ + +/-- Pr_{r₀, …, r_{s-1} ← (𝔽 \ φ(ι)) } + [ ∃ distinct u, u′ ∈ List(C, f, δ) : + ∀ i < s, u(r_i) = u′(r_i) ] + here, List (C, f, δ) denotes the list of codewords of C δ-close to f, + wrt the Relative Hamming distance. -/ +noncomputable def listDecodingCollisionProbability + (φ : ι ↪ F) (f : ι → F) (δ : ℝ) (s degree: ℕ) (Genfun : F → Fin s → F) + (h_nonempty : Nonempty (domainComplement φ)) : ENNReal := + Pr_{let r ←$ᵖ (domainComplement φ)}[ ∃ (u u' : code φ degree), + u.val ≠ u'.val ∧ + u.val ∈ relHammingBall (code φ degree) f δ ∧ + u'.val ∈ relHammingBall (code φ degree) f δ ∧ + ∀ i : Fin s, + let uPoly := decodeLT u + let uPoly' := decodeLT u' + (uPoly : F[X]).eval (Genfun r i) + = (uPoly' : F[X]).eval (Genfun r i) + ] + +/--Lemma 4.5.1-/ +lemma out_of_dom_smpl_1 + {δ l : ℝ≥0} {s : ℕ} {f : ι → F} {degree : ℕ} {φ : ι ↪ F} + (C : Set (ι → F)) (hC : C = code φ degree) + (h_decodable : listDecodable C δ l) + (h_nonempty : Nonempty (domainComplement φ)) + (Genfun : F → Fin s → F) : + listDecodingCollisionProbability φ f δ s degree Genfun h_nonempty ≤ + ((l * (l-1) / 2)) * ((degree - 1) / (Fintype.card F - Fintype.card ι))^s + := by sorry + +/--Lemma 4.5.2-/ +lemma out_of_dom_smpl_2 + {δ l : ℝ≥0} {s : ℕ} {f : ι → F} {degree : ℕ} {φ : ι ↪ F} + (C : Set (ι → F)) (hC : C = code φ degree) + (h_decodable : listDecodable C δ l) + (h_nonempty : Nonempty (domainComplement φ)) + (Genfun : F → Fin s → F) : + listDecodingCollisionProbability φ f δ s degree Genfun h_nonempty ≤ + ((l^2 / 2)) * (degree / (Fintype.card F - Fintype.card ι))^s + := by sorry + +end OutOfDomSmpl diff --git a/ArkLib/ProofSystem/Stir/ProximityBound.lean b/ArkLib/ProofSystem/Stir/ProximityBound.lean new file mode 100644 index 000000000..3a248c05d --- /dev/null +++ b/ArkLib/ProofSystem/Stir/ProximityBound.lean @@ -0,0 +1,22 @@ +/- 
+Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ + +import Mathlib.Data.Finset.Basic +import Mathlib.Data.Real.Basic +import Mathlib.Data.Real.Sqrt + +open NNReal + +/-- Proximity bound function -/ +noncomputable def Bstar (x : ℝ) : ℝ := x.sqrt + +/-- Proximity error function -/ +noncomputable def err' (F : Type*) [Fintype F] (d : ℕ) (ρ : ℝ) (δ : ℝ) (m: ℕ) : ℝ := + if δ ≤ (1 - ρ) / 2 then + ((m - 1) * d) / (ρ * (Fintype.card F)) + else + let min_val := min (1 - (Real.sqrt ρ) - δ ) ((Real.sqrt ρ) / 20) + ((m - 1) * d^2) / ((Fintype.card F) * (2 * min_val)^7) diff --git a/ArkLib/ProofSystem/Stir/ProximityGap.lean b/ArkLib/ProofSystem/Stir/ProximityGap.lean new file mode 100644 index 000000000..1e6ac769a --- /dev/null +++ b/ArkLib/ProofSystem/Stir/ProximityGap.lean @@ -0,0 +1,35 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.Basic +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.Probability.Notation +import ArkLib.ProofSystem.Stir.ProximityBound + +open NNReal ProbabilityTheory ReedSolomon + +/-- Theorem 4.1[BCIKS20] from STIR[ACFY24] + Let `C = RS[F, ι, degree]` be a ReedSolomon code with rate `degree / |ι|` + and let Bstar(ρ) = √ρ. 
For all `δ ∈ (0, 1 - Bstar(ρ))`, `f₁,...,fₘ : ι → F`, if + `Pr_{r ← F} [ δᵣ(rⱼ * fⱼ, C) ≤ δ] > err'(degree, ρ, δ, m)` + then ∃ S ⊆ ι, |S| ≥ (1-δ) * |ι| and + ∀ i : m, ∃ u : C, u(S)=fᵢ(S) + -/ +lemma proximity_gap + {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [Nonempty ι] {φ : ι ↪ F} + {degree m : ℕ} {δ : ℝ≥0} {f : Fin m → ι → F} {GenFun : F → Fin m → F} + (h : ∀ (hδLe : δ < 1 - Bstar (LinearCode.rate (code φ degree))) {f : Fin m → ι → F}, + Pr_{let r ←$ᵖ F}[ + δᵣ((fun x => ∑ j : Fin m, (GenFun r j) * f j x) , code φ degree) ≤ (δ : ℝ)] + > ENNReal.ofReal (err' F degree (LinearCode.rate (code φ degree)) δ m)) : + + ∃ S : Finset ι, + S.card ≥ (1 - δ) * (Fintype.card ι) ∧ + ∃ u : (ι → F), + u ∈ (code φ degree) ∧ ∀ i : Fin m, ∀ x ∈ S, f i x = u x + +:= by sorry diff --git a/ArkLib/ProofSystem/Stir/Quotienting.lean b/ArkLib/ProofSystem/Stir/Quotienting.lean new file mode 100644 index 000000000..ac2e7051c --- /dev/null +++ b/ArkLib/ProofSystem/Stir/Quotienting.lean @@ -0,0 +1,70 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter, Poulami Das (Least Authority) +-/ +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability + +open Polynomial ReedSolomon ListDecodable + +namespace Quotienting + +variable {n : ℕ} + {F : Type*} [Field F] [Fintype F] [DecidableEq F] + {ι : Finset F} + +/-- Let `Ans : S → F`, `ansPoly(Ans, S)` is the unique interpolating polynomial of degree < |S| + with `AnsPoly(s) = Ans(s)` for each s ∈ S. + + Note: For S=∅ we get Ans'(x) = 0 (the zero polynomial) -/ +noncomputable def ansPoly (S : Finset F) (Ans : S → F) : Polynomial F := + Lagrange.interpolate S.attach (fun i => (i : F)) Ans + +/-- VanishingPoly is the vanishing polynomial on S, i.e. the unique polynomial of degree |S|+1 + that is 0 at each s ∈ S and is not the zero polynomial. That is V(X) = ∏(s ∈ S) (X - s). 
-/ +noncomputable def vanishingPoly (S : Finset F) : Polynomial F := + ∏ s ∈ S, (Polynomial.X - Polynomial.C s) + +/-- Definition 4.2 + funcQuotient is the quotient function that outputs + if x ∈ S, Fill(x). + else (f(x) - Ans'(x)) / V(x). + Note here that, V(x) = 0 ∀ x ∈ S, otherwise V(x) ≠ 0. -/ +noncomputable def funcQuotient (f : ι → F) (S : Finset F) (Ans Fill : S → F) : ι → F := + fun x => + if hx : x.val ∈ S then Fill ⟨x.val, hx⟩ -- if x ∈ S, Fill(x). + else (f x - (ansPoly S Ans).eval x.val) / (vanishingPoly S).eval x.val + +/-- Definition 4.3 + polyQuotient is the polynomial derived from the polynomials fPoly, Ans' and V, where + Ans' is a polynomial s.t. Ans'(x) = fPoly(x) for x ∈ S, and + V is the vanishing polynomial on S as before. + Then, polyQuotient = (fPoly - Ans') / V, where + polyQuotient.degree < (fPoly.degree - ι.card) -/ +noncomputable def polyQuotient {degree : ℕ} (S : Finset F) (fPoly : Polynomial F) + (hPoly : fPoly.degree < degree) : Polynomial F := + (fPoly - (ansPoly S (fun s => fPoly.eval s))) / (vanishingPoly S) + +/-- We define the set disagreementSet(f,ι,S,Ans) as the set of all points x ∈ ι that lie in S +such that the Ans' disagrees with f, we have +disagreementSet := { x ∈ ι ∩ S ∧ AnsPoly x ≠ f x }. -/ +noncomputable def disagreementSet (f : ι → F) (S : Finset F) (Ans : S → F) : Finset F := + (ι.attach.filter (fun x ↦ (ansPoly S Ans).eval x.val ≠ f x)).image Subtype.val + +/-- Quotienting Lemma 4.4 + Let `f : ι → F` be a function, `degree` a degree parameter, `δ ∈ (0,1)` be a distance parameter + `S` be a set with |S| < degree, `Ans, Fill : S → F`. 
Suppose for all `u ∈ Λ(code, f, δ)`, + there exists `x : S`, such that `uPoly(x) ≠ Ans(x)` then + `δᵣ(funcQuotient(f, S, Ans, Fill), code[ι, F, degree - |S|]) + |T|/|ι| > δ`, + where T is the disagreementSet as defined above -/ +lemma quotienting [DecidableEq F] {degree : ℕ} {domain : ι ↪ F} [Nonempty ι] + (S : Finset F) (hS_lt : S.card < degree) (r : F) + (f : ι → F) (Ans Fill : S → F) (δ : ℝ) (hδPos : δ > 0) (hδLt : δ < 1) + (h : ∀ u : code domain degree, u.val ∈ (relHammingBall ↑(code domain degree) f δ) → + ∃ (x : S) (hx : x.val ∈ S), ((decodeLT u) : F[X]).eval x.val ≠ Ans ⟨x.val, hx⟩) : + δᵣ((funcQuotient f S Ans Fill), (code domain (degree - S.card))) + + ((disagreementSet f S Ans).card : ℝ) / (ι.card : ℝ) > δ := by + sorry + +end Quotienting diff --git a/ArkLib/ProofSystem/Sumcheck/Impl/Basic.lean b/ArkLib/ProofSystem/Sumcheck/Impl/Basic.lean new file mode 100644 index 000000000..adf784775 --- /dev/null +++ b/ArkLib/ProofSystem/Sumcheck/Impl/Basic.lean @@ -0,0 +1,32 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ + +import ArkLib.ProofSystem.Sumcheck.Spec.General +import ArkLib.Data.MlPoly.Equiv + +/-! + # Executable Spec of the Sum-check Protocol + +This file contains the basic implementation of the sum-check protocol. Instead of viewing the +protocol purely as a non-computable specification in terms of `MvPolynomial`, we give the standard +algorithms (linear time & streaming) for the setting of: +- A number of computable multilinear polynomials `p : ι → MlPoly R` +- A combination function `f` that is itself a (very simple) computable multivariate polynomial + `f : CMvPoly ι R`. 
(pending merge from Plonky3's development by Nethermind) + +Future extensions will include optimized variants for various protocols (well, to the extent that +they change the verifier, since we don't care too much about prover's efficiency): +- Not sending evaluation at 0/1 +- Sending evaluation at infinity instead +-/ + +namespace Sumcheck + +namespace Impl + +end Impl + +end Sumcheck diff --git a/ArkLib/ProofSystem/Sumcheck/SingleRound.lean b/ArkLib/ProofSystem/Sumcheck/SingleRound.lean deleted file mode 100644 index 1d4c0c7da..000000000 --- a/ArkLib/ProofSystem/Sumcheck/SingleRound.lean +++ /dev/null @@ -1,645 +0,0 @@ -/- -Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. -Released under Apache 2.0 license as described in the file LICENSE. -Authors: Quang Dao --/ - -import ArkLib.OracleReduction.Security.Basic -import ArkLib.OracleReduction.Composition.Sequential.General -import ArkLib.OracleReduction.LiftContext.Basic -import ArkLib.Data.Fin.Basic - -/-! -# Single round of the Sum-check Protocol - -We define a single round of the sum-check protocol as a two-message oracle reduction, and prove that -it is perfect complete and round-by-round knowledge sound. Specification & security proofs of the -full sum-check protocol are given in `Basic.lean`, following our sequential composition results. - -## Protocol Description - -The sum-check protocol is parameterized by the following: -- `R`: the underlying ring (for soundness, required to be finite and a domain) -- `n + 1 : ℕ+`: the number of variables (also number of rounds) -- `deg : ℕ`: the individual degree bound for the polynomial -- `D : Fin m ↪ R`: the set of `m` evaluation points for each variable (for some `m`), represented as - an injection `Fin m ↪ R`. The image of `D` as a finite subset of `R` is written as - `Finset.univ.map D`. -- `oSpec : OracleSpec ι`: the set of underlying oracles (e.g. random oracles) that may be needed for - other reductions. 
However, the sum-check protocol does _not_ use any oracles. - -The sum-check relation has no witness. The statement for the `i`-th round, where `i : Fin (n + 2)`, - contains: -- `target : R`, which is the target value for sum-check -- `challenges : Fin i → R`, which is the list of challenges sent from the verifier to the prover in - previous rounds - -There is a single oracle statement, which is: -- `poly : MvPolynomial (Fin (n + 2)) R`, the multivariate polynomial that is summed over - -The sum-check relation for the `i`-th round checks that: - - `∑ x ∈ (univ.map D) ^ᶠ (n + 1 - i), poly ⸨challenges, x⸩ = target`. - -Note that the last statement (when `i = n`) is the output statement of the sum-check protocol. - -For `i = 0, ..., n`, the `i`-th round of the sum-check protocol consists of the following: - -1. The prover sends a univariate polynomial `pᵢ ∈ R⦃≤ deg⦄[X]` of degree at most `deg`. If the - prover is honest, then we have: - - `pᵢ(X) = ∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩`. - - Here, `poly ⸨X ⦃i⦄, challenges, x⸩` is the polynomial `poly` evaluated at the concatenation of the - prior challenges `challenges`, the `i`-th variable as the new indeterminate `X`, and the rest of - the values `x ∈ (univ.map D) ^ᶠ (n - i)`. - - In the oracle protocol, this polynomial `pᵢ` is turned into an oracle for which the verifier can - query for evaluations at arbitrary points. - -2. The verifier then sends the `i`-th challenge `rᵢ` sampled uniformly at random from `R`. - -3. The (oracle) verifier then performs queries for the evaluations of `pᵢ` at all points in - `(univ.map D)`, and checks that: `∑ x in (univ.map D), pᵢ.eval x = target`. - - If the check fails, then the verifier outputs `failure`. 
- - Otherwise, it outputs a statement for the next round as follows: - - `target` is updated to `pᵢ.eval rᵢ` - - `challenges` is updated to the concatenation of the previous challenges and `rᵢ` - -## Simplification - -We may break this down further into two one-message oracle reductions. - -1. The first message from the prover to the verifier can be seen as invoking a ``virtual'' protocol - as follows: - -- `𝒫` holds some data `d` available as an oracle statement to `𝒱`, and wants to convince `𝒱` of - some predicate `pred` on `d`, expressible as an oracle computation leveraging the oracle - statement's query structure. -- `𝒫` sends `d'` to `𝒱` as an oracle message. `𝒱` directly checks `pred d'` by performing said - oracle computation on `d'` and outputs the result. - -2. The second message (a challenge) from the verifier to the prover can be seen as invoking a - ``virtual'' protocol as follows: - -- `𝒫` holds two data `d₁` and `d₂`, available as oracle statements, and wants to convince `𝒱` that - `d₁ = d₂`. -- `𝒱` sends a random query `q` to `𝒫`. It then checks that `oracle d₁ q = oracle d₂ q = r`, and - outputs `r` as the output statement. - -The virtual aspect is because of the substitution: `d = d' = s_i(X)`, where recall -`s_i(X) = ∑ x ∈ D^{n - i}, p(r_0, ..., r_{i-1}, X, x)`. - -The predicate is that `∑ y ∈ D, s_i(y) = claim_i`. - --/ - -namespace Sumcheck - -open Polynomial MvPolynomial OracleSpec OracleComp ProtocolSpec Finset - -noncomputable section - -namespace Spec - -namespace Simpler - --- Let's try to simplify a single round of sum-check, and appeal to compositionality to lift --- the result to the full protocol. - --- In this simplified setting, the sum-check protocol consists of a _univariate_ polynomial --- `p : R⦃≤ d⦄[X]` of degree at most `d`, and the relation is that --- `∑ x ∈ univ.map D, p.eval x = target`. 
- --- We further break it down into each message: --- In order of (witness, oracle statement, public statement ; relation): --- (∅, p : R⦃≤ d⦄[X], t : R ; ∑ x ∈ univ.map D, p.eval x = t) =>[1st message] --- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], t : R ; p = q) =>[omit unused public statement] --- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], ∅ ; p = q) =>[2nd message] --- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], r : R ; p.eval r = q.eval r, t = q.eval r) - -variable (R : Type) [CommSemiring R] (d : ℕ) {m : ℕ} (D : Fin m ↪ R) - -def InputOracleStatement : Unit → Type := fun _ => R⦃≤ d⦄[X] - -def OutputOracleStatement : Unit ⊕ Unit → Type := fun _ => R⦃≤ d⦄[X] - -def inputRelation : R × (∀ i, InputOracleStatement R d i) → Unit → Prop := - fun ⟨target, oStmt⟩ _ => ∑ x ∈ (univ.map D), (oStmt ()).1.eval x = target - -def outputRelation : R × (∀ i, OutputOracleStatement R d i) → Unit → Prop := - fun ⟨chal, oStmt⟩ _ => (oStmt (.inl ())).1.eval chal = (oStmt (.inr ())).1.eval chal - -@[reducible] -def pSpec : ProtocolSpec 2 := ![(.P_to_V, R⦃≤ d⦄[X]), (.V_to_P, R)] - -instance : IsSingleRound (pSpec R d) where - prover_first' := by simp [pSpec, getDir] - verifier_last' := by simp [pSpec, getDir, Neg.neg] - -instance instOracleInterfaceMessagePSpec : OracleInterface ((pSpec R d).Message default) := by - simp [pSpec, default] - exact instOracleInterfacePolynomialDegreeLE - -instance instVCVCompatibleChallengePSpec [VCVCompatible R] : - VCVCompatible ((pSpec R d).Challenge default) := by - simp [pSpec, Challenge, default] - infer_instance - -def prover : OracleProver (pSpec R d) []ₒ R Unit R Unit - (InputOracleStatement R d) (OutputOracleStatement R d) where - PrvState - | 0 => R⦃≤ d⦄[X] - | 1 => R⦃≤ d⦄[X] - | 2 => R⦃≤ d⦄[X] × R - - input := fun ⟨_, oStmt⟩ () => oStmt () - - sendMessage - | ⟨0, _⟩ => fun poly => pure ⟨poly, poly⟩ - | ⟨1, h⟩ => nomatch h - - receiveChallenge - | ⟨0, h⟩ => nomatch h - | ⟨1, _⟩ => fun poly chal => ⟨poly, chal⟩ - - output := fun ⟨poly, chal⟩ => ((chal, fun _ => 
poly), ()) - -variable [VCVCompatible R] - -def verifier : Verifier (pSpec R d) []ₒ (R × (∀ i, InputOracleStatement R d i)) - (R × (∀ i, OutputOracleStatement R d i)) where - verify := fun ⟨target, oStmt⟩ transcript => do - guard (∑ x ∈ (univ.map D), (transcript 0).1.eval x = target) - pure ⟨transcript 1, fun _ => oStmt ()⟩ - -def reduction : Reduction (pSpec R d) []ₒ (R × (∀ i, InputOracleStatement R d i)) Unit - (R × (∀ i, OutputOracleStatement R d i)) Unit where - prover := prover R d - verifier := verifier R d D - -open Reduction -open scoped NNReal - -instance : ∀ i, Fintype (OracleInterface.Response (Challenge (pSpec R d) i)) := by - sorry -instance : ∀ i, Inhabited (OracleInterface.Response (Challenge (pSpec R d) i)) := sorry -instance : [Challenge (pSpec R d)]ₒ.FiniteRange := inferInstance - -instance : Nonempty []ₒ.QueryLog := by simp [QueryLog]; infer_instance -instance : Nonempty ((pSpec R d).FullTranscript) := by - refine ⟨fun i => ?_⟩ - rcases i with _ | _ - · simp; exact default - · simp; exact default - -section simp_lemmas -- Some extra lemmas that still need to move to vcv - -universe u v w - -variable {ι : Type u} {spec : OracleSpec ι} {α β γ ω : Type u} - -@[simp] -lemma probFailure_bind_eq_zero_iff [spec.FiniteRange] - (oa : OracleComp spec α) (ob : α → OracleComp spec β) : - [⊥ | oa >>= ob] = 0 ↔ [⊥ | oa] = 0 ∧ ∀ x ∈ oa.support, [⊥ | ob x] = 0 := by - simp [probFailure_bind_eq_tsum, ← imp_iff_not_or] - -@[simp] -- TODO: more general version/class for query impls that never have failures -lemma loggingOracle.probFailure_simulateQ [spec.FiniteRange] (oa : OracleComp spec α) : - [⊥ | (simulateQ loggingOracle oa).run] = [⊥ | oa] := by - induction oa using OracleComp.induction with - | pure a => simp - | query_bind i t oa ih => simp [ih, probFailure_bind_eq_tsum] - | failure => simp - -@[simp] -lemma WriterT.run_map {m : Type u → Type v} [Monad m] [Monoid ω] - (x : WriterT ω m α) (f : α → β) : - (f <$> x).run = Prod.map f id <$> x.run := rfl - 
-@[simp] -lemma probFailure_liftComp {ι' : Type w} {superSpec : OracleSpec ι'} - [spec.FiniteRange] [superSpec.FiniteRange] - [h : MonadLift (OracleQuery spec) (OracleQuery superSpec)] - (oa : OracleComp spec α) : [⊥ | liftComp oa superSpec] = [⊥ | oa] := by - simp only [OracleComp.probFailure_def, OracleComp.evalDist_liftComp] - -@[simp] -lemma liftComp_support {ι' : Type w} {superSpec : OracleSpec ι'} - [h : MonadLift (OracleQuery spec) (OracleQuery superSpec)] - (oa : OracleComp spec α) : (liftComp oa superSpec).support = oa.support := by - induction oa using OracleComp.induction with - | pure a => simp - | query_bind i t oa ih => simp [ih] - | failure => simp - --- Stub lemma for now, will be available in the next VCVio update -lemma neverFails_map_iff' (oa : OracleComp spec α) (f : α → β) : - neverFails (f <$> oa) ↔ neverFails oa := by - rw [map_eq_bind_pure_comp] - simp [neverFails, neverFailsWhen, Function.comp_apply, implies_true, and_true] - -end simp_lemmas - -theorem perfect_completeness : (reduction R d D).perfectCompleteness - (inputRelation R d D) (outputRelation R d) := by - rw [perfectCompleteness_eq_prob_one] - intro ⟨target, oStmt⟩ () hValid - generalize h : oStmt () = p; obtain ⟨poly, hp⟩ := p - -- Need `convert` because of some duplicate instances, should eventually track those down - convert (probEvent_eq_one_iff _ _).2 ⟨?_, ?_⟩ - · simp only [Reduction.run, probFailure_bind_eq_zero_iff] - constructor - · simp -- There's still some pathing issue here w/ simp, need to simp in steps which is sub-par - unfold Prover.run Prover.runToRound Prover.processRound - simp [Fin.induction, Fin.induction.go, reduction, prover, neverFails_map_iff'] - · intro ⟨⟨stmt, oStmtOut⟩, _, transcript⟩ - simp -- Also some pathing issues, need to simp once before reducing `reduction` - simp [reduction, verifier, Verifier.run] - intro hSupport - simp [Prover.run, Prover.runToRound, Prover.processRound, reduction, prover] at hSupport - obtain ⟨h1, h2⟩ := hSupport - simp [← 
h2, Transcript.snoc, Fin.snoc, h] - simp [inputRelation, h] at hValid - exact hValid - · intro ⟨⟨⟨prvStmtOut, prvOStmtOut⟩, _⟩, verStmtOut, transcript⟩ hSupport - simp only [run, support_bind, liftM_eq_liftComp, Set.mem_iUnion, support_pure, - Set.mem_singleton_iff, Prod.eq_iff_fst_eq_snd_eq, true_and] at hSupport - obtain ⟨x1, hx1, ⟨x2_1, x2_2⟩, hx2, ⟨⟨⟨h2_1, h2_2⟩, _⟩, ⟨⟨h3_1, h3_2⟩, h3_3⟩⟩⟩ := hSupport - simp only [reduction, prover, Prover.run, Prover.runToRound] at hx1 - simp [Prover.processRound] at hx1 - obtain ⟨a, b, hab, hx1'⟩ := hx1 - simp only [Verifier.run, reduction, verifier] at hx2 - simp [liftComp_support, Transcript.snoc, Fin.snoc] at hx2 - obtain ⟨h1, h2, h3⟩ := hx2 - split; rename_i stuff prvStmtOut' _ verStmtOut' trans hEq - simp at hEq - obtain ⟨hPrvStmtOut, hVerStmtOut, hTranscript⟩ := hEq - have h4 : verStmtOut = ⟨x2_1, x2_2⟩ := Prod.ext h3_1 h3_2 - simp [outputRelation, ← hPrvStmtOut, ← hVerStmtOut, ← h3, h3_1, h3_2, h4, h2_1, h2_2, h2] - -end Simpler - --- The variables for sum-check -variable (R : Type) [CommSemiring R] (n : ℕ) (deg : ℕ) {m : ℕ} (D : Fin m ↪ R) - -/-- Statement for sum-check, parameterized by the ring `R`, the number of variables `n`, -and the round index `i : Fin (n + 2)` - -Note that when `i = Fin.last (n + 1)`, this is the output statement of the sum-check protocol -/ -structure Statement (i : Fin (n + 2)) where - -- The target value for sum-check - target : R - -- The challenges sent from the verifier to the prover from previous rounds - challenges : Fin i → R - -@[reducible] -def OracleStatement : Unit → Type := fun _ => R⦃≤ deg⦄[X Fin (n + 1)] - -/-- The sum-check relation for the `i`-th round, for `i ≤ n` -/ -def relation (i : Fin (n + 2)) : - (Statement R n i) × (∀ i, OracleStatement R n deg i) → Unit → Prop := - fun ⟨⟨target, challenges⟩, polyOracle⟩ _ => - ∑ x ∈ (univ.map D) ^ᶠ (n + 1 - i), (polyOracle ()).val ⸨challenges, x⸩ = target - -namespace SingleRound - -/-- Protocol specification for the `i`-th round of 
the sum-check protocol - -Consists of a message from prover to verifier of degree at most `deg`, and a message -from verifier to prover of a field element in `R`. -/ -@[reducible] -def pSpec : ProtocolSpec 2 := ![(.P_to_V, R⦃≤ deg⦄[X]), (.V_to_P, R)] - --- /-- Combination of the protocol specifications for all rounds -/ --- def pSpecCombined : ProtocolSpec ((n + 1) * 2) := --- (compose n (fun _ => 2) (fun _ => pSpec R deg)).cast (by simp) - -instance : IsSingleRound (pSpec R deg) where - prover_first' := by simp [pSpec, getDir] - verifier_last' := by simp [pSpec, getDir, Neg.neg] - -/-- Recognize that the (only) message from the prover to the verifier has type `R⦃≤ deg⦄[X]`, and - hence can be turned into an oracle for evaluating the polynomial -/ -instance instOracleInterfaceMessagePSpec : OracleInterface ((pSpec R deg).Message default) := by - simp only [pSpec, default, Matrix.cons_val_zero] - exact instOracleInterfacePolynomialDegreeLE - -/-- Recognize that the challenge from the verifier to the prover has type `R`, and hence can be - sampled uniformly at random -/ -instance instVCVCompatibleChallengePSpec [VCVCompatible R] : - VCVCompatible ((pSpec R deg).Challenge default) := by - simp only [pSpec, default, Matrix.cons_val_one, Matrix.head_cons] - infer_instance - -@[reducible] -def proverState (i : Fin (n + 1)) : ProverState 2 where - PrvState - | 0 => (Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i) - | 1 => (Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i) - | 2 => (Statement R n i.succ) × (∀ i, OracleStatement R n deg i) - -/-- Prover input for the `i`-th round of the sum-check protocol, where `i < n` -/ -def proverIn (i : Fin (n + 1)) : ProverIn - ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) - Unit ((proverState R n deg i).PrvState 0) where - input := fun x _ => x - -/-- Auxiliary lemma for proving that the polynomial sent by the honest prover is of degree at most - `deg` -/ -theorem sumcheck_roundPoly_degreeLE 
(i : Fin (n + 1)) {challenges : Fin i.castSucc → R} - {poly : R[X Fin (n + 1)]} (hp : poly ∈ R⦃≤ deg⦄[X Fin (n + 1)]) : - ∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩' - (by simp; omega) ∈ R⦃≤ deg⦄[X] := by - refine mem_degreeLE.mpr (le_trans (degree_sum_le ((univ.map D) ^ᶠ (n - i)) _) ?_) - simp only [Finset.sup_le_iff, Fintype.mem_piFinset, mem_map, mem_univ, true_and] - intro x hx - refine le_trans (degree_map_le) (natDegree_le_iff_degree_le.mp ?_) - rw [natDegree_finSuccEquivNth] - exact degreeOf_le_iff.mpr fun m a ↦ hp a i - -variable {ι : Type} (oSpec : OracleSpec ι) - -/-- Prover interaction for the `i`-th round of the sum-check protocol, where `i < n`. -/ -def proverRound (i : Fin (n + 1)) : ProverRound (pSpec R deg) oSpec where - PrvState := (proverState R n deg i).PrvState - - sendMessage - | ⟨0, _⟩ => fun state => - let ⟨⟨_, challenges⟩, oStmt⟩ := state - let ⟨poly, hp⟩ := oStmt 0 - pure ⟨ ⟨∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩'(by simp; omega), - sumcheck_roundPoly_degreeLE R n deg D i hp⟩, - state⟩ - | ⟨1, h⟩ => nomatch h - - receiveChallenge - | ⟨0, h⟩ => nomatch h - | ⟨1, _⟩ => fun ⟨⟨target, challenges⟩, oStmt⟩ chal => - let ⟨poly, hp⟩ := oStmt 0 - letI newChallenges : Fin i.succ → R := Fin.snoc challenges chal - letI newTarget := ∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨newChallenges, x⸩'(by simp; omega) - ⟨⟨newTarget, newChallenges⟩, fun _ => ⟨poly, hp⟩⟩ - -/-- Since there is no witness, the prover's output for each round `i < n` of the sum-check protocol - is trivial -/ -def proverOut (i : Fin (n + 1)) : ProverOut - (Statement R n i.succ × (∀ i, OracleStatement R n deg i)) Unit - ((proverState R n deg i).PrvState (Fin.last 2)) where - output := fun x => (x, ()) - -/-- The overall prover for the `i`-th round of the sum-check protocol, where `i < n`. This is only - well-defined for `n > 0`, since when `n = 0` there is no protocol. 
-/ -def prover (i : Fin (n + 1)) : OracleProver (pSpec R deg) oSpec - (Statement R n i.castSucc) Unit (Statement R n i.succ) Unit - (OracleStatement R n deg) (OracleStatement R n deg) where - toProverState := proverState R n deg i - toProverIn := proverIn R n deg i - sendMessage := (proverRound R n deg D oSpec i).sendMessage - receiveChallenge := (proverRound R n deg D oSpec i).receiveChallenge - toProverOut := proverOut R n deg i - -variable [VCVCompatible R] - -/-- The (non-oracle) verifier of the sum-check protocol for the `i`-th round, where `i < n + 1` -/ -def verifier (i : Fin (n + 1)) : Verifier (pSpec R deg) oSpec - ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) - (Statement R n i.succ × (∀ i, OracleStatement R n deg i)) where - verify := fun ⟨⟨target, challenges⟩, oStmt⟩ transcript => do - let ⟨p_i, _⟩ : R⦃≤ deg⦄[X] := transcript 0 - let r_i : R := transcript 1 - guard (∑ x ∈ (univ.map D), p_i.eval x = target) - pure ⟨⟨p_i.eval r_i, Fin.snoc challenges r_i⟩, oStmt⟩ - -/-- The oracle verifier for the `i`-th round, where `i < n + 1` -/ -def oracleVerifier (i : Fin (n + 1)) : OracleVerifier (pSpec R deg) oSpec - (Statement R n i.castSucc) (Statement R n i.succ) - (OracleStatement R n deg) (OracleStatement R n deg) where - -- Queries for the evaluations of the polynomial at all points in `D`, - -- plus one query for the evaluation at the challenge `r_i` - -- Check that the sum of the evaluations equals the target, and updates the statement accordingly - -- (the new target is the evaluation of the polynomial at the challenge `r_i`) - verify := fun ⟨target, challenges⟩ chal => do - let evals : List R ← (List.finRange m).mapM - (fun i => do - return ← query - (spec := (oSpec ++ₒ ([OracleStatement R n deg]ₒ ++ₒ [(pSpec R deg).Message]ₒ))) - (Sum.inr <| Sum.inr default) (D i)) - guard (evals.sum = target) - let newTarget ← query - (spec := (oSpec ++ₒ ([OracleStatement R n deg]ₒ ++ₒ [(pSpec R deg).Message]ₒ))) - (Sum.inr <| Sum.inr default) (by 
simpa only using chal default) - letI newTarget : R := by simpa only - pure ⟨newTarget, Fin.snoc challenges (chal default)⟩ - - embed := Function.Embedding.inl - - hEq := fun _ => rfl - -/-- The sum-check reduction for the `i`-th round, where `i < n` and `n > 0` -/ -def reduction (i : Fin (n + 1)) : Reduction (pSpec R deg) oSpec - ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) Unit - ((Statement R n i.succ) × (∀ i, OracleStatement R n deg i)) Unit := - .mk (prover R n deg D oSpec i) (verifier R n deg D oSpec i) - -/-- The sum-check oracle reduction for the `i`-th round, where `i < n` and `n > 0` -/ -def oracleReduction (i : Fin (n + 1)) : OracleReduction (pSpec R deg) oSpec - (Statement R n i.castSucc) Unit (Statement R n i.succ) Unit - (OracleStatement R n deg) (OracleStatement R n deg) where - prover := prover R n deg D oSpec i - verifier := oracleVerifier R n deg D oSpec i - -section Security - -open Reduction -open scoped NNReal - -variable {R : Type} [CommSemiring R] [VCVCompatible R] {n : ℕ} {deg : ℕ} {m : ℕ} {D : Fin m ↪ R} - {ι : Type} {oSpec : OracleSpec ι} {i : Fin (n + 1)} - -/-- The oracle verifier does the same thing as the non-oracle verifier -/ -theorem oracleVerifier_eq_verifier : - (oracleVerifier R n deg D oSpec i).toVerifier = verifier R n deg D oSpec i := by - simp [pSpec, OracleVerifier.toVerifier, - instOracleInterfaceMessagePSpec, id_eq, oracleVerifier, bind_pure, guard_eq, - Fin.val_succ, bind_pure_comp, simulateQ_bind, simulateQ_map, simulateQ_query, - map_pure, Prod.map_apply, Fin.coe_castSucc, Function.Embedding.inl_apply, - eq_mpr_eq_cast, cast_eq, map_bind, Functor.map_map, verifier, Fin.isValue, Matrix.cons_val_zero, - sum_map, Verifier.mk.injEq, simulateQ] - -- rw [← List.mapM'_eq_mapM] - funext stmt transcript - split; next x p_i hp_i hEq => - have : p_i = (transcript 0).1 := by simp only [hEq] - subst this - simp [default, FullTranscript.messages, FullTranscript.challenges, - instOracleInterfacePolynomialDegreeLE, 
Option.elimM] - sorry - -variable [DecidableEq ι] [oSpec.FiniteRange] - --- set_option trace.profiler true - -/-- Completeness theorem for sumcheck-/ -theorem perfect_completeness : OracleReduction.perfectCompleteness - (pSpec := pSpec R deg) (oSpec := oSpec) - (relation R n deg D i.castSucc) (relation R n deg D i.succ) - (oracleReduction R n deg D oSpec i) := by - simp only [OracleReduction.perfectCompleteness, perfectCompleteness_eq_prob_one, - eq_iff_iff, iff_true, probEvent_eq_one_iff, Prod.forall] - unfold relation oracleReduction - -- prover verifier Reduction.run - intro ⟨target, challenge⟩ oStmt _ hValid - simp at hValid - constructor - · simp [Reduction.run, Prover.run, Prover.runToRound]; sorry - -- simp only [pSpec, evalDist, eq_mp_eq_cast, reduction, prover, - -- proverIn, proverRound, eq_mpr_eq_cast, proverOut, verifier, Matrix.cons_val_zero, - -- sum_map, decide_eq_true_eq, Bool.decide_or, Bool.decide_eq_true, decide_not, - -- simulate', simulate, map_pure, bind_pure_comp, PMF.pure_bind, Function.comp_apply] - -- simp only [map_eq_bind_pure_comp, bind, pure, PMF.bind_bind, PMF.pure_bind, - -- Function.comp_apply, Function.uncurry_apply_pair, PMF.bind_apply, PMF.uniformOfFintype_apply, - -- PMF.pure_apply, eq_iff_iff, eq_mp_eq_cast, mul_ite, mul_one, mul_zero, iff_true] - -- simp [Reduction.run, Prover.run, Prover.runToRound] - sorry - -- by_cases hp : p = True - -- · simp [hp, hReject] - -- sorry - -- · simp at hp - -- simp [hp, hReject] - -- intro r - -- constructor - -- · simp_rw [Polynomial.eval_finset_sum _ _ _, ← hSum] - -- simp only [Bool.not_eq_eq_eq_not, Bool.not_false, decide_eq_true_eq] - -- sorry - -- · simp_rw [Polynomial.eval_finset_sum _ _ _] - -- sorry - -- -- at this point we have reduced to a purely polynomial problem - -/-- State function for round-by-round soundness -/ -def stateFunction (i : Fin (n + 1)) : Verifier.StateFunction (pSpec := pSpec R deg) (oSpec := oSpec) - (relation R n deg D i.castSucc).language (relation R n deg D 
i.succ).language - (verifier R n deg D oSpec i) where - toFun := fun m ⟨stmt, oStmt⟩ partialTranscript => match m with - -- If `m = 0` (e.g. the transcript is empty), returns whether - -- the statement satisfies the relation - | 0 => relation R n deg D i.castSucc ⟨stmt, oStmt⟩ () - -- If `m = 1`, so the transcript contains the new polynomial `p_i`, returns the above check, - -- and also whether `p_i` is as expected - | 1 => relation R n deg D i.castSucc ⟨stmt, oStmt⟩ () - ∧ (by simpa using partialTranscript ⟨0, by simp⟩ : R⦃≤ deg⦄[X]) = - ⟨∑ x ∈ (univ.map D) ^ᶠ (n - i), (oStmt 0).1 ⸨X ⦃i⦄, stmt.challenges, x⸩'(by simp; omega), - sumcheck_roundPoly_degreeLE R n deg D i (oStmt 0).2⟩ - -- If `m = 2`, so we get the full transcript, returns the above checks, and also whether the - -- updated statement satisfies the new relation - | 2 => relation R n deg D i.succ ⟨⟨stmt.target, - by simpa using Fin.snoc stmt.challenges (by simpa using partialTranscript ⟨1, by simp⟩ : R)⟩, - oStmt⟩ () - toFun_empty := fun stmt hStmt => by simp_all [Function.language] - toFun_next := fun m hDir => match m with - | 0 => fun stmt tr hFalse => by simp_all - | 1 => nomatch hDir - toFun_full := fun stmt tr hFalse => by - simp_all [Function.language] - -- intro stmt' oStmt log h () - -- simp [Verifier.run] at h - -- have h' : ⟨stmt', oStmt⟩ ∈ Prod.fst '' - -- (simulate loggingOracle ∅ ((verifier R n deg D oSpec i).verify stmt tr)).support := by - -- simp [h]; exact ⟨log, h⟩ - -- contrapose! 
h' - -- rw [← OracleComp.support_map] - -- simp [verifier] - -- let x := tr ⟨0, by simp⟩ - sorry - -/-- Trivial extractor since witness is `Unit` -/ -def rbrExtractor : RBRExtractor (pSpec R deg) oSpec (Statement R n i.castSucc) Unit := - fun _ _ _ _ => () - --- /-- Round-by-round knowledge soundness theorem for sumcheck -/ --- theorem rbr_knowledge_soundness : (oracleVerifier R n deg D oSpec i).rbrKnowledgeSoundness --- (relation R n deg D i.castSucc) (relation R n deg D i.succ) (stateFunction i) --- (fun _ => (deg : ℝ≥0) / Fintype.card R) := sorry - --- def rbrKnowledgeSoundness (relIn : StmtIn → WitIn → Prop) (relOut : StmtOut → WitOut → Prop) --- (verifier : Verifier pSpec oSpec StmtIn StmtOut) --- (stateFunction : StateFunction relOut.language verifier) --- (rbrKnowledgeError : pSpec.ChallengeIdx → ℝ≥0) : Prop := - -end Security - -end SingleRound - -namespace Combined - -/-! - We give the type signature & security guarantees for the whole sum-check protocol (so that we - could use it for other oracle reductions, e.g. 
Spartan, without needing to prove security of - sum-check first) --/ - -def pSpec : ProtocolSpec ((n + 1) * 2) := - fun i => if i % 2 = 0 then (.P_to_V, R⦃≤ deg⦄[X]) else (.V_to_P, R) - -instance : ∀ i, OracleInterface ((pSpec R deg n).Message i) := fun ⟨i, hDir⟩ => by - by_cases h : i % 2 = 0 - · simp [pSpec, h]; infer_instance - · simp [pSpec, h]; simp [MessageIdx, pSpec, h] at hDir - -instance [VCVCompatible R] : ∀ i, VCVCompatible ((pSpec R deg n).Challenge i) := fun ⟨i, hDir⟩ => by - by_cases h : i % 2 = 0 - · simp [pSpec, h]; simp [pSpec, h] at hDir - · simp [pSpec, h]; infer_instance - -def StmtIn := R - -@[reducible] -def OStmtIn : Unit → Type := fun _ => R⦃≤ deg⦄[X (Fin (n + 1))] - -def WitIn := Unit - -def StmtOut := R × (Fin (n + 1) → R) - -@[reducible] -def OStmtOut : Unit → Type := fun _ => R⦃≤ deg⦄[X (Fin (n + 1))] - -def WitOut := Unit - -def relIn : (StmtIn R) × (∀ i, OStmtIn R n deg i) → WitIn → Prop := - fun ⟨target, polyOracle⟩ _ => ∑ x ∈ (univ.map D) ^ᶠ (n + 1), (polyOracle ()).1 ⸨x⸩ = target - -def relOut : (StmtOut R n) × (∀ i, OStmtOut R n deg i) → WitOut → Prop := - fun ⟨⟨target, challenges⟩, polyOracle⟩ _ => (polyOracle ()).1 ⸨challenges⸩ = target - -def prover : OracleProver (pSpec R deg n) []ₒ (StmtIn R) WitIn (StmtOut R n) WitOut - (OStmtIn R n deg) (OStmtOut R n deg) := sorry - -def verifier : OracleVerifier (pSpec R deg n) []ₒ (StmtIn R) (StmtOut R n) - (OStmtIn R n deg) (OStmtOut R n deg) := sorry - -def reduction : OracleReduction (pSpec R deg n) []ₒ (StmtIn R) WitIn (StmtOut R n) WitOut - (OStmtIn R n deg) (OStmtOut R n deg) := - .mk (prover R n deg) (verifier R n deg) - -variable [VCVCompatible R] - -/-- Perfect completeness for the (full) sum-check protocol -/ -theorem reduction_complete : (reduction R n deg).perfectCompleteness - (relIn R n deg D) (relOut R n deg) := sorry - --- def stateFunction : Reduction.StateFunction (pSpec R deg n) []ₒ --- (relIn R n deg D) (relOut R n deg) - --- /-- Round-by-round knowledge soundness for 
the (full) sum-check protocol -/ --- theorem reduction_sound : - -end Combined - -end Spec - --- end for noncomputable section -end - -end Sumcheck diff --git a/ArkLib/ProofSystem/Sumcheck/Basic.lean b/ArkLib/ProofSystem/Sumcheck/Spec/General.lean similarity index 56% rename from ArkLib/ProofSystem/Sumcheck/Basic.lean rename to ArkLib/ProofSystem/Sumcheck/Spec/General.lean index 1c700b9b7..c7a567b19 100644 --- a/ArkLib/ProofSystem/Sumcheck/Basic.lean +++ b/ArkLib/ProofSystem/Sumcheck/Spec/General.lean @@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Authors: Quang Dao -/ -import ArkLib.ProofSystem.Sumcheck.SingleRound +import ArkLib.ProofSystem.Sumcheck.Spec.SingleRound /-! # The Sum-check Protocol @@ -21,7 +21,7 @@ that those implementations derive security from that of the abstract protocol. The sum-check protocol is parameterized by the following: - `R`: the underlying ring (for soundness, required to be finite and a domain) -- `n + 1 : ℕ+`: the number of variables (also number of rounds) +- `n : ℕ+`: the number of variables (also number of rounds) - `deg : ℕ`: the individual degree bound for the polynomial - `D : Fin m ↪ R`: the set of `m` evaluation points for each variable (for some `m`), represented as an injection `Fin m ↪ R`. The image of `D` as a finite subset of `R` is written as @@ -29,31 +29,31 @@ The sum-check protocol is parameterized by the following: - `oSpec : OracleSpec ι`: the set of underlying oracles (e.g. random oracles) that may be needed for other reductions. However, the sum-check protocol does _not_ use any oracles. -The sum-check relation has no witness. The statement for the `i`-th round, where `i : Fin (n + 2)`, +The sum-check relation has no witness. 
The statement for the `i`-th round, where `i : Fin (n + 1)`, contains: - `target : R`, which is the target value for sum-check - `challenges : Fin i → R`, which is the list of challenges sent from the verifier to the prover in previous rounds There is a single oracle statement, which is: -- `poly : MvPolynomial (Fin (n + 2)) R`, the multivariate polynomial that is summed over +- `poly : MvPolynomial (Fin n) R`, the multivariate polynomial that is summed over -The sum-check relation for the `i`-th round checks that: +The sum-check relation for the `i`-th round, where `i = 0, ..., n`, checks that: - `∑ x ∈ (univ.map D) ^ᶠ (n + 1 - i), poly ⸨challenges, x⸩ = target`. + `∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨challenges, x⸩ = target`. Note that the last statement (when `i = n`) is the output statement of the sum-check protocol. -For `i = 0, ..., n`, the `i`-th round of the sum-check protocol consists of the following: +For `i = 0, ..., n - 1`, the `i`-th round of the sum-check protocol consists of the following: 1. The prover sends a univariate polynomial `pᵢ ∈ R⦃≤ deg⦄[X]` of degree at most `deg`. If the prover is honest, then we have: - `pᵢ(X) = ∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩`. + `pᵢ(X) = ∑ x ∈ (univ.map D) ^ᶠ (n - i - 1), poly ⸨X ⦃i⦄, challenges, x⸩`. Here, `poly ⸨X ⦃i⦄, challenges, x⸩` is the polynomial `poly` evaluated at the concatenation of the prior challenges `challenges`, the `i`-th variable as the new indeterminate `X`, and the rest of - the values `x ∈ (univ.map D) ^ᶠ (n - i)`. + the values `x ∈ (univ.map D) ^ᶠ (n - i - 1)`. In the oracle protocol, this polynomial `pᵢ` is turned into an oracle for which the verifier can query for evaluations at arbitrary points. @@ -71,10 +71,6 @@ For `i = 0, ..., n`, the `i`-th round of the sum-check protocol consists of the ## Notes & TODOs -An annoying issue is that we need to index over `i : Fin (n + 2)`, not `i : Fin (n + 1)`. 
This is -because existing `Fin` functions works better with `n + 1` which is clearly positive, and not `i : -Fin (n + 1)` (which would imply `n > 0`, but this fact is not apparent). - Note that to represent sum-check as a series of IORs, we will need to implicitly constrain the degree of the polynomials via using subtypes, such as `Polynomial.degreeLE` and `MvPolynomial.degreeOf`. This is because the oracle verifier only gets oracle access to evaluating @@ -86,7 +82,7 @@ degree checks. There are some generalizations that we could consider later: -- Generalize to `degs : Fin (n + 2) → ℕ` and `domain : Fin (n + 2) → (Fin m ↪ R)`, e.g. can vary the +- Generalize to `degs : Fin n → ℕ` and `domain : Fin n → (Fin m ↪ R)`, e.g. can vary the degree bound and the summation domain for each variable - Generalize the challenges to come from a suitable subset of `R` (e.g. subtractive sets), and not @@ -112,56 +108,69 @@ noncomputable section namespace Spec -variable (R : Type) [CommSemiring R] (d : ℕ) {m : ℕ} (D : Fin m ↪ R) (n : ℕ) - -def pSpec : ProtocolSpec ((n + 1) * 2) := - fun i => if i % 2 = 0 then (.P_to_V, R⦃≤ d⦄[X]) else (.V_to_P, R) - -instance : ∀ i, OracleInterface ((pSpec R d n).Message i) := fun ⟨i, hDir⟩ => by - by_cases h : i % 2 = 0 - · simp [pSpec, h]; infer_instance - · simp [pSpec, h]; simp [MessageIdx, pSpec, h] at hDir +variable (R : Type) [CommSemiring R] (deg : ℕ) {m : ℕ} (D : Fin m ↪ R) (n : ℕ) -instance [VCVCompatible R] : ∀ i, VCVCompatible ((pSpec R d n).Challenge i) := fun ⟨i, hDir⟩ => by - by_cases h : i % 2 = 0 - · simp [pSpec, h]; simp [pSpec, h] at hDir - · simp [pSpec, h]; infer_instance - -def StmtIn := R +variable {ι : Type} (oSpec : OracleSpec ι) +/-- The protocol specification for the general sum-check protocol, which is the composition of the + single-round protocol specifications -/ @[reducible] -def OStmtIn : Unit → Type := fun _ => R⦃≤ d⦄[X (Fin (n + 1))] +def pSpec : ProtocolSpec (∑ _ : Fin n, 2) := + -- (∑ _ : Fin n, 2) + 
ProtocolSpec.seqCompose (fun _ => SingleRound.pSpec R deg) + -- n * 2 + -- fun i => if i % 2 = 0 then (.P_to_V, R⦃≤ d⦄[X]) else (.V_to_P, R) -def WitIn := Unit +-- instance : ∀ i, OracleInterface ((pSpec R d n).Message i) := fun ⟨i, hDir⟩ => by +-- by_cases h : i % 2 = 0 +-- · simp [pSpec, h]; infer_instance +-- · simp [pSpec, h]; simp [MessageIdx, pSpec, h] at hDir -def StmtOut := R × (Fin (n + 1) → R) +instance [VCVCompatible R] : ∀ i, VCVCompatible ((pSpec R deg n).Challenge i) := sorry +-- fun ⟨i, hDir⟩ => by +-- by_cases h : i % 2 = 0 +-- · simp [pSpec, h]; simp [pSpec, h] at hDir +-- · simp [pSpec, h]; infer_instance -@[reducible] -def OStmtOut : Unit → Type := fun _ => R⦃≤ d⦄[X (Fin (n + 1))] +-- Recall that the relations for the rounds have been defined in `SingleRound.lean` -def WitOut := Unit +-- def relIn : (StmtIn R) × (∀ i, OStmtIn R d n i) → WitIn → Prop := +-- fun ⟨target, polyOracle⟩ _ => ∑ x ∈ (univ.map D) ^ᶠ (n + 1), (polyOracle ()).val ⸨x⸩ = target -def relIn : (StmtIn R) × (∀ i, OStmtIn R d n i) → WitIn → Prop := - fun ⟨target, polyOracle⟩ _ => ∑ x ∈ (univ.map D) ^ᶠ (n + 1), (polyOracle ()).val ⸨x⸩ = target +-- def relOut : (StmtOut R n) × (∀ i, OStmtOut R d n i) → WitOut → Prop := +-- fun ⟨⟨target, challenges⟩, polyOracle⟩ _ => (polyOracle ()).1 ⸨challenges⸩ = target -def relOut : (StmtOut R n) × (∀ i, OStmtOut R d n i) → WitOut → Prop := - fun ⟨⟨target, challenges⟩, polyOracle⟩ _ => (polyOracle ()).1 ⸨challenges⸩ = target +-- def prover : OracleProver (pSpec R d n) oSpec +-- (Statement R n 0) Unit (Statement R n (.last (n + 1))) Unit +-- (OracleStatement R n d) (OracleStatement R n d) := sorry -def prover : OracleProver (pSpec R d n) []ₒ (StmtIn R) WitIn (StmtOut R n) WitOut - (OStmtIn R n d) (OStmtOut R n d) := sorry +-- def verifier : OracleVerifier (pSpec R d n) oSpec +-- (Statement R n 0) (Statement R n (.last (n + 1))) +-- (OracleStatement R n d) (OracleStatement R n d) := sorry -def verifier : OracleVerifier (pSpec R d n) []ₒ (StmtIn R) 
(StmtOut R n) - (OStmtIn R n d) (OStmtOut R n d) := sorry +variable [VCVCompatible R] + +@[reducible] +def reduction : Reduction oSpec + (Statement R n 0 × ∀ i, OracleStatement R n deg i) Unit + (Statement R n (.last n) × ∀ i, OracleStatement R n deg i) Unit + (pSpec R deg n) := + Reduction.seqCompose (oSpec := oSpec) + (Stmt := fun i => Statement R n i × (∀ j, OracleStatement R n deg j)) + (Wit := fun _ => Unit) + (pSpec := fun _ => SingleRound.pSpec R deg) + (SingleRound.reduction R n deg D oSpec) -def reduction : OracleReduction (pSpec R d n) []ₒ (StmtIn R) WitIn (StmtOut R n) WitOut - (OStmtIn R n d) (OStmtOut R n d) := - .mk (prover R d n) (verifier R d n) +-- TODO: define the oracle reduction version once we have defined `OracleReduction.seqCompose` -variable [VCVCompatible R] +variable [oSpec.FiniteRange] +-- Time-out for some reasons, will fix soon -- /-- Perfect completeness for the (full) sum-check protocol -/ --- theorem reduction_complete : (reduction R d n).perfectCompleteness --- (relIn R d D n) (relOut R d n) := sorry +-- theorem reduction_complete : (reduction R deg D n oSpec).perfectCompleteness +-- (relationRound R n deg D 0) (relationRound R n deg D (.last (n + 1))) := +-- Reduction.completeness_seqCompose (R := reduction R deg D n oSpec) +-- (fun _ => 0) (fun i => sorry) -- def stateFunction : Reduction.StateFunction (pSpec R deg n) []ₒ -- (relIn R n deg D) (relOut R n deg) diff --git a/ArkLib/ProofSystem/Sumcheck/Spec/SingleRound.lean b/ArkLib/ProofSystem/Sumcheck/Spec/SingleRound.lean new file mode 100644 index 000000000..9d807004e --- /dev/null +++ b/ArkLib/ProofSystem/Sumcheck/Spec/SingleRound.lean @@ -0,0 +1,678 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Quang Dao +-/ + +import ArkLib.OracleReduction.Security.Basic +import ArkLib.OracleReduction.Composition.Sequential.General +import ArkLib.OracleReduction.LiftContext.OracleReduction +import ArkLib.Data.Fin.Basic +import ArkLib.ToVCVio.Lemmas + +/-! +# Single round of the Sum-check Protocol + +We define a single round of the sum-check protocol as a two-message oracle reduction, and prove that +it is perfect complete and round-by-round knowledge sound. Specification & security proofs of the +full sum-check protocol are given in `Basic.lean`, following our sequential composition results. + +## Protocol Description + +The sum-check protocol is parameterized by the following: +- `R`: the underlying ring (for soundness, required to be finite and a domain) +- `n + 1 : ℕ+`: the number of variables (also number of rounds) +- `deg : ℕ`: the individual degree bound for the polynomial +- `D : Fin m ↪ R`: the set of `m` evaluation points for each variable (for some `m`), represented as + an injection `Fin m ↪ R`. The image of `D` as a finite subset of `R` is written as + `Finset.univ.map D`. +- `oSpec : OracleSpec ι`: the set of underlying oracles (e.g. random oracles) that may be needed for + other reductions. However, the sum-check protocol does _not_ use any oracles. + +The sum-check relation has no witness. The statement for the `i`-th round, where `i : Fin (n + 1)`, + contains: +- `target : R`, which is the target value for sum-check +- `challenges : Fin i → R`, which is the list of challenges sent from the verifier to the prover in + previous rounds + +There is a single oracle statement, which is: +- `poly : MvPolynomial (Fin (n + 1)) R`, the multivariate polynomial that is summed over + +The sum-check relation for the `i`-th round checks that: + + `∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨challenges, x⸩ = target`. + +Note that the last statement (when `i = n`) is the output statement of the sum-check protocol. 
+ +For `i = 0, ..., n - 1`, the `i`-th round of the sum-check protocol consists of the following: + +1. The prover sends a univariate polynomial `pᵢ ∈ R⦃≤ deg⦄[X]` of degree at most `deg`. If the + prover is honest, then we have: + + `pᵢ(X) = ∑ x ∈ (univ.map D) ^ᶠ (n - i - 1), poly ⸨X ⦃i⦄, challenges, x⸩`. + + Here, `poly ⸨X ⦃i⦄, challenges, x⸩` is the polynomial `poly` evaluated at the concatenation of the + prior challenges `challenges`, the `i`-th variable as the new indeterminate `X`, and the rest of + the values `x ∈ (univ.map D) ^ᶠ (n - i - 1)`. + + In the oracle protocol, this polynomial `pᵢ` is turned into an oracle for which the verifier can + query for evaluations at arbitrary points. + +2. The verifier then sends the `i`-th challenge `rᵢ` sampled uniformly at random from `R`. + +3. The (oracle) verifier then performs queries for the evaluations of `pᵢ` at all points in + `(univ.map D)`, and checks that: `∑ x in (univ.map D), pᵢ.eval x = target`. + + If the check fails, then the verifier outputs `failure`. + + Otherwise, it outputs a statement for the next round as follows: + - `target` is updated to `pᵢ.eval rᵢ` + - `challenges` is updated to the concatenation of the previous challenges and `rᵢ` + +## Simplification + +We may break this down further into two one-message oracle reductions. + +1. The first message from the prover to the verifier can be seen as invoking a ``virtual'' protocol + as follows: + +- `𝒫` holds some data `d` available as an oracle statement to `𝒱`, and wants to convince `𝒱` of + some predicate `pred` on `d`, expressible as an oracle computation leveraging the oracle + statement's query structure. +- `𝒫` sends `d'` to `𝒱` as an oracle message. `𝒱` directly checks `pred d'` by performing said + oracle computation on `d'` and outputs the result. + +2. 
The second message (a challenge) from the verifier to the prover can be seen as invoking a + ``virtual'' protocol as follows: + +- `𝒫` holds two data `d₁` and `d₂`, available as oracle statements, and wants to convince `𝒱` that + `d₁ = d₂`. +- `𝒱` sends a random query `q` to `𝒫`. It then checks that `oracle d₁ q = oracle d₂ q = r`, and + outputs `r` as the output statement. + +The virtual aspect is because of the substitution: `d = d' = s_i(X)`, where recall +`s_i(X) = ∑ x ∈ D^{n - i - 1}, p(r_0, ..., r_{i-1}, X, x)`. + +The predicate is that `∑ y ∈ D, s_i(y) = claim_i`. + +-/ + +namespace Sumcheck + +open Polynomial MvPolynomial OracleSpec OracleComp ProtocolSpec Finset + +noncomputable section + +namespace Spec + +-- The variables for sum-check +variable (R : Type) [CommSemiring R] (n : ℕ) (deg : ℕ) {m : ℕ} (D : Fin m ↪ R) + +/-- Statement for sum-check, parameterized by the ring `R`, the number of variables `n`, +and the round index `i : Fin (n + 1)` + +Note that when `i = Fin.last n`, this is the output statement of the sum-check protocol -/ +structure Statement (i : Fin (n + 1)) where + -- The target value for sum-check + target : R + -- The challenges sent from the verifier to the prover from previous rounds + challenges : Fin i → R + +/-- Oracle statement for sum-check, which is a multivariate polynomial over `n` variables of + individual degree at most `deg`, equipped with the poly evaluation oracle interface. 
-/ +@[reducible] +def OracleStatement : Unit → Type := fun _ => R⦃≤ deg⦄[X Fin n] + +/-- The sum-check relation for the `i`-th round, for `i ≤ n` -/ +def relationRound (i : Fin (n + 1)) : + Set (((Statement R n i) × (∀ i, OracleStatement R n deg i)) × Unit) := + { ⟨⟨⟨target, challenges⟩, polyOracle⟩, _⟩ | + ∑ x ∈ (univ.map D) ^ᶠ (n - i), (polyOracle ()).val ⸨challenges, x⸩ = target } + +namespace SingleRound + +namespace Simple + +-- Let's try to simplify a single round of sum-check, and appeal to compositionality to lift +-- the result to the full protocol. + +-- In this simplified setting, the sum-check protocol consists of a _univariate_ polynomial +-- `p : R⦃≤ d⦄[X]` of degree at most `d`, and the relation is that +-- `∑ x ∈ univ.map D, p.eval x = newTarget`. + +-- We further break it down into each message: +-- In order of (witness, oracle statement, public statement ; relation): +-- (∅, p : R⦃≤ d⦄[X], old_claim : R ; ∑ x ∈ univ.map D, p.eval x = old_claim) =>[Initial Context] +-- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], old_claim : R ; +-- ∑ x ∈ univ.map D, q.eval x = old_claim ; p = q) =>[Send Claim] (note replaced `p` with `q`) +-- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], old_claim : R ; p = q) =>[Check Claim] +-- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], ∅ ; p = q) =>[Reduce Claim] +-- (∅, (p, q) : R⦃≤ d⦄[X] × R⦃≤ d⦄[X], r : R ; p.eval r = q.eval r) =>[Random Query] +-- (∅, p : R⦃≤ d⦄[X], new_claim : R ; ∑ x ∈ univ.map D, p.eval x = new_claim) =>[Reduce Claim] + +-- Doesn't seem worth it for `Stmt{In/Out}`? 
Need to write `StmtIn R` and `StmtOut R` everywhere +-- instead of just `R` +@[reducible, simp] +def StmtIn : Type := R + +@[reducible, simp] +def StmtOut : Type := R × R + +@[reducible, simp] +def OStmtIn : Unit → Type := fun _ => R⦃≤ deg⦄[X] + +@[reducible, simp] +def OStmtOut : Unit → Type := fun _ => R⦃≤ deg⦄[X] + +def inputRelation : Set ((StmtIn R × (∀ i, OStmtIn R deg i)) × Unit) := + { ⟨⟨target, oStmt⟩, _⟩ | ∑ x ∈ (univ.map D), (oStmt ()).1.eval x = target } + +def outputRelation : Set ((StmtOut R × (∀ i, OStmtOut R deg i)) × Unit) := + { ⟨⟨⟨newTarget, chal⟩, oStmt⟩, _⟩ | (oStmt ()).1.eval chal = newTarget } + +@[reducible] +def pSpec : ProtocolSpec 2 := ![(.P_to_V, R⦃≤ deg⦄[X]), (.V_to_P, R)] + +instance : IsSingleRound (pSpec R deg) where + prover_first' := by simp [pSpec] + verifier_last' := by simp [pSpec] + +instance instOracleInterfaceMessagePSpec : OracleInterface ((pSpec R deg).Message default) := by + simp [pSpec, default] + exact instOracleInterfacePolynomialDegreeLE + +instance instVCVCompatibleChallengePSpec [VCVCompatible R] : + VCVCompatible ((pSpec R deg).Challenge default) := by + simp [pSpec, Challenge, default] + infer_instance + +variable {ι : Type} (oSpec : OracleSpec ι) + +/-- The prover in the simple description of a single round of sum-check. 
+ + Takes in input `target : R` and `poly : R⦃≤ deg⦄[X]`, and: + - Sends a message `poly' := poly` to the verifier + - Receive `chal` from the verifier + - Outputs `(newTarget, chal) : R × R`, where `newTarget := poly.eval chal` +-/ +def prover : OracleProver oSpec (StmtIn R) (OStmtIn R deg) Unit (StmtOut R) (OStmtOut R deg) Unit + (pSpec R deg) where + PrvState + | 0 => R⦃≤ deg⦄[X] + | 1 => R⦃≤ deg⦄[X] + | 2 => R⦃≤ deg⦄[X] × R + + input := fun ⟨⟨_, oStmt⟩, _⟩ => oStmt () + + sendMessage + | ⟨0, _⟩ => fun polyLE => pure ⟨polyLE, polyLE⟩ + | ⟨1, h⟩ => nomatch h + + receiveChallenge + | ⟨0, h⟩ => nomatch h + | ⟨1, _⟩ => fun polyLE chal => ⟨polyLE, chal⟩ + + output := fun ⟨polyLE, chal⟩ => (((polyLE.val.eval chal, chal), fun _ => polyLE), ()) + +variable [VCVCompatible R] + +/-- The verifier for the simple description of a single round of sum-check -/ +def verifier : Verifier oSpec (StmtIn R × (∀ i, OStmtIn R deg i)) + (StmtOut R × (∀ i, OStmtOut R deg i)) (pSpec R deg) where + verify := fun ⟨target, oStmt⟩ transcript => do + letI polyLE := transcript 0 + guard (∑ x ∈ (univ.map D), polyLE.val.eval x = target) + letI chal := transcript 1 + pure ⟨⟨(oStmt ()).val.eval chal, chal⟩, fun _ => oStmt ()⟩ + +/-- The reduction for the simple description of a single round of sum-check -/ +def reduction : Reduction oSpec (StmtIn R × (∀ i, OStmtIn R deg i)) Unit + (StmtOut R × (∀ i, OStmtOut R deg i)) Unit (pSpec R deg) where + prover := prover R deg oSpec + verifier := verifier R deg D oSpec + +open Function in +def oracleVerifier : OracleVerifier oSpec (StmtIn R) (OStmtIn R deg) (StmtOut R) (OStmtOut R deg) + (pSpec R deg) where + verify := fun target chal => do + let evals : Vector R m ← (Vector.finRange m).mapM + (fun i => query (spec := [OStmtIn R deg]ₒ) () (D i)) + guard (evals.sum = target) + -- Needs to convert `evals` to `R⦃≤ deg⦄[X]`, and then evaluate at `chal` + pure (sorry, chal default) + embed := .inl + hEq := fun i => by simp [pSpec] + +def oracleReduction : 
OracleReduction oSpec (StmtIn R) (OStmtIn R deg) Unit + (StmtOut R) (OStmtOut R deg) Unit (pSpec R deg) where + prover := prover R deg oSpec + verifier := oracleVerifier R deg D oSpec + +open Reduction +open scoped NNReal + +instance : ∀ i, VCVCompatible (OracleInterface.Response (Challenge (pSpec R deg) i)) + | ⟨1, _⟩ => by dsimp [pSpec, OracleInterface.Response]; infer_instance +instance : [Challenge (pSpec R deg)]ₒ.FiniteRange := inferInstance + +-- instance : Nonempty []ₒ.QueryLog := by simp [QueryLog]; infer_instance +-- instance : Nonempty ((pSpec R deg).FullTranscript) := by +-- refine ⟨fun i => ?_⟩ +-- rcases i with _ | _ +-- · simp; exact default +-- · simp; exact default + +-- TODO: show that the oracle verifier reduces to the (non-oracle) verifier +theorem oracleVerifier_eq_verifier : + (oracleVerifier R deg D oSpec).toVerifier = verifier R deg D oSpec := by + ext + simp [OracleVerifier.toVerifier, verifier, OracleInterface.simOracle2] + sorry + +/-- The oracle reduction is equivalent to the non-oracle reduction -/ +theorem oracleReduction_eq_reduction : + (oracleReduction R deg D oSpec).toReduction = reduction R deg D oSpec := by + ext : 1 <;> + simp [OracleReduction.toReduction, oracleReduction, reduction, oracleVerifier_eq_verifier] + +variable [oSpec.FiniteRange] + +/-- Perfect completeness for the (non-oracle) reduction -/ +theorem reduction_completeness : (reduction R deg D oSpec).perfectCompleteness + (inputRelation R deg D) (outputRelation R deg) := by + rw [perfectCompleteness_eq_prob_one] + intro ⟨target, oStmt⟩ () hValid + generalize h : oStmt () = p; obtain ⟨poly, hp⟩ := p + -- Need `convert` because of some duplicate instances, should eventually track those down + convert (probEvent_eq_one_iff _ _).2 ⟨?_, ?_⟩ + · simp only [Reduction.run, probFailure_bind_eq_zero_iff] + constructor + · simp -- There's still some pathing issue here w/ simp, need to simp in steps which is sub-par + unfold Prover.run Prover.runToRound Prover.processRound + simp 
[Fin.induction, Fin.induction.go, reduction, prover] + · stop + intro ⟨⟨stmt, oStmtOut⟩, _, transcript⟩ + simp -- Also some pathing issues, need to simp once before reducing `reduction` + simp [reduction, verifier, Verifier.run] + intro hSupport + simp [Prover.run, Prover.runToRound, Prover.processRound, reduction, prover] at hSupport + obtain ⟨h1, h2⟩ := hSupport + simp [← h2, Transcript.concat, Fin.snoc, h] + simp [inputRelation, h] at hValid + exact hValid + · intro ⟨⟨⟨prvStmtOut, prvOStmtOut⟩, _⟩, verStmtOut, transcript⟩ hSupport + simp only [run, support_bind, liftM_eq_liftComp, Set.mem_iUnion, support_pure, + Set.mem_singleton_iff, Prod.eq_iff_fst_eq_snd_eq] at hSupport + obtain ⟨x1, hx1, x2_1, hx2, ⟨⟨⟨h2_1, h2_2⟩, _⟩, ⟨⟨h3_1, h3_2⟩, h3_3⟩⟩⟩ := hSupport + simp only [reduction, prover, Prover.run, Prover.runToRound] at hx1 + simp [Prover.processRound] at hx1 + obtain ⟨a, b, hab, hx1'⟩ := hx1 + simp only [Verifier.run, reduction, verifier] at hx2 + simp [Transcript.concat, Fin.snoc] at hx2 + obtain ⟨h1, h2, h3⟩ := hx2 + split; rename_i stuff prvStmtOut' _ verStmtOut' trans hEq + simp at hEq + obtain ⟨hPrvStmtOut, hVerStmtOut, hTranscript⟩ := hEq + simp only [outputRelation, ← hVerStmtOut, StmtOut, OStmtOut, ← hPrvStmtOut, h2_2] + aesop + +/-- Perfect completeness for the oracle reduction -/ +theorem oracleReduction_completeness : (oracleReduction R deg D oSpec).perfectCompleteness + (inputRelation R deg D) (outputRelation R deg) := by + unfold OracleReduction.perfectCompleteness + rw [oracleReduction_eq_reduction] + exact reduction_completeness R deg D oSpec + +/-- Round-by-round knowledge soundness for the verifier -/ +theorem verifier_rbr_knowledge_soundness : (verifier R deg D oSpec).rbrKnowledgeSoundness + (inputRelation R deg D) (outputRelation R deg) (fun _ => (deg : ℝ≥0) / (Fintype.card R)) := by + sorry + +/-- Round-by-round knowledge soundness for the oracle verifier -/ +theorem oracleVerifier_rbr_knowledge_soundness : + (oracleVerifier R deg D 
oSpec).rbrKnowledgeSoundness + (inputRelation R deg D) (outputRelation R deg) (fun _ => (deg : ℝ≥0) / (Fintype.card R)) := by + sorry + +theorem oracleReduction_rbr_knowledge_soundness : + (oracleVerifier R deg D oSpec).rbrKnowledgeSoundness + (inputRelation R deg D) (outputRelation R deg) (fun _ => (deg : ℝ≥0) / (Fintype.card R)) := by + unfold OracleVerifier.rbrKnowledgeSoundness + rw [oracleVerifier_eq_verifier] + exact verifier_rbr_knowledge_soundness R deg D oSpec + +-- TODO: break down the oracle reduction into a series of oracle reductions as stated above + +end Simple + +/-- Protocol specification for the `i`-th round of the sum-check protocol + +Consists of a message from prover to verifier of degree at most `deg`, and a message +from verifier to prover of a field element in `R`. -/ +@[reducible] +def pSpec : ProtocolSpec 2 := ![(.P_to_V, R⦃≤ deg⦄[X]), (.V_to_P, R)] + +instance : IsSingleRound (pSpec R deg) where + prover_first' := by simp [pSpec] + verifier_last' := by simp [pSpec] + +/-- Recognize that the (only) message from the prover to the verifier has type `R⦃≤ deg⦄[X]`, and + hence can be turned into an oracle for evaluating the polynomial -/ +instance instOracleInterfaceMessagePSpec : OracleInterface ((pSpec R deg).Message default) := by + simp only [pSpec, default] + exact instOracleInterfacePolynomialDegreeLE + +/-- Recognize that the challenge from the verifier to the prover has type `R`, and hence can be + sampled uniformly at random -/ +instance instVCVCompatibleChallengePSpec [VCVCompatible R] : + VCVCompatible ((pSpec R deg).Challenge default) := by + simp only [pSpec, default] + infer_instance + +/-- Auxiliary lemma for proving that the polynomial sent by the honest prover is of degree at most + `deg` -/ +theorem sumcheck_roundPoly_degreeLE (i : Fin (n + 1)) {challenges : Fin i.castSucc → R} + {poly : R[X Fin (n + 1)]} (hp : poly ∈ R⦃≤ deg⦄[X Fin (n + 1)]) : + ∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩' + (by simp; omega) ∈ 
R⦃≤ deg⦄[X] := by + refine mem_degreeLE.mpr (le_trans (degree_sum_le ((univ.map D) ^ᶠ (n - i)) _) ?_) + simp only [Finset.sup_le_iff, Fintype.mem_piFinset, mem_map, mem_univ, true_and] + intro x hx + refine le_trans (degree_map_le) (natDegree_le_iff_degree_le.mp ?_) + rw [natDegree_finSuccEquivNth] + exact degreeOf_le_iff.mpr fun m a ↦ hp a i + +/-- The oracle statement lens that connect the simple to the full single-round sum-check protocol + +For `n = 0`, since `poly : R[X Fin 0]` is just a constant, we need to embed it as a constant poly. + +For other `n := n + 1`, we proceed with the sum `∑ x ∈ D ^ (n - i), poly ⸨challenges, X, x⸩` -/ +def oStmtLens (i : Fin n) : OracleStatement.Lens + (Statement R n i.castSucc) (Statement R n i.succ) (Simple.StmtIn R) (Simple.StmtOut R) + (OracleStatement R n deg) (OracleStatement R n deg) + (Simple.OStmtIn R deg) (Simple.OStmtOut R deg) where + + toFunA := fun ⟨⟨target, challenges⟩, oStmt⟩ => + ⟨target, fun _ => + match h : n with + | 0 => ⟨Polynomial.C <| MvPolynomial.isEmptyAlgEquiv R (Fin 0) (oStmt ()), by + rw [Polynomial.mem_degreeLE]; exact le_trans Polynomial.degree_C_le (by simp)⟩ + | n + 1 => + ⟨∑ x ∈ (univ.map D) ^ᶠ (n - i), (oStmt ()).val ⸨X ⦃i⦄, challenges, x⸩'(by simp; omega), + sumcheck_roundPoly_degreeLE R n deg D i (oStmt ()).property⟩⟩ + + toFunB := fun ⟨⟨_oldTarget, challenges⟩, oStmt⟩ ⟨⟨newTarget, chal⟩, oStmt'⟩ => + ⟨⟨newTarget, Fin.snoc challenges chal⟩, oStmt⟩ + +@[simp] +def oCtxLens (i : Fin n) : OracleContext.Lens + (Statement R n i.castSucc) (Statement R n i.succ) (Simple.StmtIn R) (Simple.StmtOut R) + (OracleStatement R n deg) (OracleStatement R n deg) + (Simple.OStmtIn R deg) (Simple.OStmtOut R deg) + Unit Unit Unit Unit where + wit := Witness.Lens.trivial + stmt := oStmtLens R n deg D i + +@[simp] +def extractorLens (i : Fin n) : Extractor.Lens + (Statement R n i.castSucc × (∀ i, OracleStatement R n deg i)) + (Statement R n i.succ × (∀ i, OracleStatement R n deg i)) + (Simple.StmtIn R × (∀ i, 
Simple.OStmtIn R deg i)) + (Simple.StmtOut R × (∀ i, Simple.OStmtOut R deg i)) + Unit Unit Unit Unit where + stmt := oStmtLens R n deg D i + wit := Witness.InvLens.trivial + +variable {ι : Type} (oSpec : OracleSpec ι) [VCVCompatible R] + +/-- The sum-check reduction for the `i`-th round, where `i < n` and `n > 0` -/ +def reduction (i : Fin n) : Reduction oSpec + ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) Unit + ((Statement R n i.succ) × (∀ i, OracleStatement R n deg i)) Unit (pSpec R deg) := + (Simple.reduction R deg D oSpec).liftContext (oCtxLens R n deg D i).toContext + +/-- The sum-check oracle reduction for the `i`-th round, where `i < n` and `n > 0` -/ +def oracleReduction (i : Fin n) : OracleReduction oSpec + (Statement R n i.castSucc) (OracleStatement R n deg) Unit + (Statement R n i.succ) (OracleStatement R n deg) Unit (pSpec R deg) := + (Simple.oracleReduction R deg D oSpec).liftContext (oCtxLens R n deg D i) + +section Security + +open Reduction +open scoped NNReal + +variable {R : Type} [CommSemiring R] [VCVCompatible R] {n : ℕ} {deg : ℕ} {m : ℕ} {D : Fin m ↪ R} + {ι : Type} {oSpec : OracleSpec ι} (i : Fin n) + +-- Showing that the lenses satisfy the completeness and rbr knowledge soundness conditions + +instance oCtxLens_complete : + (oCtxLens R n deg D i).toContext.IsComplete + (relationRound R n deg D i.castSucc) (Simple.inputRelation R deg D) + (relationRound R n deg D i.succ) (Simple.outputRelation R deg) + ((Simple.oracleReduction R deg D oSpec).toReduction.compatContext + (oCtxLens R n deg D i).toContext) +where + proj_complete := by + simp [relationRound, Simple.inputRelation] + unfold oStmtLens + induction n with + | zero => exact Fin.elim0 i + | succ n ih => + intro stmt oStmt hRelIn + simp [← hRelIn] + -- Now it's a statement about polynomials + sorry + lift_complete := by + simp [relationRound] + unfold compatContext oStmtLens + -- simp + -- induction n with + -- | zero => exact Fin.elim0 i + -- | succ n ih => + -- simp + 
sorry + +instance extractorLens_rbr_knowledge_soundness : + Extractor.Lens.IsKnowledgeSound + (relationRound R n deg D i.castSucc) (Simple.inputRelation R deg D) + (relationRound R n deg D i.succ) (Simple.outputRelation R deg) + ((Simple.oracleVerifier R deg D oSpec).toVerifier.compatStatement (oStmtLens R n deg D i)) + (fun _ _ => True) + ⟨oStmtLens R n deg D i, Witness.InvLens.trivial⟩ where + proj_knowledgeSound := by + simp [relationRound, Simple.outputRelation, Verifier.compatStatement, + Simple.oracleVerifier_eq_verifier, Simple.verifier, Verifier.run] + lift_knowledgeSound := by + simp [relationRound, Simple.inputRelation, Statement.Lens.proj] + unfold oStmtLens + induction n with + | zero => exact Fin.elim0 i + | succ n ih => + intro stmt oStmt hRelIn + simp at hRelIn ⊢ + -- Now it's a statement about polynomials + sorry + + +variable [oSpec.FiniteRange] + +theorem reduction_completeness : (reduction R n deg D oSpec i).perfectCompleteness + (relationRound R n deg D i.castSucc) (relationRound R n deg D i.succ) := sorry + +theorem verifier_rbr_knowledge_soundness : + (reduction R n deg D oSpec i).verifier.rbrKnowledgeSoundness + (relationRound R n deg D i.castSucc) (relationRound R n deg D i.succ) + (fun _ => (deg : ℝ≥0) / Fintype.card R) := sorry + +/-- Completeness theorem for single-round of sum-check, obtained by transporting the completeness +proof for the simplified version -/ +theorem oracleReduction_completeness : (oracleReduction R n deg D oSpec i).perfectCompleteness + (relationRound R n deg D i.castSucc) (relationRound R n deg D i.succ) := + OracleReduction.liftContext_perfectCompleteness + (lens := oCtxLens R n deg D i) + (lensComplete := oCtxLens_complete i) + (Simple.oracleReduction_completeness R deg D oSpec) + +/-- Round-by-round knowledge soundness theorem for single-round of sum-check, obtained by + transporting the knowledge soundness proof for the simplified version -/ +theorem oracleVerifier_rbr_knowledge_soundness : + (oracleReduction R 
n deg D oSpec i).verifier.rbrKnowledgeSoundness + (relationRound R n deg D i.castSucc) (relationRound R n deg D i.succ) + (fun _ => (deg : ℝ≥0) / Fintype.card R) := + OracleVerifier.liftContext_rbr_knowledgeSoundness + (stmtLens := oStmtLens R n deg D i) + (witLens := Witness.InvLens.trivial) + (Simple.oracleVerifier R deg D oSpec) + (lensKS := extractorLens_rbr_knowledge_soundness i) + (Simple.oracleVerifier_rbr_knowledge_soundness R deg D oSpec) + +-- /-- State function for round-by-round soundness. No need for this manual definition -/ +-- def stateFunction (i : Fin (n + 1)) : Verifier.StateFunction pSpec oSpec +-- (relationRound R n deg D i.castSucc).language (relationRound R n deg D i.succ).language +-- (reduction R n deg D oSpec i).verifier where +-- toFun := fun m ⟨stmt, oStmt⟩ partialTranscript => match m with +-- -- If `m = 0` (e.g. the transcript is empty), returns whether +-- -- the statement satisfies the relation +-- | 0 => relationRound R n deg D i.castSucc ⟨stmt, oStmt⟩ () +-- -- If `m = 1`, so the transcript contains the new polynomial `p_i`, returns the above check, +-- -- and also whether `p_i` is as expected +-- | 1 => relationRound R n deg D i.castSucc ⟨stmt, oStmt⟩ () +-- ∧ (by simpa using partialTranscript ⟨0, by simp⟩ : R⦃≤ deg⦄[X]) = +-- ⟨∑ x ∈ (univ.map D) ^ᶠ (n - i), (oStmt 0).1 ⸨X ⦃i⦄, stmt.challenges, x⸩'(by simp; omega), +-- sumcheck_roundPoly_degreeLE R n deg D i (oStmt 0).2⟩ +-- -- If `m = 2`, so we get the full transcript, returns the above checks, and also whether the +-- -- updated statement satisfies the new relation +-- | 2 => relationRound R n deg D i.succ ⟨⟨stmt.target, +-- by simpa using +-- Fin.snoc stmt.challenges (by simpa using partialTranscript ⟨1, by simp⟩ : R)⟩, +-- oStmt⟩ () +-- toFun_empty := fun stmt hStmt => by simp_all [Function.language] +-- toFun_next := fun m hDir => match m with +-- | 0 => fun stmt tr hFalse => by simp_all +-- | 1 => nomatch hDir +-- toFun_full := fun stmt tr hFalse => by +-- simp_all 
[Function.language] +-- -- intro stmt' oStmt log h () +-- -- simp [Verifier.run] at h +-- -- have h' : ⟨stmt', oStmt⟩ ∈ Prod.fst '' +-- -- (simulate loggingOracle ∅ ((verifier R n deg D oSpec i).verify stmt tr)).support := by +-- -- simp [h]; exact ⟨log, h⟩ +-- -- contrapose! h' +-- -- rw [← OracleComp.support_map] +-- -- simp [verifier] +-- -- let x := tr ⟨0, by simp⟩ +-- sorry + +-- /-- Trivial extractor since witness is `Unit` -/ +-- def rbrExtractor : Extractor.RoundByRound (pSpec R deg) oSpec (Statement R n i.castSucc) Unit := +-- fun _ _ _ _ => () + +end Security + +namespace Unfolded + +-- The rest of the below are for equivalence checking. We have deduced the construction & security +-- of the single round protocol from its simplified version via context lifting. + +@[reducible] +def proverState (i : Fin n) : ProverState 2 where + PrvState + | 0 => (Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i) + | 1 => (Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i) + | 2 => (Statement R n i.succ) × (∀ i, OracleStatement R n deg i) + +/-- Prover input for the `i`-th round of the sum-check protocol, where `i < n` -/ +def proverIn (i : Fin n) : ProverIn + ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) + Unit ((proverState R n deg i).PrvState 0) where + input := Prod.fst + +/-- Prover interaction for the `i`-th round of the sum-check protocol, where `i < n`. 
-/ +def proverRound (i : Fin n) : ProverRound oSpec (pSpec R deg) where + PrvState := (proverState R n deg i).PrvState + + sendMessage + | ⟨0, _⟩ => fun state => + match n with + | 0 => sorry + | n + 1 => + let ⟨⟨_, challenges⟩, oStmt⟩ := state + let ⟨poly, hp⟩ := oStmt 0 + pure ⟨ ⟨∑ x ∈ (univ.map D) ^ᶠ (n - i), poly ⸨X ⦃i⦄, challenges, x⸩'(by simp; omega), + sumcheck_roundPoly_degreeLE R n deg D i hp⟩, + state⟩ + | ⟨1, h⟩ => nomatch h + + receiveChallenge + | ⟨0, h⟩ => nomatch h + | ⟨1, _⟩ => fun ⟨⟨target, challenges⟩, oStmt⟩ chal => + let ⟨poly, hp⟩ := oStmt 0 + letI newChallenges : Fin i.succ → R := Fin.snoc challenges chal + letI newTarget := ∑ x ∈ (univ.map D) ^ᶠ (n - i - 1), poly ⸨newChallenges, x⸩'(by simp; omega) + ⟨⟨newTarget, newChallenges⟩, fun _ => ⟨poly, hp⟩⟩ + +/-- Since there is no witness, the prover's output for each round `i < n` of the sum-check protocol + is trivial -/ +def proverOut (i : Fin n) : ProverOut + (Statement R n i.succ × (∀ i, OracleStatement R n deg i)) Unit + ((proverState R n deg i).PrvState (Fin.last 2)) where + output := fun x => (x, ()) + +/-- The overall prover for the `i`-th round of the sum-check protocol, where `i < n`. This is only + well-defined for `n > 0`, since when `n = 0` there is no protocol. 
-/ +def prover (i : Fin n) : OracleProver oSpec + (Statement R n i.castSucc) (OracleStatement R n deg) Unit + (Statement R n i.succ) (OracleStatement R n deg) Unit (pSpec R deg) where + toProverState := proverState R n deg i + toProverIn := proverIn R n deg i + sendMessage := (proverRound R n deg D oSpec i).sendMessage + receiveChallenge := (proverRound R n deg D oSpec i).receiveChallenge + toProverOut := proverOut R n deg i + +/-- The (non-oracle) verifier of the sum-check protocol for the `i`-th round, where `i < n + 1` -/ +def verifier (i : Fin n) : Verifier oSpec + ((Statement R n i.castSucc) × (∀ i, OracleStatement R n deg i)) + (Statement R n i.succ × (∀ i, OracleStatement R n deg i)) (pSpec R deg) where + verify := fun ⟨⟨target, challenges⟩, oStmt⟩ transcript => do + let ⟨p_i, _⟩ : R⦃≤ deg⦄[X] := transcript 0 + let r_i : R := transcript 1 + guard (∑ x ∈ (univ.map D), p_i.eval x = target) + pure ⟨⟨p_i.eval r_i, Fin.snoc challenges r_i⟩, oStmt⟩ + +/-- The oracle verifier for the `i`-th round, where `i < n + 1` -/ +def oracleVerifier (i : Fin n) : OracleVerifier oSpec + (Statement R n i.castSucc) (OracleStatement R n deg) + (Statement R n i.succ) (OracleStatement R n deg) (pSpec R deg) where + -- Queries for the evaluations of the polynomial at all points in `D`, + -- plus one query for the evaluation at the challenge `r_i` + -- Check that the sum of the evaluations equals the target, and updates the statement accordingly + -- (the new target is the evaluation of the polynomial at the challenge `r_i`) + verify := fun ⟨target, challenges⟩ chal => do + let evals : List R ← (List.finRange m).mapM + (fun i => do + return ← query + (spec := (oSpec ++ₒ ([OracleStatement R n deg]ₒ ++ₒ [(pSpec R deg).Message]ₒ))) + (Sum.inr <| Sum.inr default) (D i)) + guard (evals.sum = target) + let newTarget ← query + (spec := (oSpec ++ₒ ([OracleStatement R n deg]ₒ ++ₒ [(pSpec R deg).Message]ₒ))) + (Sum.inr <| Sum.inr default) (by simpa only using chal default) + letI newTarget : R 
:= by simpa only + pure ⟨newTarget, Fin.snoc challenges (chal default)⟩ + + embed := Function.Embedding.inl + + hEq := fun _ => rfl + +end Unfolded + +end SingleRound + +end Spec + +-- end for noncomputable section +end + +end Sumcheck diff --git a/ArkLib/ProofSystem/Whir.lean b/ArkLib/ProofSystem/Whir.lean new file mode 100644 index 000000000..5fb7c0640 --- /dev/null +++ b/ArkLib/ProofSystem/Whir.lean @@ -0,0 +1,7 @@ +import ArkLib.ProofSystem.Whir.BlockRelDistance +import ArkLib.ProofSystem.Whir.Folding +import ArkLib.ProofSystem.Whir.GenMutualCorrAgreement +import ArkLib.ProofSystem.Whir.MainThm +import ArkLib.ProofSystem.Whir.OutofDomainSmpl +import ArkLib.ProofSystem.Whir.ProximityGap +import ArkLib.ProofSystem.Whir.ProximityGen diff --git a/ArkLib/ProofSystem/Whir/BlockRelDistance.lean b/ArkLib/ProofSystem/Whir/BlockRelDistance.lean new file mode 100644 index 000000000..c9b69f89b --- /dev/null +++ b/ArkLib/ProofSystem/Whir/BlockRelDistance.lean @@ -0,0 +1,156 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability + +namespace BlockRelDistance + +/-!In the following, we define distances for smooth ReedSolomon codes wrt power and fiber domains, + as per Section 4.3.1, [ACFY24]. We have generalized the definitions for a generic i to present + (i,k)-wise distance measures. This modification is necessary to support following lemmas + from Section 4.3.2. The definitions from Section 4.3.1 correspond to i = 0. -/ + +open ListDecodable NNReal ReedSolomon + +variable {F : Type*} [Field F] + {ι : Type*} [Fintype ι] [Pow ι ℕ] + +/--The `2^k`-th power images over an embedding `φ : ι ↪ F` and a finite set + of elements `S : Finset ι`. 
+ + In particular, it returns the set of field elements `y ∈ F` for which there exists `x ∈ S` + such that `y = (φ x)^(2ᵏ)`. It models the image of the map `x ↦ (φ x)^(2ᵏ)` restricted to `S`. + Semantically: `indexPowT S φ k = { (φ x)^(2ᵏ) | x ∈ S } ⊆ F`. +-/ +def indexPowT (S : Finset ι) (φ : ι ↪ F) (k : ℕ) := { y : F // ∃ x ∈ S, y = (φ x) ^ (2^k) } + +/--For i ≤ k, the generic `2^(k-i)`-th power fiber over `y ∈ indexPowT S φ k`. + For `φ' : ι^(2ⁱ) → F`, this defines the preimage of `y` under the map + `x^(2ⁱ) ↦ x^(2ᵏ)` restricted to `x^(2ⁱ) ∈ S'`. + + It returns the subset `S'` of elements of type `ι^(2ⁱ)` + such that `(x^(2ⁱ))^(2^(k-i)) = x^(2^k) = y`. + Example i = 0 : powFiberT 0 k S' φ' y = { x ∈ S' | (x)^(2^k) = y }. + Example i = 1 : powFiberT 1 k S' φ' y = { x^2 ∈ S' | (x^2)^(2^(k-1)) = y }. +-/ +def powFiberT (i : ℕ) {k : ℕ} {S : Finset ι} {φ : ι ↪ F} (S' : Finset (indexPowT S φ i)) + (φ' : (indexPowT S φ i) ↪ F) (y : indexPowT S φ k) := + { x : (indexPowT S φ i) // x ∈ S' ∧ (φ' x) ^ (2^(k-i)) = y.val } + +/--Definition 4.16 + For `ι` be a smooth evaluation domain, `k` be a folding parameter, `z ∈ (ι^(2ᵏ))`, + Block is the set of elements `{ y ∈ S', y ^ 2^(k-i) = z }`, for `S' : Finset ι^(2ⁱ)`.-/ +def block (i : ℕ) {k : ℕ} {S : Finset ι} {φ : ι ↪ F} + (S' : Finset (indexPowT S φ i)) + (φ' : (indexPowT S φ i) ↪ F) (z : indexPowT S φ k) + [DecidableEq F] [DecidableEq ι] [Smooth φ] := + powFiberT i S' φ' z + +/--The class DecidableBlockDisagreement provides a decidability instance for testing + pointwise inequality of two functions `f, g : ι^(2ⁱ) → F` on elements of `block i k S' φ' z`, + for all `z ∈ LpowT S' φ' k`. + + This class abstracts the decidability condition required to determine whether two + functions disagree on any point in the preimage of `z` under the map `x^(2ⁱ) ↦ x^(2ᵏ)` over the + evaluation domain `φ' : ι^(2ⁱ) ↪ F`. This is useful in defining sets of such `z`. 
+-/
+class DecidableBlockDisagreement
+  (i k : ℕ) {S : Finset ι} {φ : ι ↪ F}
+  [DecidableEq F] [DecidableEq ι] [Smooth φ]
+  (f : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i))
+  (φ' : (indexPowT S φ i) ↪ F) where
+  dec_inst :
+    ∀ z : indexPowT S φ k, ∀ g : (indexPowT S φ i) → F,
+      Decidable (∃ y : block i S' φ' z, f y.val ≠ g y.val)
+
+/--Let C be a smooth ReedSolomon code `C = RS[F, ι^(2ⁱ), φ', m]` and `f,g : ι^(2ⁱ) → F`, then
+  the (i,k)-wise block relative distance is defined as
+  Δᵣ(i, k, f, S', φ', g) = |{z ∈ ι ^ 2^k : ∃ y ∈ Block(i,k,S',φ',z) f(y) ≠ g(y)}| / |ι^(2^k)|.
+
+  Below, we define a disagreementSet(i,k,f,S',φ') as a map (g → Finset (indexPowT S φ k))
+  using the class DecidableBlockDisagreement, to filter a finite subset of the Finset
+  (indexPowT S φ k), as per {z ∈ ι ^ 2^k : ∃ y ∈ Block(i,k,S',φ',z) f(y) ≠ g(y)} for a given g. -/
+noncomputable def disagreementSet
+  (i k : ℕ) {S : Finset ι} {φ : ι ↪ F}
+  [DecidableEq F] [DecidableEq ι] [Smooth φ]
+  (f : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i))
+  (φ' : (indexPowT S φ i) ↪ F) [∀ i : ℕ, Fintype (indexPowT S φ i)]
+  [h : DecidableBlockDisagreement i k f S' φ'] :
+    (g : (indexPowT S φ i) → F) → Finset (indexPowT S φ k) :=
+  fun g =>
+    Finset.univ.filter (fun z => @decide _ (h.dec_inst z g))
+
+/--Definition 4.17
+  Given the disagreementSet from above, we obtain the block relative distance as
+  |disagreementSet| / |ι^(2^k)|. -/
+noncomputable def blockRelDistance
+  (i k : ℕ) {S : Finset ι} {φ : ι ↪ F}
+  [DecidableEq F] [DecidableEq ι] [Smooth φ]
+  (f : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i))
+  (φ' : (indexPowT S φ i) ↪ F) [∀ i : ℕ, Fintype (indexPowT S φ i)]
+  [h : DecidableBlockDisagreement i k f S' φ'] :
+    (g : (indexPowT S φ i) → F) → ℝ≥0 :=
+  fun g =>
+    (disagreementSet i k f S' φ' g).card / (Fintype.card (indexPowT S φ k) : ℝ≥0)
+
+/--notation `Δᵣ(i, k, f, S', φ', g)` is the (i,k)-wise block relative distance.-/
+scoped notation "Δᵣ( "i", "k", "f", "S'", "φ'", "g" )" => blockRelDistance i k f S' φ' g
+
+/--For the set S ⊆ F^ι, we define the minimum block relative distance wrt set S.-/
+noncomputable def minBlockRelDistance
+  (i k : ℕ) {S : Finset ι} {φ : ι ↪ F}
+  [DecidableEq F] [DecidableEq ι] [Smooth φ]
+  (f : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i))
+  (φ' : (indexPowT S φ i) ↪ F) (Set : Set ((indexPowT S φ i) → F))
+  [∀ i : ℕ, Fintype (indexPowT S φ i)]
+  [h : DecidableBlockDisagreement i k f S' φ'] : ℝ≥0 :=
+  sInf { d : ℝ≥0 | ∃ g ∈ Set, Δᵣ(i, k, f, S', φ', g) = d}
+
+/--notation `Δₛ(i, k, f, S', φ', Set)` denotes the minimum block relative distance wrt `Set`.-/
+scoped notation "Δₛ( "i", "k", "f", "S'", "φ'", "Set" )" => minBlockRelDistance i k f S' φ' Set
+
+/--Definition 4.18
+  For a smooth ReedSolomon code C = RS[F, ι^(2ⁱ), φ', m], proximity parameter δ ∈ [0,1]
+  function f : ι^(2ⁱ) → F, we define the following as the list of codewords of C δ-close to f,
+  i.e., u ∈ C such that Δᵣ(i, k, f, S', φ', u) ≤ δ.
+
+  NOTE(review): the constraint δ ≤ 1 is not enforced here; the previous
+  `let hδLe := δ ≤ 1` merely named the proposition without using it, so it was removed.
+  Callers (e.g. Claim 4.19) carry `δ ≤ 1` as an explicit hypothesis instead. -/
+noncomputable def listBlockRelDistance
+  (i k : ℕ) {S : Finset ι} {φ : ι ↪ F} {φ' : (indexPowT S φ i) ↪ F}
+  {m : ℕ} [DecidableEq F] [DecidableEq ι] [Smooth φ]
+  (f : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i))
+  [∀ i : ℕ, Fintype (indexPowT S φ i)] [DecidableEq (indexPowT S φ i)] [Smooth φ']
+  (C : Set ((indexPowT S φ i) → F)) (hcode : C = smoothCode φ' m) (δ : ℝ≥0)
+  [h : DecidableBlockDisagreement i k f S' φ'] : (Set ((indexPowT S φ i) → F)) :=
+  { u ∈ C | Δᵣ(i, k, f, S', φ', u) ≤ δ }
+
+/--`Λᵣ(i, k, f, S', C, hcode, δ)` denotes the list of codewords of C δ-close to f,
+  wrt the block relative distance.-/
+scoped notation "Λᵣ( "i", "k", "f", "S'", "C", "hcode", "δ")" =>
+  listBlockRelDistance i k f S' C hcode δ
+
+/--Claim 4.19
+  For a smooth ReedSolomon code `C = RS[F, ι^(2ⁱ), m]`, codewords `f, g : ι^(2ⁱ) → F`,
+  we have that the relative Hamming distance `δᵣ(f,g)` is bounded by the
+  block relative distance `Δᵣ(i, k, f, S', φ', g)`.
+ As a result, we have + `Λᵣ(i, k, f, S', C, hcode, δ)` is bounded by + `Λ(f, C, δ)` (list of codewords of C δ-close to f, wrt relative Hamming distance) +-/ +lemma blockRelDistance_le_hammingDistance + (i k : ℕ) {S : Finset ι} {φ : ι ↪ F} {φ' : (indexPowT S φ i) ↪ F} + {m : ℕ} [DecidableEq F] [DecidableEq ι] [Smooth φ] + (f g : (indexPowT S φ i) → F) (S' : Finset (indexPowT S φ i)) + [∀ i : ℕ, Fintype (indexPowT S φ i)] [DecidableEq (indexPowT S φ i)] [Smooth φ'] + (C : Set ((indexPowT S φ i) → F)) (hcode : C = smoothCode φ' m) (δ : ℝ≥0) + [h : DecidableBlockDisagreement i k f S' φ'] + (hBound : (δᵣ(f, g) : ℝ) ≤ Δᵣ(i, k, f, S', φ', g)) : + ∀ {δ : ℝ≥0} (hδLe : δ ≤ 1), + let listHamming := relHammingBall C f δ + let listBlock := Λᵣ(i, k, f, S', C, hcode, δ) + listBlock ⊆ listHamming := by sorry + +end BlockRelDistance diff --git a/ArkLib/ProofSystem/Whir/Folding.lean b/ArkLib/ProofSystem/Whir/Folding.lean new file mode 100644 index 000000000..9ec7cb891 --- /dev/null +++ b/ArkLib/ProofSystem/Whir/Folding.lean @@ -0,0 +1,296 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.MvPolynomial.LinearMvExtension +import ArkLib.ProofSystem.Whir.BlockRelDistance +import ArkLib.ProofSystem.Whir.GenMutualCorrAgreement + +namespace Fold + +open BlockRelDistance Vector Finset + +variable {F : Type} [Field F] {ι : Type} [Pow ι ℕ] + +/-- `∃ x ∈ S`, such that `y = x ^ 2^(k+1)`. `extract_x` returns `z = x ^ 2^k` such that `y = z^2`. 
+-/ +noncomputable def extract_x + (S : Finset ι) (φ : ι ↪ F) (k : ℕ) (y : indexPowT S φ (k + 1)) : indexPowT S φ k := + let x := Classical.choose y.property + let hx := Classical.choose_spec y.property + let z := (φ x) ^ (2^k) + ⟨z, ⟨x, hx.1, rfl⟩⟩ + +/-- Given a function `f : (ι^(2ᵏ)) → F`, foldf operates on two inputs: + element `y ∈ LpowT S (k+1)`, hence `∃ x ∈ S, s.t. y = x ^ 2^(k+1)` and `α ∈ F`. + It obtains the square root of y as `xPow := extract_x S φ k y`, + here xPow is of the form `x ^ 2^k`. + It returns the value `(f(xPow) + f(- xPow))/2 + α * ((f(xPow) - f(- xPow))/(2 * xPow))`. -/ +noncomputable def foldf (S : Finset ι) (φ : ι ↪ F) + {k : ℕ} [ Neg (indexPowT S φ k) ] (y : indexPowT S φ (k+1)) + (f : indexPowT S φ k → F) (α : F) : F := + let xPow := extract_x S φ k y + let fx := f xPow + let f_negx := f (-xPow) + (fx + f_negx) / 2 + α * ((fx - f_negx) / (2 * (xPow.val : F))) + +/-- The function `fold_k_core` runs a recursion, + for a function `f : ι → F` and a vector `αs` of size i + For `i = 0`, `fold_k_core` returns `f` evaluated at `x ∈ S` + For `i = (k+1) ≠ 0`, + αs is parsed as α || αs', where αs' is of size k + function `fk : (ι^2ᵏ) → F` is obtained by making a recursive call to + `fold_k_core` on input `αs'` + we obtain the final function `(ι^(2^(k+1))) → F` by invoking `foldf` with `fk` and `α`. 
-/ +noncomputable def fold_k_core {S : Finset ι} {φ : ι ↪ F} (f : (indexPowT S φ 0) → F) + [∀ i : ℕ, Neg (indexPowT S φ i)] : (i : ℕ) → (αs : Fin i → F) → + indexPowT S φ i → F +| 0, _ => fun x₀ => f x₀ +| k+1, αs => fun y => + let α := αs 0 + let αs' : Fin k → F := fun i => αs (Fin.succ i) + let fk := fold_k_core f k αs' + foldf S φ y fk α + +/-- Definition 4.14, part 1 + fold_k takes a function `f : ι → F` and a vector `αs` of size k + and returns a function `Fold : (ι^2ᵏ) → F` -/ +noncomputable def fold_k + {S : Finset ι} {φ : ι ↪ F} {k : ℕ} + [∀ j : ℕ, Neg (indexPowT S φ j)] + (f : (indexPowT S φ 0) → F) (αs : Fin k → F) : indexPowT S φ k → F := + fold_k_core f k αs + +/-- Definition 4.14, part 2 + fold_k takes a set of functions `set : Set (ι → F)` and a vector `αs` of size k + and returns a set of functions `Foldset : Set ((ι^2ᵏ) → F)` -/ +noncomputable def fold_k_set + {S : Finset ι} {φ : ι ↪ F} {k : ℕ} + [∀ j : ℕ, Neg (indexPowT S φ j)] + (set : Set ((indexPowT S φ 0) → F)) (αs : Fin k → F) : Set (indexPowT S φ k → F) := + { g | ∃ f ∈ set, g = fold_k f αs } + +section FoldingLemmas + +open CorrelatedAgreement Generator LinearMvExtension ListDecodable + NNReal ReedSolomon ProbabilityTheory + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [Pow ι ℕ] [DecidableEq ι] + +/-- Claim 4.15 + Let `f : ι → F`, `α ∈ Fᵏ` is the folding randomness, and let `g : (ι^(2ᵏ) → F) = fold_k(f,α)` + for k ≤ m, `f ∈ RS[F,ι,m]` then we have `g ∈ RS[F,ι^(2ᵏ),(m-k)]` + if fPoly be the multilinear extension of f, then we have + (m-k)-variate multilinear extension of g as `gPoly = fPoly(α₁,α₂,...αₖ,X_k,..,X_{m-1})` -/ +lemma fold_f_g + {S : Finset ι} {φ : ι ↪ F} {k m : ℕ} + {φ_0 : (indexPowT S φ 0) ↪ F} {φ_k : (indexPowT S φ k) ↪ F} + [Fintype (indexPowT S φ 0)] [DecidableEq (indexPowT S φ 0)] [Smooth φ_0] + [Fintype (indexPowT S φ k)] [DecidableEq (indexPowT S φ k)] [Smooth φ_k] + [∀ i : ℕ, Neg (indexPowT S φ i)] + (αs : Fin k → F) (hk : k ≤ m) + (f 
: smoothCode φ_0 m) : + let f_fun := (f : (indexPowT S φ 0) → F) + let fPoly := mVdecode f + let g := fold_k f_fun αs + let gPoly := partialEval fPoly αs + g ∈ smoothCode φ_k (m-k) := +sorry + +/-- +The `GenMutualCorrParams` class captures the necessary parameters and assumptions +to model a sequence of proximity generators for a set of smooth ReedSolomon codes. +It contains the following: + +- `inst1`, `inst2`, `inst3`: typeclass instances required to operate on `ι^(2ⁱ)` + (finiteness, nonemptiness, and decidable equality). +- `φ_i`: per-round embeddings from `ι^(2ⁱ)` into `F`. +- `inst4`: smoothness assumption for each `φ_i`. +- `Gen i`: the base proximity generators for underlying code `Cᵢ` +- `Gen_α i`: the proximity generators wrt the generator function + `Gen(parℓ,α) : {1,α,α²,..,α^{parℓ-1}}` defined as per `hgen` for code `Cᵢ` +- `inst5`, `inst6` : typeclass instances denoting finiteness of `parℓ` + underlying `Genᵢ` and `Gen_αᵢ` +- `BStar`, `errStar`: parameters denoting code agreement and error probability thresholds per round. +- `h`: main agreement assumption, stating that each `Gen_α` satisfies mutual correlated agreement + for its underlying code. 
+- `hδLe` : `δ ∈ (0, 1 - max_{j : k} BStar(Cⱼ, parℓ = 2))` +-/ +class GenMutualCorrParams (S : Finset ι) (φ : ι ↪ F) (k : ℕ) where + m : ℕ + δ : ℝ≥0 + + inst1 : ∀ i : Fin (k+1), Fintype (indexPowT S φ i) + inst2 : ∀ i : Fin (k+1), Nonempty (indexPowT S φ i) + inst3 : ∀ i : Fin (k+1), DecidableEq (indexPowT S φ i) + + φ_i : ∀ i : Fin (k+1), (indexPowT S φ i) ↪ F + inst4 : ∀ i : Fin (k+1), Smooth (φ_i i) + + Gen : ∀ i : Fin (k+1), ProximityGenerator (indexPowT S φ i) F + Gen_α : ∀ i : Fin (k+1), ProximityGenerator (indexPowT S φ i) F + exp : ∀ i : Fin (k+1), (Gen i).parℓ → ℕ + + inst5 : ∀ i : Fin (k+1), Fintype (Gen i).parℓ + inst6 : ∀ i : Fin (k+1), Fintype (Gen_α i).parℓ + + hgen : ∀ i : Fin (k+1), ∀ α : F, Gen_α i = proximityGenerator_α (Gen i) α (φ_i i) (m-i) (exp i) + + BStar : ∀ i : Fin (k+1), (Set (indexPowT S φ i → F)) → Type → ℝ≥0 + errStar : ∀ i : Fin (k+1), (Set (indexPowT S φ i → F)) → Type → ℝ → ENNReal + + h : ∀ i : Fin (k+1), genMutualCorrAgreement (Gen_α i) + (BStar i (Gen_α i).C (Gen_α i).parℓ) + (errStar i (Gen_α i).C (Gen_α i).parℓ) + + h_card : ∀ j : Fin (k + 1), Fintype.card ((Gen_α j).parℓ) = 2 + hδLe : δ ≤ 1 - Finset.univ.sup (fun j => BStar j (Gen_α j).C (Gen_α j).parℓ) + +/-- Theorem 4.20 + Let C = RS[F,ι,m] be a smooth ReedSolomon code + For k ≤ m and 0 ≤ i < k, + let Cⁱ = RS[F,ι^(2ⁱ),m-i] and let Gen(parℓ,α) be a proximity generator with + mutual correlated agreement for C⁰,...,C^{k-1} with proximity bounds BStar and errStar + Then for every `f : ι → F` and `δ ∈ (0, 1 - max {i : k} BStar(Cⁱ, 2))` + `Pr_{α ← F} [ fold_k_set(Λᵣ(0,k,f,S',C,hcode,δ),vecα) ≠ Λ(Cᵏ,fold_k(f,vecα),δ)]` + `< ∑ i : k errStar(Cⁱ,2,δ)`, + where fold_k_set and fold_k are as defined above, + vecα is generated from α as `{1,α,α²,..}` + `Λᵣ(0,k,f,S',C,hcode,δ)` corresponds to the list of codewords of C δ-close to f, + wrt (0,k)-wise block relative distance. 
+ `Λ(Cᵏ,fold(f,vecα),δ)` is the list of codewords of Cᵏ δ-close to foldf(f, vecα), + wrt the relative Hamming distance + Below, we use an instance of the class `GenMutualCorrParams` to capture the + conditions of proxmity generator with mutual correlated agreement for codes + C⁰,...,C^{k-1}. +-/ +theorem folding_listdecoding_if_genMutualCorrAgreement + {S : Finset ι} {φ : ι ↪ F} [Smooth φ] {k m : ℕ} + {S' : Finset (indexPowT S φ 0)} {φ' : (indexPowT S φ 0) ↪ F} + [∀ i : ℕ, Fintype (indexPowT S φ i)] [DecidableEq (indexPowT S φ 0)] [Smooth φ'] + [h : ∀ {f : (indexPowT S φ 0) → F}, DecidableBlockDisagreement 0 k f S' φ'] + [∀ i : ℕ, Neg (indexPowT S φ i)] + {C : Set ((indexPowT S φ 0) → F)} (hcode : C = smoothCode φ' m) (hLe : k ≤ m) + {GenFun : F → Fin k → F} {δ : ℝ≥0} + {params : GenMutualCorrParams S φ k} : + + -- necessary typeclasses of underlying domain (ιᵢ)^2ʲ regarding finiteness, + -- non-emptiness and smoothness + let _ : ∀ j : Fin (k + 1), Fintype (indexPowT S φ j) := params.inst1 + let _ : ∀ j : Fin (k + 1), Nonempty (indexPowT S φ j) := params.inst2 + let _ : ∀ j : Fin (k + 1), Fintype (params.Gen_α j).parℓ := params.inst6 + + Pr_{let α ←$ᵖ F}[ ∀ {f : (indexPowT S φ 0) → F}, + + let listBlock : Set ((indexPowT S φ 0) → F) := Λᵣ(0, k, f, S', C, hcode, δ) + let vec_α := GenFun α + let fold := fold_k f vec_α + let foldSet := fold_k_set listBlock vec_α + let kFin : Fin (k + 1) := ⟨k, sorry⟩ + let Cₖ := (params.Gen_α kFin).C + let listHamming := relHammingBall Cₖ fold δ + + foldSet ≠ listHamming + ] < (∑ i : Fin (k+1), + params.errStar i (params.Gen_α i).C (params.Gen_α i).parℓ δ) +:= by sorry + +/-- Lemma 4.21 + Let `C = RS[F,ι,m]` be a smooth ReedSolomon code and k ≤ m + Denote `C' = RS[F,ι^2,m-1]`, then for every `f : ι → F` and `δ ∈ (0, 1 - BStar(C',2))` + `Pr_{α ← F} [ fold_k_set(Λᵣ(0,k,f,S_0,C,δ),α) ≠ Λᵣ(1,k-1,foldf(f,α),S_1,C',δ) ]` + `< errStar(C',2,δ)` + where `foldf(f,α)` returns a function `ι^2 → F`, + `S_0` and `S_1` denote finite sets of elements 
of type ι and ι², and + `Λᵣ` denotes the list of δ-close codewords wrt block relative distance. + `Λᵣ(0,k,f,S_0,C)` denotes Λᵣ at f : ι → F for code C and + `Λᵣ(1,k,foldf(f,α),S_1,C')` denotes Λᵣ at foldf : ι^2 → F for code C'. -/ +lemma folding_preserves_listdecoding_base + {S : Finset ι} {k m : ℕ} {φ : ι ↪ F} [Smooth φ] {δ : ℝ≥0} + {S_0 : Finset (indexPowT S φ 0)} {S_1 : Finset (indexPowT S φ 1)} + {φ_0 : (indexPowT S φ 0) ↪ F} {φ_1 : (indexPowT S φ 1) ↪ F} + [∀ i : ℕ, Fintype (indexPowT S φ i)] [∀ i : ℕ, DecidableEq (indexPowT S φ i)] + [Smooth φ_0] [Smooth φ_1] + [h : ∀ {f : (indexPowT S φ 0) → F}, DecidableBlockDisagreement 0 k f S_0 φ_0] + [h : ∀ {f : (indexPowT S φ 1) → F}, DecidableBlockDisagreement 1 k f S_1 φ_1] + [∀ i : ℕ, Neg (indexPowT S φ i)] + {C : Set ((indexPowT S φ 0) → F)} (hcode : C = smoothCode φ_0 m) + (C' : Set ((indexPowT S φ 1) → F)) (hcode' : C' = smoothCode φ_1 (m-1)) + {BStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0} + {errStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0 → ℝ≥0} : + Pr_{let α ←$ᵖ F}[ ∀ { f : (indexPowT S φ 0) → F} (hδLe: δ ≤ 1 - (BStar C' 2)), + + let listBlock : Set ((indexPowT S φ 0) → F) := Λᵣ(0, k, f, S_0, C, hcode, δ) + let vec_α : Fin 1 → F := (fun _ : Fin 1 => α) + let foldSet := fold_k_set listBlock vec_α + let fold := fold_k f vec_α + let listBlock' : Set ((indexPowT S φ 1) → F) := Λᵣ(1, k, fold, S_1, C', hcode', δ) + + foldSet ≠ listBlock' + ] < errStar C' 2 δ + := by sorry + +/-- Lemma 4.22 + Following same parameters as Lemma 4.21 above, and states + `∀ α : F, Λᵣ(0,k,f,S_0,C,δ),α) ⊆ Λᵣ(1,k-1,foldf(f,α),S_1,C',δ)` -/ +lemma folding_preserves_listdecoding_bound + {S : Finset ι} {k m : ℕ} {φ : ι ↪ F} [Smooth φ] {δ : ℝ≥0} {f : (indexPowT S φ 0) → F} + {S_0 : Finset (indexPowT S φ 0)} {S_1 : Finset (indexPowT S φ 1)} + {φ_0 : (indexPowT S φ 0) ↪ F} {φ_1 : (indexPowT S φ 1) ↪ F} + [∀ i : ℕ, Fintype (indexPowT S φ i)] [∀ i : ℕ, DecidableEq (indexPowT S φ i)] + [Smooth φ_0] [Smooth φ_1] + [h : ∀ {f : (indexPowT S φ 0) → 
F}, DecidableBlockDisagreement 0 k f S_0 φ_0] + [h : ∀ {f : (indexPowT S φ 1) → F}, DecidableBlockDisagreement 1 k f S_1 φ_1] + [∀ i : ℕ, Neg (indexPowT S φ i)] + {C : Set ((indexPowT S φ 0) → F)} (hcode : C = smoothCode φ_0 m) + (C' : Set ((indexPowT S φ 1) → F)) (hcode' : C' = smoothCode φ_1 (m-1)) + {BStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0} + {errStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0 → ℝ≥0} : + + ∀ α : F, + + let listBlock : Set ((indexPowT S φ 0) → F) := Λᵣ(0, k, f, S_0, C, hcode, δ) + let vec_α : Fin 1 → F := (fun _ : Fin 1 => α) + let foldSet := fold_k_set listBlock vec_α + let fold := fold_k f vec_α + let listBlock' : Set ((indexPowT S φ 1) → F) := Λᵣ(1, k, fold, S_1, C', hcode', δ) + + foldSet ⊆ listBlock' + := by sorry + +/-- Lemma 4.23 + Following same parameters as Lemma 4.21 above, and states + `Pr_{α ← F} [ Λᵣ(1,k-1,foldf(f,α),S_1,C',δ) ¬ ⊆ Λᵣ(0,k,f,S_0,C,δ),α) ] < errStar(C',2,δ)` -/ +lemma folding_preserves_listdecoding_base_ne_subset + {S : Finset ι} {k m : ℕ} {φ : ι ↪ F} [Smooth φ] {δ : ℝ≥0} + {S_0 : Finset (indexPowT S φ 0)} {S_1 : Finset (indexPowT S φ 1)} + {φ_0 : (indexPowT S φ 0) ↪ F} {φ_1 : (indexPowT S φ 1) ↪ F} + [∀ i : ℕ, Fintype (indexPowT S φ i)] [∀ i : ℕ, DecidableEq (indexPowT S φ i)] + [Smooth φ_0] [Smooth φ_1] + [h : ∀ {f : (indexPowT S φ 0) → F}, DecidableBlockDisagreement 0 k f S_0 φ_0] + [h : ∀ {f : (indexPowT S φ 1) → F}, DecidableBlockDisagreement 1 k f S_1 φ_1] + [∀ i : ℕ, Neg (indexPowT S φ i)] + {C : Set ((indexPowT S φ 0) → F)} (hcode : C = smoothCode φ_0 m) + (C' : Set ((indexPowT S φ 1) → F)) (hcode' : C' = smoothCode φ_1 (m-1)) + {BStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0} + {errStar : (Set (indexPowT S φ 1 → F)) → ℕ → ℝ≥0 → ℝ≥0} : + Pr_{let α ←$ᵖ F}[ ∀ { f : (indexPowT S φ 0) → F} (hδLe: δ ≤ 1 - (BStar C' 2)), + + let listBlock : Set ((indexPowT S φ 0) → F) := Λᵣ(0, k, f, S_0, C, hcode, δ) + let vec_α : Fin 1 → F := (fun _ : Fin 1 => α) + let foldSet := fold_k_set listBlock vec_α + let fold := fold_k f 
vec_α + let listBlock' : Set ((indexPowT S φ 1) → F) + := Λᵣ(1, k, fold, S_1, C', hcode', δ) + + ¬ (listBlock' ⊆ foldSet) + ] < errStar C' 2 δ + := by sorry + +end FoldingLemmas + +end Fold diff --git a/ArkLib/ProofSystem/Whir/GenMutualCorrAgreement.lean b/ArkLib/ProofSystem/Whir/GenMutualCorrAgreement.lean new file mode 100644 index 000000000..e15a72799 --- /dev/null +++ b/ArkLib/ProofSystem/Whir/GenMutualCorrAgreement.lean @@ -0,0 +1,192 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.CodingTheory.InterleavedCode +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.Probability.Notation +import ArkLib.ProofSystem.Whir.ProximityGen + +namespace CorrelatedAgreement + +open NNReal Generator ProbabilityTheory ReedSolomon + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι parℓ : Type} [Fintype ι] [Nonempty ι] [Fintype parℓ] + +/-- For `parℓ` functions `fᵢ : ι → 𝔽`, distance `δ`, generator function `GenFun: 𝔽 → parℓ → 𝔽` + and linear code `C` the predicate `proximityCondition(r)` is true, if `∃ S ⊆ ι`, s.t. + the following three conditions hold + (i) `|S| > (1-δ)*|ι|` and + (ii) `∃ u ∈ C, u(S) = ∑ j : parℓ, rⱼ * fⱼ(S)` + (iii) `∃ i : parℓ, ∀ u' ∈ C, u'(S) ≠ fᵢ(S)` -/ +def proximityCondition (f : parℓ → ι → F) (δ : ℝ≥0) (GenFun : F → parℓ → F) + (C : LinearCode ι F): F → Prop + | r => + ∃ S : Finset ι, + (S.card : ℝ≥0) > (1-δ) * Fintype.card ι ∧ + ∃ u ∈ C, ∀ s ∈ S, u s = ∑ j : parℓ, GenFun r j * f j s ∧ + ∃ i : parℓ, ∀ u' ∈ C, ∀ s ∈ S, u' s ≠ f i s + +/-- Definition 4.9 + Let `C` be a linear code, then Gen is a proximity generator with mutual correlated agreement, + if for `parℓ` functions `fᵢ : ι → F` and distance `δ < 1 - B(C,parℓ)`, + `Pr_{ r ← F } [ proximityCondition(r) ] ≤ errStar(δ)`. 
-/ +noncomputable def genMutualCorrAgreement + (Gen : ProximityGenerator ι F) [Fintype Gen.parℓ] + (BStar : ℝ) (errStar : ℝ → ENNReal) := + ∀ (f : Gen.parℓ → ι → F) (δ : ℝ≥0) (_hδ : δ < 1 - (Gen.B Gen.C Gen.parℓ)), + Pr_{let r ←$ᵖ F}[ (proximityCondition f δ Gen.Fun Gen.C) r ] ≤ errStar δ + +/-- Lemma 4.10 + Let `C` be a linear code with minimum distance `δ_C`, `Gen` be a proximity generator for C + with parameters `B` and `err`, then Gen has mutual correlated agreement with proximity bounds + `BStar = min {1 - δ_C/2, B}` and `errStar = err`. -/ +lemma genMutualCorrAgreement_le_bound + (Gen : ProximityGenerator ι F) [Fintype Gen.parℓ] + (BStar : ℝ) (errStar : ℝ → ENNReal) + (C : Set (ι → F)) (hC : C = Gen.C) + (h: genMutualCorrAgreement Gen BStar errStar) : + BStar < min (1 - (δᵣ C) / 2 : ℝ) (Gen.B Gen.C Gen.parℓ) + ∧ + errStar = Gen.err Gen.C Gen.parℓ := by sorry + +/-- Corollary 4.11 + Let `C` be a (smooth) ReedSolomon Code with rate `ρ`, then the function + `Gen(parℓ,α)={1,α,..,α^(parℓ-1)}` is a proximity generator for Gen with + mutual correlated agreement with proximity bounds + `BStar = (1+ρ) / 2` + `errStar = (parℓ-1)*2^m / ρ*|F|`. 
+ + function `Gen(parℓ,α)={1,α,..,α ^ parℓ-1}` +-/ +noncomputable def gen_α (α : F) (parℓ : Type) (exp : parℓ → ℕ): F → parℓ → F := + fun _ j => α ^ (exp j) + +/-- The proximity generator for smooth ReedSolomon codes wrt function + `Gen(parℓ,α)={1,α,..,α ^ parℓ-1}` +-/ +noncomputable def proximityGenerator_α + [DecidableEq ι] (Gen : ProximityGenerator ι F) [hℓ : Fintype Gen.parℓ] + (α : F) (φ : ι ↪ F) (m : ℕ) [Smooth φ] (exp : Gen.parℓ → ℕ) : + ProximityGenerator ι F := + { + C := smoothCode φ m, + parℓ := Gen.parℓ, + hℓ := hℓ, + Fun := gen_α α Gen.parℓ exp, + B := Gen.B, + err := Gen.err, + proximity := by + intro f δ hδ hprob + sorry + } + +/-- Corollary 4.11 + Let `C` be a smooth ReedSolomon code with rate `ρ`, then `Gen_α` is the proximity generator with + mutual correlated agreement with bounds + BStar = (1-ρ) / 2 + errStar = (parℓ-1)*2^m / ρ*|F|. -/ +lemma genMutualCorrAgreement_rsc_le_bound + [DecidableEq ι] (Gen Gen_α: ProximityGenerator ι F) + [Fintype Gen.parℓ] [Fintype Gen_α.parℓ] + (α : F) (φ : ι ↪ F) (m : ℕ) [Smooth φ] (exp : Gen.parℓ → ℕ) + (BStar rate : ℝ) (errStar : ℝ → ENNReal) + -- `Gen_α` is the proximity generator wrt function `Gen(parℓ, α)` + (hGen : Gen_α = proximityGenerator_α Gen α φ m exp) + -- the proof that `Gen_α` is the proximity generator with mutual correlated agreement + -- with proximity bound parameters BStar and errStar + (h : genMutualCorrAgreement Gen_α BStar errStar) + (hrate : rate = LinearCode.rate (smoothCode φ m)) : + BStar = (1 - rate) / 2 ∧ + errStar = fun _ => ENNReal.ofReal + ((Fintype.card Gen.parℓ - 1) * (2^m / rate * (Fintype.card F))) + := by sorry + + +/-- Conjecture 4.12 + The function `Gen(parℓ,α)={1,α,..,α ^ parℓ-1}` is a proximity generator with + mutual correlated agreement for every (smooth) ReedSolomon code `C` with rate `ρ = 2^m / |ι|`. + Below we state two conjectures for the parameters of the proximity bound. + + 1. 
Upto Johnson bound: BStar = √ρ and + errStar = (parℓ-1) * 2^2m / |F| * (2 * min {1 - √ρ - δ, √ρ/20}) ^ 7. -/ +theorem genMutualCorrAgreement_le_johnsonBound + [DecidableEq ι] (Gen Gen_α: ProximityGenerator ι F) + [Fintype Gen.parℓ] [Fintype Gen_α.parℓ] + (α : F) (φ : ι ↪ F) (m : ℕ) [Smooth φ] (exp : Gen.parℓ → ℕ) + (BStar rate δ: ℝ) (errStar : ℝ → ENNReal) + (hGen : Gen_α = proximityGenerator_α Gen α φ m exp) + (h : genMutualCorrAgreement Gen_α BStar errStar) + (hrate : rate = LinearCode.rate (smoothCode φ m)) : + let minval := min (1 - Real.sqrt rate - δ) (Real.sqrt rate / 20) + BStar = Real.sqrt rate ∧ + ∀ {η : ℝ≥0} (hδPos : δ > 0) (hδLe : δ < 1 - Real.sqrt rate - η), + errStar = fun δ => + ENNReal.ofReal + ((Fintype.card Gen.parℓ - 1) * 2 ^ (2 * m) / (Fintype.card ι * (2 * minval)^7)) + := by sorry + +/-- 2. Upto capacity: BStar = ρ and ∃ c₁,c₂,c₃ ∈ ℕ s.t. ∀ η > 0 and 0 < δ < 1 - ρ - η + errStar = (parℓ-1)^c₂ * d^c₂ / η^c₁ * ρ^(c₁+c₂) * |F|, where d = 2^m is the degree. -/ +theorem genMutualCorrAgreement_le_capacity + [DecidableEq ι] (Gen Gen_α: ProximityGenerator ι F) + [Fintype Gen.parℓ] [Fintype Gen_α.parℓ] + (α : F) (φ : ι ↪ F) (m : ℕ) [Smooth φ] (exp : Gen.parℓ → ℕ) + (BStar rate δ: ℝ) (errStar : ℝ → ENNReal) + (hGen : Gen_α = proximityGenerator_α Gen α φ m exp) + (h : genMutualCorrAgreement Gen_α BStar errStar) + (hrate : rate = LinearCode.rate (smoothCode φ m)) : + BStar = rate ∧ + ∃ (c₁ c₂ c₃ : ℕ), ∀ {η : ℝ≥0} (hδPos : δ > 0) (hδLe : δ < 1 - rate - η), + errStar = fun δ => ENNReal.ofReal + ((Fintype.card Gen.parℓ - 1)^c₂ * (2^m)^c₂ / (η^c₁ * rate^(c₁+c₂) * (Fintype.card ι))) + := by sorry + +section + +open InterleavedCode ListDecodable + +/-- For `parℓ` functions `{f₁,..,f_parℓ}`, `IC` be the `parℓ`-interleaved code from a linear code C, + with `Gen` as a proximity generator with mutual correlated agreement, + `proximityListDecodingCondition(r)` is true if, + List(C, ∑ⱼ rⱼ*fⱼ, δ) ≠ { ∑ⱼ rⱼ*uⱼ, where {u₁,..u_parℓ} ∈ Λᵢ({f₁,..,f_parℓ}, IC, δ) } -/ +def 
proximityListDecodingCondition + [Fintype ι] [Nonempty ι] + (Gen : ProximityGenerator ι F) [Fintype Gen.parℓ] + (δ : ℝ) (fs us : Matrix Gen.parℓ ι F) + (IC : InterleavedCode Gen.parℓ ι F) + (haveIC : IC = codeOfLinearCode Gen.parℓ Gen.C) : F → Prop := + fun r => + let f_r := fun x => ∑ j, Gen.Fun r j * fs j x + let listHamming := relHammingBall Gen.C f_r δ + let listIC := { fun x => ∑ j, Gen.Fun r j * us j x | us ∈ Λᵢ(fs, IC.MF, δ)} + listHamming ≠ listIC + + +/-- Lemma 4.13: Mutual correlated agreement preserves list decoding + Let C be a linear code with minimum distance δ_c and `Gen` be a proximity generator + with mutual correlated agreement for `C`. + Then for every `{f₁,..,f_parℓ}` and `δ ∈ (0, min δ_c (1 - BStar))`, + `Pr_{ r ← F} [ proximityListDecodingCondition(r) ] ≤ errStar(δ)`. -/ +lemma mutualCorrAgreement_list_decoding + [Fintype ι] [Nonempty ι] + (Gen : ProximityGenerator ι F) [Fintype Gen.parℓ] + (δ BStar : ℝ) (errStar : ℝ → ENNReal) + (fs us : Matrix Gen.parℓ ι F) + (IC : InterleavedCode Gen.parℓ ι F) + (haveIC : IC = codeOfLinearCode Gen.parℓ Gen.C) + (hGen : genMutualCorrAgreement Gen BStar errStar) + (C : Set (ι → F)) (hC : C = Gen.C) : + ∀ {fs : Matrix Gen.parℓ ι F} + (hδPos : δ > 0) (hδLt : δ < min (δᵣ C : ℝ) (1 - BStar)), + Pr_{let r ←$ᵖ F}[ proximityListDecodingCondition Gen δ fs us IC haveIC r] + ≤ errStar δ + := by sorry + +end + +end CorrelatedAgreement diff --git a/ArkLib/ProofSystem/Whir/MainThm.lean b/ArkLib/ProofSystem/Whir/MainThm.lean new file mode 100644 index 000000000..f95db1e4f --- /dev/null +++ b/ArkLib/ProofSystem/Whir/MainThm.lean @@ -0,0 +1,226 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.OracleReduction.VectorIOR +import ArkLib.ProofSystem.Whir.BlockRelDistance +import ArkLib.ProofSystem.Whir.GenMutualCorrAgreement +import ArkLib.ProofSystem.Whir.ProximityGen + +namespace WhirIOP + +open BigOperators BlockRelDistance CorrelatedAgreement Generator Finset + ListDecodable NNReal ReedSolomon + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {M : ℕ} (ι : Fin (M + 1) → Type) [∀ i : Fin (M + 1), Fintype (ι i)] + [∀ i : Fin (M + 1), DecidableEq (ι i)] + +/-- ** Per‑round protocol parameters. ** +For a fixed depth `M`, the reduction runs `M + 1` rounds. +In round `i ∈ {0,…,M}` we fold by a factor `foldingParamᵢ`, +evaluate on the point set `ιᵢ` through the embedding `φᵢ : ιᵢ ↪ F`, +and repeat certain proximity checks `repeatParamᵢ` times. -/ +structure Params (F : Type) where + foldingParam : Fin (M + 1) → ℕ + varCount : Fin (M + 1) → ℕ + φ : (i : Fin (M + 1)) → (ι i) ↪ F + repeatParam : Fin (M + 1) → ℕ + +/-- ** Conditions that protocol parameters must satisfy. 
** + h_m : m = varCount₀ + h_sumkLt : ∑ i : Fin (M + 1), varCountᵢ ≤ m + h_varCount_i : ∀ i : Fin (M + 1), i ≠ 0, varCountᵢ = m - ∑ j < i foldingParamⱼ + h_smooth : each φᵢ must embed a smooth evaluation domain + h_repeatPLt : ∀ i : Fin (M + 1), repeatParamᵢ ≤ |ιᵢ| -/ +structure ParamConditions (P : Params ι F) where + m : ℕ -- m = P.varCount 0 + h_m : m = P.varCount 0 + h_sumkLt : ∑ i : Fin (M + 1), P.foldingParam i ≤ m + h_varCount_i : ∀ i : Fin (M + 1), + P.varCount i = m - ∑ j : Fin i, P.foldingParam (Fin.castLT j (Nat.lt_trans j.isLt i.isLt)) + h_smooth : ∀ i : Fin (M + 1), Smooth (P.φ i) + h_repeatPLt : ∀ i : Fin (M + 1), P.repeatParam i ≤ Fintype.card (ι i) + +/-- `GenMutualCorrParams` binds together a set of smooth ReedSolomon codes + `C_{i : M + 1, j : foldingParamᵢ + 1} = RS[F, ιᵢ^(2ʲ), (varCountᵢ - j)]` with + `Gen_α_ij` which is a proximity generator with mutual correlated agreement + for `C_ij` with proximity parameters `BStar_ij` and `errStar_ij`. + + Additionally, it includes the condition that + C_ij is `(δᵢ, dist_ij)`-list decodeable, + where `δᵢ = 1 - max_{j : foldingParamᵢ + 1} BStar(C_ij,2)` +-/ +class GenMutualCorrParams (P : Params ι F) (S : ∀ i : Fin (M + 1), Finset (ι i)) where + + δ : Fin (M + 1) → ℝ≥0 + dist : (i : Fin (M + 1)) → Fin ((P.foldingParam i) + 1) → ℝ≥0 + +-- φ i j : ιᵢ^(2ʲ) ↪ F + φ : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), (indexPowT (S i) (P.φ i) j) ↪ F + + inst1 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Fintype (indexPowT (S i) (P.φ i) j) + inst2 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + Nonempty (indexPowT (S i) (P.φ i) j) + inst3 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + DecidableEq (indexPowT (S i) (P.φ i) j) + inst4 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Smooth (φ i j) + + Gen : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + ProximityGenerator (indexPowT (S i) (P.φ i) j) F + Gen_α : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) 
+ 1), + ProximityGenerator (indexPowT (S i) (P.φ i) j) F + + inst5 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Fintype (Gen i j).parℓ + inst6 : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Fintype (Gen_α i j).parℓ + + exp : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), (Gen i j).parℓ → ℕ + +-- this ensures that Gen_α_ij is a proxmity generator for C_ij = RS[F, ιᵢ^(2^j), (varCountᵢ - j)] +-- wrt proximity function Gen_α (α,l) = {1,α²,...,α^{parℓ-1}} + hgen : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), ∀ α : F, Gen_α i j = + proximityGenerator_α (Gen i j) α (φ i j) (P.varCount i - j) (exp i j) + + BStar : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + (Set ((indexPowT (S i) (P.φ i) j) → F)) → Type → ℝ≥0 + errStar : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + (Set ((indexPowT (S i) (P.φ i) j) → F)) → Type → ℝ → ENNReal + + C : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Set ((indexPowT (S i) (P.φ i) j) → F) + hcode : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), (C i j) = (Gen_α i j).C + + h : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + genMutualCorrAgreement (Gen_α i j) + (BStar i j (C i j) (Gen_α i j).parℓ) + (errStar i j (C i j) (Gen_α i j).parℓ) + + hℓ_bound : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), Fintype.card (Gen i j).parℓ = 2 + hδLe : ∀ i : Fin (M + 1), (δ i) ≤ 1 - Finset.univ.sup (fun j => BStar i j (C i j) (Gen i j).parℓ) + + hlistDecode : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + listDecodable (C i j) (δ i) (dist i j) + +section RBR + +open NNRat OracleComp OracleSpec ProtocolSpec VectorIOP + +/-- `OracleStatement` defines the oracle message type for a multi-indexed setting: + given base input type `ι`, and field `F`, the output type at each index + is a function `ι → F` representing an evaluation over `ι`. 
+-/ +@[reducible] +def OracleStatement (ι F : Type) : Unit → Type := + fun _ => ι → F + +/-- Provides a default OracleInterface instance that leverages + the oracle statement defined above. The oracle simply applies + the function `f : ι → F` to the query input `i : ι`, + producing the response. -/ +instance {ι : Type} : OracleInterface (OracleStatement ι F ()) where + Query := ι + Response := F + oracle := fun f i => f i + +/-- WHIR relation: the oracle's output is δᵣ-close to a codeword of a smooth ReedSolomon code +with number of variables at most `varCount` over domain `φ`, within error `err`. +-/ +def whirRelation + {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [Nonempty ι] + (varCount : ℕ) (φ : ι ↪ F) [Smooth φ] (err : ℝ) + : Set ((Unit × ∀ i, (OracleStatement ι F i)) × Unit) := + { ⟨⟨_, oracle⟩, _⟩ | δᵣ(oracle (), smoothCode φ varCount) ≤ err } + +/-- Theorem 5.2: **Round-by-round soundness of the WHIR Vector IOPP** -/ +theorem whir_rbr_soundness + [VCVCompatible F] {d dstar : ℕ} + -- P : set of M + 1 parameters including foldingParamᵢ, varCountᵢ, φᵢ, repeatParamᵢ, + -- where foldingParamᵢ > 0 + {P : Params ι F} {S : ∀ i : Fin (M + 1), Finset (ι i)} + -- hParams : a set of conditions that parameters in P must satisfy + -- h : a set of smooth ReedSolomon codes C_ij bundled with its proximity generators + -- and condition for list decodeability + {hParams : ParamConditions ι P} {h : GenMutualCorrParams ι P S} + {m_0 : ℕ} (hm_0 : m_0 = P.varCount 0) {σ₀ : F} + {wPoly₀ : MvPolynomial (Fin (m_0 + 1)) F} {δ : ℝ} + [Smooth (P.φ 0)] [Nonempty (ι 0)] + -- ∀ f₀ : ι₀ → F, f₀ ∉ CRS[F,ι₀,m₀,wPoly₀,σ₀] + (h_not_code : ∀ f_0 : (ι 0) → F, f_0 ∉ (constrainedCode (P.φ 0) m_0 wPoly₀ σ₀)) + -- ∀ f₀ : ι₀ → F, δ₀ < δᵣ(f₀, CRS[F,ι₀,m₀,wPoly₀,σ₀]), + -- where δᵣ denotes the relative Hamming distance + (hδ₀Lt : ∀ f_0 : (ι 0) → F, + (h.δ 0) < (δᵣ(f_0, (constrainedCode (P.φ 0) m_0 wPoly₀ σ₀)) : ℝ)) + (ε_fold : (i : Fin (M + 1)) → Fin (P.foldingParam i) → 
ℝ≥0) (ε_out : Fin (M + 1) → ℝ≥0) + (ε_shift : Fin M → ℝ≥0) (ε_fin : ℝ≥0) : + ∃ n : ℕ, + -- There exists an `n`-message vector IOPP, + ∃ vPSpec : ProtocolSpec.VectorSpec n, + -- such that there are `2 * M + 2` challenges from the verifier to the prover, + Fintype.card (vPSpec.ChallengeIdx) = 2 * M + 2 ∧ + -- ∃ a Vector IOPP π with Statement = Unit, Witness = Unit, OracleStatement = (ι₀ F) + ∃ π : + VectorIOP []ₒ Unit (OracleStatement (ι 0) F) Unit vPSpec F, + let max_ε_folds : (i : Fin (M + 1)) → ℝ≥0 := + fun i => (univ : Finset (Fin (P.foldingParam i))).sup (ε_fold i) + let ε_rbr : vPSpec.ChallengeIdx → ℝ≥0 := + fun _ => (univ.image max_ε_folds ∪ {ε_fin} ∪ univ.image ε_out ∪ univ.image ε_shift).max' + (by simp) + (IsSecureWithGap (whirRelation m_0 (P.φ 0) 0) + (whirRelation m_0 (P.φ 0) (h.δ 0)) + ε_rbr π) ∧ + + let maxDeg := (Finset.univ : Finset (Fin m_0)).sup (fun i => wPoly₀.degreeOf (Fin.succ i)) + -- dstar = (1 + deg_Z(wPoly₀) + max_{i < m_0} deg_X(wPoly₀)) + let dstar := 1 + (wPoly₀.degreeOf 0) + maxDeg + let d := max dstar 3 + + --necessary typeclasses for Gen_0j stating finiteness and non-emptiness of underlying ι₀^2ʲ + let _ : ∀ j : Fin ((P.foldingParam 0)+1), Fintype (indexPowT (S 0) (P.φ 0) j) := h.inst1 0 + let _ : ∀ j : Fin ((P.foldingParam 0)+1), Nonempty (indexPowT (S 0) (P.φ 0) j) := h.inst2 0 + + -- ε_fold(0,j) ≤ dstar * dist(0,j-1) / |F| + errStar(C_0j, 2, δ₀), + -- here j runs from 1 to (P.foldingParam 0) for ε_fold(0,j) + ∀ j : Fin ((P.foldingParam 0) + 1), + let errStar_0 j := h.errStar 0 j (h.C 0 j) (h.Gen 0 j).parℓ (h.δ 0) + ∀ j : Fin (P.foldingParam 0), + ε_fold 0 j ≤ ((dstar * (h.dist 0 j.castSucc)) / Fintype.card F) + (errStar_0 j.succ) + ∧ + -- ε_out(i) ≤ 2^(varCountᵢ) * dist(i,0)^2 / 2 * |F| + ∀ i : Fin (M + 1), + ε_out i ≤ + 2^(P.varCount i) * (h.dist i 0)^2 / (2 * Fintype.card F) + ∧ + -- ε_shift(i+1) ≤ (1 - δ_{i})^(repeatParam_{i}) + -- + (dist(i+1,0) * (repeatParam_{i} + 1)) / |F| + -- Note here that `i : Fin M`, so we need to cast 
into `Fin (M + 1)` for indexing of + -- `h.δ`, `h.dist` and `P.repeatParam`. + -- To get `i`, we use `.castSucc`, whereas to get `i + 1`, we use `.succ`. + ∀ i : Fin M, + ε_shift i ≤ (1 - (h.δ i.castSucc))^(P.repeatParam i.castSucc) + + ((h.dist i.succ 0) * (P.repeatParam i.castSucc) + 1) / Fintype.card F + ∧ + + -- necessary typeclasses for Gen_ij stating finiteness and non-emptiness of underlying ιᵢ^2ʲ + let _ : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + Fintype (indexPowT (S i) (P.φ i) j) := h.inst1 + let _ : ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + Nonempty (indexPowT (S i) (P.φ i) j) := h.inst2 + + -- ε_fold(i,j) ≤ d * dist(i,j-1) / |F| + errStar(C_ij,2,δᵢ) + -- here j runs from 1 to (P.foldingParam 0) for ε_fold(i,j) + ∀ i : Fin (M + 1), ∀ j : Fin ((P.foldingParam i) + 1), + let errStar i j := h.errStar i j (h.C i j) (h.Gen i j).parℓ (h.δ i) + ∀ i : Fin (M + 1), ∀ j : Fin (P.foldingParam i), + ε_fold i j ≤ d * (h.dist i j.castSucc) / Fintype.card F + errStar i j.succ + ∧ + -- ε_fin ≤ (1 - δ_{M})^(repeatParam_{M}) + ε_fin ≤ 1 - (h.δ (Fin.last M))^(P.repeatParam (Fin.last M)) + := by sorry + +end RBR + +end WhirIOP diff --git a/ArkLib/ProofSystem/Whir/OutofDomainSmpl.lean b/ArkLib/ProofSystem/Whir/OutofDomainSmpl.lean new file mode 100644 index 000000000..f4aa83dae --- /dev/null +++ b/ArkLib/ProofSystem/Whir/OutofDomainSmpl.lean @@ -0,0 +1,99 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Poulami Das (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.CodingTheory.ListDecodability +import ArkLib.Data.MvPolynomial.Multilinear +import ArkLib.Data.Probability.Notation + +namespace OutOfDomSmpl + +open ListDecodable MvPolynomial NNReal ProbabilityTheory ReedSolomon + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [DecidableEq ι] + +/--Lemma 4.24 + Let `f : ι → F`, `m` be the number of variables, `s` be a repetition parameter + and `δ ∈ [0,1]` be a distance parameter, then for every `r₁,...,rₛ ∈ Fᵐ` + the following statements are equivalent: + - ∃ distinct u, u' ∈ Λ(C,f,δ) such that ∀ i < s, uPoly(rᵢ) = uPoly'(rᵢ) + where, Λ(C,f,δ) denotes the list of codewords of C δ-close to f + uPoly, uPoly' denotes the decoded multivariate polynomials of u and u' + - ∃ σ₁,..,σₛ : F, such that |Λ(C',f,δ)| > 1 + where, C' is a multiconstrained RS = MCRS[F, ι, φ, m, s, w, σ] + σ = {σ₁,..,σₛ}, w = {w₁,..,wₛ}, wᵢ = Z * eqPolynomial(rᵢ) -/ +lemma crs_equiv_rs_randpompt_agreement + {f : ι → F} {m s : ℕ} {φ : ι ↪ F} [Smooth φ] : + ∀ (r : Fin s → Fin m → F) (δ : ℝ≥0) (hδLe : δ ≤ 1), + (∃ u u' : smoothCode φ m, + u.val ≠ u'.val ∧ + u.val ∈ relHammingBall (smoothCode φ m) f δ ∧ + u'.val ∈ relHammingBall (smoothCode φ m) f δ ∧ + ∀ i : Fin s, (mVdecode u).eval (r i) = (mVdecode u').eval (r i)) + ↔ + (∃ σ : Fin s → F, + let w : Fin s → MvPolynomial (Fin (m + 1)) F := + fun i => MvPolynomial.X (Fin.last m) * rename Fin.castSucc (eqPolynomial (r i)) + let multiCRSCode := multiConstrainedCode φ m s w σ + ∃ g : ι → F, g ∈ relHammingBall multiCRSCode f δ) + := by sorry + +/--Lemma 4.25 part 1 + Let `f : ι → F`, `m` be the number of variables, `s` be a repetition parameter + and `δ ∈ [0,1]` be a distance parameter, + if `C = RS [F, ι, m]` is `(δ,l)`-list decodable then + `Pr_{r ← F} [ ∃ σ₁,..,σₛ : F, |Λ(C',f,δ)| > 1 ] =` + `Pr_{r ← F} [ ∃ distinct u, u' ∈ RS[F, ι, φ, m] s.t. 
uPoly(pow(r)) = uPoly'(pow(r))]` + where, pow(x,m) = {x^2⁰,x^2¹,....,x^2^{m-1}} + C' = CRS [F, ι, φ, m, s, w, σ] + σ = {σ₁,..,σₛ}, w = {w₁,..,wₛ}, wᵢ = Z * eqPolynomial(pow(r,m)) + -/ +lemma out_of_domain_sampling_crs_eq_rs + {f : ι → F} {m s : ℕ} {φ : ι ↪ F} [Smooth φ] + {GenFun : F → Fin s → F} (l δ : ℝ≥0) (hδLe : δ ≤ 1) + {C : Set (ι → F)} (hcode : C = smoothCode φ m ∧ listDecodable C δ l) : + Pr_{ let r ←$ᵖ F }[ (∃ σ : Fin s → F, + ∀ i : Fin s, + let ri := GenFun r i + let rVec := fun j : Fin m => ri ^ (2^(j : ℕ)) + let w : Fin s → MvPolynomial (Fin (m + 1)) F := + fun i => + MvPolynomial.X (Fin.last m) * rename Fin.castSucc (eqPolynomial (rVec)) + let multiCRSCode := multiConstrainedCode φ m s w σ + ∃ g : ι → F, g ∈ relHammingBall multiCRSCode f δ)] + = + Pr_{ let r ←$ᵖ F }[ (∃ u u' : smoothCode φ m, + u.val ≠ u'.val ∧ + u.val ∈ relHammingBall C f δ ∧ + u'.val ∈ relHammingBall C f δ ∧ + ∀ i : Fin s, + let ri := GenFun r i + let rVec := fun j : Fin m => ri ^ (2^(j : ℕ)) + (mVdecode u).eval (rVec) = (mVdecode u').eval (rVec))] + := by sorry + +/--Lemma 4.25 part 2 + Let `f : ι → F`, `m` be the number of variables, `s` be a repetition parameter + and `δ ∈ [0,1]` be a distance parameter, + if `C = RS [F, ι, m]` is `(δ,l)`-list decodable then + the above equation is bounded as `≤ l²/2 * (2ᵐ/|F|)ˢ` -/ +lemma out_of_domain_sampling_rs_le_bound + {f : ι → F} {k m s : ℕ} {φ : ι ↪ F} [Smooth φ] + {GenFun : F → Fin s → F} (δ l : ℝ≥0) (hδLe : δ ≤ 1) + (C : Set (ι → F)) (hcode : C = smoothCode φ m ∧ listDecodable C δ l) : + Pr_{ let r ←$ᵖ F }[ ∃ u u' : smoothCode φ m, + u.val ≠ u'.val ∧ + u.val ∈ relHammingBall C f δ ∧ + u'.val ∈ relHammingBall C f δ ∧ + ∀ i : Fin s, + let ri := GenFun r i + let rVec := fun j : Fin m => ri ^ (2^(j : ℕ)) + (mVdecode u).eval (rVec) = (mVdecode u').eval (rVec) + ] ≤ l^2 / 2 * ((2^m) / Fintype.card F)^s +:= by sorry + +end OutOfDomSmpl diff --git a/ArkLib/ProofSystem/Whir/ProximityGap.lean b/ArkLib/ProofSystem/Whir/ProximityGap.lean new file 
mode 100644 index 000000000..a8b466b09 --- /dev/null +++ b/ArkLib/ProofSystem/Whir/ProximityGap.lean @@ -0,0 +1,48 @@ +/- +Copyright (c) 2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter (Least Authority) +-/ + +import ArkLib.ProofSystem.Whir.ProximityGen +import ArkLib.Data.CodingTheory.ReedSolomon + +namespace RSGenerator + +open Generator NNReal ReedSolomon + +variable {F : Type} [Field F] [Fintype F] [DecidableEq F] + {ι : Type} [Fintype ι] [DecidableEq ι] [Nonempty ι] + +noncomputable def rate (φ : ι ↪ F) (m : ℕ) [Smooth φ] : ℝ := + LinearCode.rate (smoothCode φ m) + + +/- Theorem 4.8 [BCIKS20] Proxmity Gap Theorem + Smooth Reed Solomon codes C:= RSC[F,ι,m] have proximity generators for any given `parℓ` + with generator function Gen(parℓ) : 𝔽 → parℓ → 𝔽 ; α → (1,α, α², …, α^{parℓ - 1}), + Bstar(C,parℓ) := √ρ + err(C,parℓ,δ) := (parℓ-1)2ᵐ / ρ * |F| for δ in (0, (1-ρ)/2] + (parℓ-1)*2²ᵐ / (|F|(2 min{1-√ρ-δ, √ρ/20})⁷) + for δ in ((1-ρ)/ 2, 1 - B(C,parℓ)) -/ +noncomputable def proximityGapTheorem + (parℓ : Type) [hℓ : Fintype parℓ] (φ : ι ↪ F) [Smooth φ] + (m : ℕ) {exp : parℓ ↪ ℕ} : ProximityGenerator ι F := + { C := smoothCode φ m, + parℓ := parℓ, + hℓ := hℓ, + Fun := fun r j => r ^ (exp j), + B := fun _ _ => (Real.sqrt (rate φ m)), + err := fun _ _ δ => + ENNReal.ofReal ( + if δ ≤ (1 - (rate φ m)) / 2 then + ((Fintype.card parℓ - 1) * 2^m) / ((rate φ m) * Fintype.card F) + else + let min_val := min (1 - (Real.sqrt (rate φ m)) - δ) + ((Real.sqrt (rate φ m)) / 20) + ((Fintype.card parℓ - 1) * (2^(2 * m))) / ((Fintype.card F) * (2 * min_val)^7) + ), + proximity := by sorry + } + +end RSGenerator diff --git a/ArkLib/ProofSystem/Whir/ProximityGen.lean b/ArkLib/ProofSystem/Whir/ProximityGen.lean new file mode 100644 index 000000000..80336b4be --- /dev/null +++ b/ArkLib/ProofSystem/Whir/ProximityGen.lean @@ -0,0 +1,57 @@ +/- +Copyright (c) 2025 ArkLib Contributors. 
All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Mirco Richter (Least Authority) +-/ + +import ArkLib.Data.CodingTheory.ReedSolomon +import ArkLib.Data.Probability.Notation + +namespace Generator + +open NNReal ProbabilityTheory + +variable {F : Type*} [Semiring F] [Fintype F] [DecidableEq F] + {ι : Type*} [Fintype ι] [Nonempty ι] + {parℓ : Type*} [Fintype parℓ] + +/-- For `l` functions `fᵢ : ι → 𝔽`, distance `δ`, generator function `GenFun: 𝔽 → parℓ → 𝔽ˡ` + and linear code `C` the predicate `proximityCondition(r)` is true, if the linear + combination f := ∑ⱼ rⱼ * fⱼ is within relative Hamming distance `δ` to the linear + code `C`. -/ +noncomputable def proximityCondition + (f : parℓ → ι → F) (δ : ℝ≥0) (GenFun : F → parℓ → F) (C : LinearCode ι F): F → Prop + | r => δᵣ( (fun x => ∑ j : parℓ, (GenFun r j) * f j x) , C ) ≤ (δ : ℝ) + + +/-- A proximity generator for a linear code `C`, Definition 4.7 -/ +structure ProximityGenerator + (ι : Type) [Fintype ι] [Nonempty ι] + (F : Type) [Semiring F] [Fintype F] [DecidableEq F] where + -- Underlying linear code + C : LinearCode ι F + -- Number of functions + parℓ : Type + hℓ : Fintype parℓ + -- Generator function maps sampled randomness `r : 𝔽 ` to `parℓ`-tuples of field elements + Fun : F → parℓ → F + -- Distance threshold parameter + B : (LinearCode ι F) → Type → ℝ + -- Error function bounding the probability of distance within `δ` + err : (LinearCode ι F) → Type → ℝ → ENNReal + /- Proximity: + For all `parℓ`-tuples of functions `fᵢ : ι → 𝔽` + and distance parameter `δ ∈ (0, 1-BStar(C,parℓ))` : + If the probability that `proximityCondition(r)` is true for uniformly random + sampled `r ← 𝔽 `, exceeds `err(C,parℓ,δ)`, then there exists a subset `S ⊆ ι ` of size + `|S| ≥ (1-δ)⬝|ι|`) on which each `fᵢ` agrees with some codeword in `C`. 
-/ + proximity: + ∀ (f : parℓ → ι → F) + (δ : ℝ≥0) + (_hδ : δ < 1 - (B C parℓ)) , + Pr_{ let r ← $ᵖ F }[ (proximityCondition f δ Fun C r) ] > (err C parℓ δ) → + ∃ S : Finset ι, + S.card ≥ (1 - δ) * (Fintype.card ι) ∧ + ∀ i : parℓ, ∃ u ∈ C, ∀ x ∈ S, f i x = u x + +end Generator diff --git a/ArkLib/ToVCVio/DistEq.lean b/ArkLib/ToVCVio/DistEq.lean new file mode 100644 index 000000000..f5291bbd3 --- /dev/null +++ b/ArkLib/ToVCVio/DistEq.lean @@ -0,0 +1,68 @@ +/- +Copyright (c) 2024-2025 ArkLib Contributors. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Quang Dao +-/ +import ArkLib.ToVCVio.SimOracle + +/-! + # Distributional Equality of Oracle Computations + + We define distributional equality of oracle computations (or more generally, any monad `m` with + an `HasEvalDist` instance). +-/ + +universe u v w + +open OracleComp SimOracle + +namespace HasEvalDist + +variable {m : Type u → Type v} [Monad m] [HasEvalDist m] + +def eq {α : Type u} (mx my : m α) : Prop := + evalDist mx = evalDist my + +@[simp] +theorem eq_refl {α : Type u} (mx : m α) : eq mx mx := by + simp [eq] + +theorem eq_symm {α : Type u} (mx my : m α) : eq mx my → eq my mx := by + intro i; simp_all [eq] + +theorem eq_trans {α : Type u} (mx my mz : m α) (hxy : eq mx my) (hyz : eq my mz) : eq mx mz := by + simp [eq] + rw [hxy, hyz] + +end HasEvalDist + +namespace OracleComp + +-- Shouldn't have to define this separately once we have an instance `HasEvalDist (OracleComp spec)` + +variable {ι : Type u} {spec : OracleSpec ι} [spec.FiniteRange] {α : Type u} + +def distEq (mx my : OracleComp spec α) : Prop := + evalDist mx = evalDist my + +@[simp] +theorem distEq_refl (mx : OracleComp spec α) : distEq mx mx := by + simp [distEq] + +theorem distEq_symm (mx my : OracleComp spec α) : distEq mx my → distEq my mx := by + intro i; simp_all [distEq] + +theorem distEq_trans (mx my mz : OracleComp spec α) + (hxy : distEq mx my) (hyz : distEq my mz) : distEq mx mz := by 
+ simp [distEq] + rw [hxy, hyz] + +-- universe level issue +-- /-- Functional equality of oracle computations. This is *different* from distributional equality, +-- since the distribution of each new query when applying `evalDist` is independently random, unlike +-- a function which always returns the same value. -/ +-- def fnEquiv (oa ob : OracleComp spec α) : Prop := +-- ∀ f : (i : ι) → spec.domain i → spec.range i, +-- simulateQ (fnOracle spec f) oa = simulateQ (fnOracle spec f) ob + +end OracleComp diff --git a/ArkLib/ToVCVio/Lemmas.lean b/ArkLib/ToVCVio/Lemmas.lean new file mode 100644 index 000000000..65856fff6 --- /dev/null +++ b/ArkLib/ToVCVio/Lemmas.lean @@ -0,0 +1,47 @@ +import VCVio + +open OracleComp OracleSpec + +section simp_lemmas -- Some extra lemmas that still need to move to vcv + +universe u v w + +variable {ι : Type u} {spec : OracleSpec ι} {α β γ ω : Type u} + +@[simp] +lemma probFailure_bind_eq_zero_iff [spec.FiniteRange] + (oa : OracleComp spec α) (ob : α → OracleComp spec β) : + [⊥ | oa >>= ob] = 0 ↔ [⊥ | oa] = 0 ∧ ∀ x ∈ oa.support, [⊥ | ob x] = 0 := by + simp [probFailure_bind_eq_tsum, ← imp_iff_not_or] + +@[simp] -- TODO: more general version/class for query impls that never have failures +lemma loggingOracle.probFailure_simulateQ [spec.FiniteRange] (oa : OracleComp spec α) : + [⊥ | (simulateQ loggingOracle oa).run] = [⊥ | oa] := by + induction oa using OracleComp.induction with + | pure a => simp + | query_bind i t oa ih => simp [ih, probFailure_bind_eq_tsum] + | failure => simp + +@[simp] +lemma probFailure_liftComp {ι' : Type w} {superSpec : OracleSpec ι'} + [spec.FiniteRange] [superSpec.FiniteRange] + [h : MonadLift (OracleQuery spec) (OracleQuery superSpec)] + (oa : OracleComp spec α) : [⊥ | liftComp oa superSpec] = [⊥ | oa] := by + simp only [OracleComp.probFailure_def, OracleComp.evalDist_liftComp] + +@[simp] +lemma liftComp_support {ι' : Type w} {superSpec : OracleSpec ι'} + [h : MonadLift (OracleQuery spec) (OracleQuery 
superSpec)] + (oa : OracleComp spec α) : (liftComp oa superSpec).support = oa.support := by + induction oa using OracleComp.induction with + | pure a => simp + | query_bind i t oa ih => simp [ih] + | failure => simp + +-- Stub lemma for now, will be available in the next VCVio update +lemma neverFails_map_iff' (oa : OracleComp spec α) (f : α → β) : + neverFails (f <$> oa) ↔ neverFails oa := by + rw [map_eq_bind_pure_comp] + simp [neverFails, neverFailsWhen, Function.comp_apply, implies_true, and_true] + +end simp_lemmas diff --git a/ArkLib/ToVCVio/Oracle.lean b/ArkLib/ToVCVio/Oracle.lean index ddbd7997f..9a584c3e0 100644 --- a/ArkLib/ToVCVio/Oracle.lean +++ b/ArkLib/ToVCVio/Oracle.lean @@ -14,8 +14,7 @@ open OracleSpec OracleComp variable {ι : Type} {α β γ : Type} -/-- - A function that implements the oracle interface specified by `spec`, and queries no further +/-- A function that implements the oracle interface specified by `spec`, and queries no further oracles. -/ def Oracle (spec : OracleSpec ι) := (i : ι) → spec.domain i → spec.range i @@ -37,15 +36,42 @@ namespace OracleComp variable {ι : Type} {spec : OracleSpec ι} {α σ : Type} -/-- - Run an oracle computation `OracleComp spec α` with an oracle coming from +/-- Run an oracle computation `OracleComp spec α` with an oracle coming from a (deterministic) function `f` that queries no further oracles. 
TODO: add state for `f` -/ def runWithOracle (f : Oracle spec) : OracleComp spec α → Option α := - OracleComp.construct' (spec := spec) (C := fun _ => Option α) (fun x => some x) - (fun i q _ g => g (f i q)) (none) + OracleComp.construct' (spec := spec) (C := fun _ => Option α) + -- For a pure value, return that value successfully + (fun x => some x) + -- When a query bind is made, run the oracle function `f` and compute on the result + (fun i q _ g => g (f i q)) + -- If the computation fails, return `none` + (none) + +@[simp] +theorem runWithOracle_pure (f : Oracle spec) (a : α) : + runWithOracle f (pure a) = some a := by + unfold runWithOracle OracleComp.construct' + simp only [construct_pure] + +@[simp] +theorem runWithOracle_freeMonad_pure_some (f : Oracle spec) (a : α) : + runWithOracle f (FreeMonad.pure (a : Option α)) = a := by + exact rfl + +@[simp] +theorem runWithOracle_freeMonad_pure_none (f : Oracle spec) : + runWithOracle f (FreeMonad.pure (none : Option α)) = none := by + exact rfl + +@[simp] +theorem runWithOracle_freeMonad_pure (f : Oracle spec) (a : Option α) : + runWithOracle f (FreeMonad.pure a) = a := by + cases a with + | none => simp only [runWithOracle_freeMonad_pure_none] + | some val => simp only [runWithOracle_freeMonad_pure_some] -- Oracle with bounded use; returns `default` if the oracle is used more than `bound` times. -- We could then have the range be an `Option` type, so that `default` is `none`. diff --git a/ArkLib/ToVCVio/SimOracle.lean b/ArkLib/ToVCVio/SimOracle.lean new file mode 100644 index 000000000..bfbbe7ccf --- /dev/null +++ b/ArkLib/ToVCVio/SimOracle.lean @@ -0,0 +1,184 @@ +import VCVio + +/-! 
+ +# Some special cases of `QueryImpl` where the monad is another (stateful) oracle computation + +TODO: figure out a better organization / naming scheme for this + +-/ + + +universe u v + +open OracleComp OracleSpec + +variable {ι ι' ιₜ : Type*} {spec : OracleSpec ι} + {spec' : OracleSpec ι'} {specₜ : OracleSpec ιₜ} + {m : Type u → Type v} {α β γ σ : Type u} + +#check SubSpec + +instance : IsEmpty (OracleQuery []ₒ α) where + false := by simp only [IsEmpty.forall_iff] + +-- TODO: revamp the `SubSpec` stuff to work for `MonadLiftT` instead of `MonadLift` +instance (priority := high) : MonadLiftT (OracleComp []ₒ) (OracleComp spec) where + monadLift := OracleComp.construct pure (fun a => by contradiction) failure + +section SimulateNeverFails + +variable [Monad m] (so : QueryImpl spec m) + +/-- Canonical lifting of a function `OracleQuery spec α → m α`, for an arbitrary monad `m`, +to a new function on computations `OracleComp spec α` that _never_ fails. -/ +def simulateQ' (oa : OracleComp spec α) (h : oa.neverFails) : m α := by + induction oa using OracleComp.construct with + | pure x => exact pure x + | query_bind q r ih => exact (do let b ← so.impl q; ih b (by simp at h; exact h b)) + | failure => simp [neverFails] at h + +@[simp] +theorem simulateQ'_pure (x : α) : simulateQ' so (pure x) (by simp) = pure x := by + simp [simulateQ'] + +@[simp] +theorem simulateQ'_query_bind (q : OracleQuery spec α) + (ob : α → OracleComp spec β) (h : ∀ x, (ob x).neverFails) : + simulateQ' so (liftM q >>= ob) (by simp [h]) = + so.impl q >>= (fun x => simulateQ' so (ob x) (h x)) := by + simp only [simulateQ', query_bind_eq_roll, OptionT.mk] + congr + +variable [LawfulMonad m] + +@[simp] +theorem simulateQ'_query (q : OracleQuery spec α) : + simulateQ' so q (by simp) = so.impl q := by + simp [simulateQ'] + +@[simp] +theorem simulateQ'_bind (oa : OracleComp spec α) (ob : α → OracleComp spec β) + -- Could potentially be weakened to `∀ x ∈ oa.support, (ob x).neverFails` + -- Would require 
`bindOnSupport` instead of just `bind` + (ha : oa.neverFails) (hb : ∀ x, (ob x).neverFails) : + simulateQ' so (oa >>= ob) (by simp; exact ⟨ha, fun x _ => hb x⟩) = + simulateQ' so oa ha >>= fun x ↦ simulateQ' so (ob x) (hb x) := by + induction oa using OracleComp.inductionOn with + | pure x => simp [simulateQ']; congr + | query_bind i q r ih => + simp at ih ha ⊢; rw [simulateQ'_query_bind] + · sorry + · simp; intro x; refine ⟨ha x, ?_⟩; sorry + | failure => simp [neverFails] at ha + +-- An alternate approach is just to show that `simulateQ` is not failure if `oa.neverFails`? +-- Not very well-defined since `OptionT m` pushes option _inside_ the monad + +-- theorem simulateQ_ne_none_of_neverFails [Monad m] (so : QueryImpl spec (OptionT m)) +-- (oa : OracleComp spec α) +-- (h : oa.neverFails) : simulateQ so oa ≠ Option.none := by +-- induction oa using OracleComp.construct with +-- | pure x => simp +-- | query_bind q r ih => simp [simulateQ] +-- | failure => simp [neverFails] at h + +end SimulateNeverFails + +-- We can then use the rest of the `simulateQ` lemmas +theorem simulateQ'_eq_simulateQ [AlternativeMonad m] [LawfulMonad m] + {so : QueryImpl spec m} (oa : OracleComp spec α) (h : oa.neverFails) : + simulateQ' so oa h = simulateQ so oa := by + induction oa using OracleComp.inductionOn with + | pure x => simp [simulateQ_pure] + | query_bind i q r ih => + simp at h ⊢; rw [simulateQ'_query_bind (h := h)] + conv => + enter [1, 2, x] + apply ih x (h x) + congr + | failure => simp [neverFails] at h + +@[reducible] +def SimOracle.Stateful (spec : OracleSpec ι) (specₜ : OracleSpec ιₜ) (σ : Type) := + QueryImpl spec (StateT σ (OracleComp specₜ)) + +@[reducible] +def SimOracle.Stateless (spec : OracleSpec ι) (specₜ : OracleSpec ιₜ) := + QueryImpl spec (OracleComp specₜ) + +set_option pp.universes true + +@[reducible] +def SimOracle.Impl (spec : OracleSpec ι) := SimOracle.Stateless spec []ₒ + +#print SimOracle.Stateless + +namespace SimOracle + +variable {ι₁ ι₂ ιₜ₁ ιₜ₂ : 
Type} {spec : OracleSpec ι} {spec₁ : OracleSpec ι₁} + {spec₂ : OracleSpec ι₂} {specₜ : OracleSpec ιₜ} {specₜ₁ : OracleSpec ιₜ₁} + {specₜ₂ : OracleSpec ιₜ₂} {σ τ α β : Type} + +variable [DecidableEq ι] + +open OracleSpec + +def fnOracle (spec : OracleSpec ι) (f : (i : ι) → spec.domain i → spec.range i) : + SimOracle.Impl spec where + impl | query i t => pure (f i t) + +def statelessOracle (baseSpec : OracleSpec ιₜ) (spec : OracleSpec ι) + (f : (i : ι) → spec.domain i → spec.range i) : + SimOracle.Stateless (baseSpec ++ₒ spec) baseSpec where + impl + | query (.inl i) t => query i t + | query (.inr i) t => pure (f i t) + +-- instance : (loggingOracle (spec := spec)).IsTracking where +-- state_indep | query _ _, _ => rfl + +def append' (so₁ : SimOracle.Stateful spec₁ specₜ₁ σ) (so₂ : SimOracle.Stateful spec₂ specₜ₂ τ) : + SimOracle.Stateful (spec₁ ++ₒ spec₂) (specₜ₁ ++ₒ specₜ₂) (σ × τ) where + impl + | query (.inl i) t => fun (s₁, s₂) ↦ do + let (u, s₁') ← so₁.impl (query i t) s₁; return (u, s₁', s₂) + | query (.inr i) t => fun (s₁, s₂) ↦ do + let (u, s₂') ← so₂.impl (query i t) s₂; return (u, s₁, s₂') + +def dedup {ι : Type} (spec : OracleSpec ι) : SimOracle.Stateless (spec ++ₒ spec) spec where + impl + | query (.inl i) t => query i t + | query (.inr i) t => query i t + +-- theorem append'_dedup (so₁ : SimOracle spec₁ specₜ σ) (so₂ : SimOracle spec₂ specₜ τ) : +-- append so₁ so₂ = (dedup specₜ ∘ₛ append' so₁ so₂).equivState (.prodPUnit _) := by +-- sorry + +-- /-- Answer all oracle queries to `oSpec` with a deterministic function `f` having the same domain +-- and range as `oSpec`. 
-/ +-- def fnOracle {ι : Type} (spec : OracleSpec ι) +-- (f : (i : ι) → spec.domain i → spec.range i) : SimOracle spec []ₒ PUnit := +-- statelessOracle fun (query i q) ↦ pure (f i q) + +def lift {ι₁ ι₂ ι : Type} {σ : Type} (oSpec₁ : OracleSpec ι₁) (oSpec₂ : OracleSpec ι₂) + (oSpec : OracleSpec ι) (so : SimOracle.Stateful oSpec₁ oSpec₂ σ) : + SimOracle.Stateful (oSpec ++ₒ oSpec₁) (oSpec ++ₒ oSpec₂) σ where + impl := fun q s => match q with + | query (.inl i) q => do return ⟨← query i q, s⟩ + | query (.inr i) q => so.impl (query (spec := oSpec₁) i q) s + +-- def liftLeft' {ι₁ ι₂ ι : Type} {σ : Type} {oSpec₁ : OracleSpec ι₁} {oSpec₂ : OracleSpec ι₂} +-- (oSpec : OracleSpec ι) (so : SimOracle oSpec₁ oSpec₂ σ) : +-- SimOracle (oSpec ++ₒ oSpec₁) (oSpec ++ₒ oSpec₂) σ := +-- (append' idOracle so).equivState (.punitProd σ) + +def liftLeftNil {ι : Type} {σ : Type} (oSpec : OracleSpec ι) : + SimOracle.Stateful ([]ₒ ++ₒ oSpec) oSpec σ where impl + | query (.inr i) q => fun s ↦ do return ⟨← query i q, s⟩ + +def liftRightNil {ι : Type} {σ : Type} (oSpec : OracleSpec ι) : + SimOracle.Stateful (oSpec ++ₒ []ₒ) oSpec σ where impl + | query (.inl i) q => fun s ↦ do return ⟨← query i q, s⟩ + +end SimOracle diff --git a/blueprint/lean_decls b/blueprint/lean_decls index 9c6b2cb45..3815755a1 100644 --- a/blueprint/lean_decls +++ b/blueprint/lean_decls @@ -1,30 +1,193 @@ -ProtocolSpec OracleInterface +ProtocolSpec +ProtocolSpec.getDir +ProtocolSpec.getType +ProtocolSpec.MessageIdx +ProtocolSpec.ChallengeIdx +ProtocolSpec.Message +ProtocolSpec.Challenge ProtocolSpec.Transcript ProtocolSpec.Message ProtocolSpec.Challenge Prover +ProverState +ProverIn +ProverRound +ProverOut +OracleProver Verifier +OracleVerifier +OracleVerifier.toVerifier +Reduction OracleReduction +Prover.runToRound +Prover.processRound +Prover.run +Verifier.run +OracleVerifier.run +Reduction.run OracleReduction.run Reduction.completeness +Reduction.perfectCompleteness +Extractor.Straightline +Extractor.RoundByRound 
+Extractor.Rewinding +Prover.StateRestoration Verifier.soundness Verifier.knowledgeSoundness Verifier.StateFunction +Verifier.KnowledgeStateFunction Verifier.rbrSoundness Verifier.rbrKnowledgeSoundness +Verifier.Extractor.Straightline.IsMonotone +Extractor.RoundByRound.IsMonotone +Verifier.knowledgeSoundness_implies_soundness +Verifier.rbrSoundness_implies_soundness +Verifier.rbrKnowledgeSoundness_implies_rbrSoundness +Verifier.rbrKnowledgeSoundness_implies_knowledgeSoundness +Reduction.Simulator +OracleReduction.completeness +OracleVerifier.soundness +OracleVerifier.knowledgeSoundness +OracleVerifier.rbrSoundness +OracleVerifier.rbrKnowledgeSoundness +ProtocolSpec.append +ProtocolSpec.FullTranscript.append +Prover.append +Verifier.append +Reduction.append +OracleReduction.append +ProtocolSpec.seqCompose +Prover.seqCompose +Verifier.seqCompose +Reduction.seqCompose +Reduction.completeness_append +Reduction.perfectCompleteness_append +Verifier.append_soundness +Verifier.append_knowledgeSoundness +Verifier.append_rbrSoundness +Verifier.append_rbrKnowledgeSoundness +Reduction.completeness_seqCompose +Verifier.seqCompose_soundness +Verifier.seqCompose_knowledgeSoundness +Statement.Lens +Witness.Lens +Context.Lens +OracleContext.Lens +Prover.liftContext +Verifier.liftContext +Reduction.liftContext +Context.Lens.IsComplete +Statement.Lens.IsSound +Statement.Lens.IsSound +Extractor.Lens.IsKnowledgeSound +Reduction.liftContext_completeness +Verifier.liftContext_soundness +Verifier.liftContext_knowledgeSoundness +Verifier.liftContext_rbr_soundness +Verifier.liftContext_rbr_knowledgeSoundness +Extractor.Straightline.liftContext +Extractor.RoundByRound.liftContext +Verifier.StateFunction.liftContext +ProtocolSpec.fiatShamirSpec +Prover.processRoundFS +Prover.runToRoundFS +Prover.fiatShamir +ProtocolSpec.Messages.deriveTranscriptFS +Verifier.fiatShamir +Reduction.fiatShamir +fiatShamir_completeness +DoNothing.reduction +DoNothing.oracleReduction 
+DoNothing.reduction_completeness +DoNothing.oracleReduction_completeness +SendWitness.reduction +SendWitness.reduction_completeness +SendSingleWitness.oracleReduction +SendSingleWitness.oracleReduction_completeness +RandomQuery.oracleReduction +RandomQuery.oracleReduction_completeness +RandomQuery.rbr_knowledge_soundness +SendClaim.oracleReduction +SendClaim.completeness +ReduceClaim.reduction +ReduceClaim.reduction_completeness +ReduceClaim.oracleReduction +CheckClaim.reduction +CheckClaim.reduction_completeness +CheckClaim.oracleReduction +CheckClaim.oracleReduction_completeness +BinaryTower.BTField +BinaryTower.polyIrreducible +BinaryTower.BTFieldIsField +BinaryTower.BTFieldCard +BinaryTower.BTFieldChar2 +BinaryTower.ConcreteDefinition.instFieldConcrete +proximity_gap +Quotienting.funcQuotient +Quotienting.polyQuotient +Quotienting.quotienting +OutOfDomSmpl.out_of_dom_smpl_1 +OutOfDomSmpl.out_of_dom_smpl_2 +Folding.exists_unique_bivariate +Folding.degree_bound_bivariate +Folding.polyFold +Folding.fold +Folding.folding +Combine.geometric_sum_units +Combine.combineInterm +Combine.combineFinal +Combine.degreeCorrInterm +Combine.degreeCorrFinal +Combine.combine +StirIOP.stir_main +StirIOP.stir_rbr_soundness +Generator.ProximityGenerator +RSGenerator.proximityGapTheorem +CorrelatedAgreement.genMutualCorrAgreement +CorrelatedAgreement.genMutualCorrAgreement_le_bound +CorrelatedAgreement.genMutualCorrAgreement_rsc_le_bound +CorrelatedAgreement.genMutualCorrAgreement_le_johnsonBound +CorrelatedAgreement.genMutualCorrAgreement_le_capacity +CorrelatedAgreement.mutualCorrAgreement_list_decoding +Fold.extract_x +Fold.foldf +Fold.fold_k_core +Fold.fold_k +Fold.fold_k_set +Fold.fold_f_g +BlockRelDistance.block +BlockRelDistance.blockRelDistance +BlockRelDistance.minBlockRelDistance +BlockRelDistance.listBlockRelDistance +BlockRelDistance.blockRelDistance_le_hammingDistance +Fold.folding_listdecoding_if_genMutualCorrAgreement +Fold.folding_preserves_listdecoding_base 
+Fold.folding_preserves_listdecoding_bound +Fold.folding_preserves_listdecoding_base_ne_subset +OutOfDomSmpl.crs_equiv_rs_randpompt_agreement +OutOfDomSmpl.out_of_domain_sampling_crs_eq_rs +WhirIOP.whir_rbr_soundness MvPolynomial.MLE MvPolynomial.schwartz_zippel_sum_degreeOf UniPoly MlPoly -codeDist -distFromCode -codeByGenMatrix -codeByCheckMatrix -interleaveCode +Code.dist +Code.distFromCode +LinearCode.fromRowGenMat +LinearCode.fromColGenMat +LinearCode.byCheckMatrix +ListDecodable.Code +LinearCode +InterleavedCode ReedSolomon.code +smoothCode +constrainedCode +multiConstrainedCode proximityMeasure proximityGap +ListDecodable.listDecodable +ListDecodable.relHammingBall OracleSpec emptySpec singletonSpec diff --git a/blueprint/print/print.pdf b/blueprint/print/print.pdf index 713866d98..c2193d408 100644 Binary files a/blueprint/print/print.pdf and b/blueprint/print/print.pdf differ diff --git a/blueprint/src/coding_theory/defs.tex b/blueprint/src/coding_theory/defs.tex index 0dd99d841..8456d287b 100644 --- a/blueprint/src/coding_theory/defs.tex +++ b/blueprint/src/coding_theory/defs.tex @@ -4,27 +4,37 @@ \section{Coding Theory}\label{sec:coding_theory} \begin{definition}[Code Distance] \label{def:code_distance} - \lean{codeDist} + \lean{Code.dist} \end{definition} \begin{definition}[Distance from a Code] \label{def:distance_from_code} - \lean{distFromCode} + \lean{Code.distFromCode} \end{definition} \begin{definition}[Generator Matrix] \label{def:generator_matrix} - \lean{codeByGenMatrix} + \lean{LinearCode.fromRowGenMat, LinearCode.fromColGenMat} \end{definition} \begin{definition}[Parity Check Matrix] \label{def:parity_check_matrix} - \lean{codeByCheckMatrix} + \lean{LinearCode.byCheckMatrix} +\end{definition} + +\begin{definition}[Code] + \label{def:code} + \lean{ListDecodable.Code} +\end{definition} + +\begin{definition}[Linear Code] + \label{def:linear_code} + \lean{LinearCode} \end{definition} \begin{definition}[Interleaved Code] 
\label{def:interleaved_code} - \lean{interleaveCode} + \lean{InterleavedCode} \end{definition} \begin{definition}[Reed-Solomon Code] @@ -32,6 +42,21 @@ \section{Coding Theory}\label{sec:coding_theory} \lean{ReedSolomon.code} \end{definition} +\begin{definition}[Smooth Reed-Solomon Code] + \label{def:smooth_rs_code} + \lean{smoothCode} +\end{definition} + +\begin{definition}[Constrained Code] + \label{def:constrained_code} + \lean{constrainedCode} +\end{definition} + +\begin{definition}[Multi-constrained Code] + \label{def:multi_constrained_code} + \lean{multiConstrainedCode} +\end{definition} + \begin{definition}[Proximity Measure] \label{def:proximity_measure} \lean{proximityMeasure} @@ -43,3 +68,15 @@ \section{Coding Theory}\label{sec:coding_theory} \lean{proximityGap} \uses{def:distance_from_code} \end{definition} + +\begin{definition}[List Decodability] + \label{def:list_decodable} + \lean{ListDecodable.listDecodable} + \uses{def:distance_from_code,def:code} +\end{definition} + +\begin{definition}[List of Close Codewords] + \label{def:list_close_codewords} + \lean{ListDecodable.relHammingBall} + \uses{def:distance_from_code,def:code} +\end{definition} diff --git a/blueprint/src/content.tex b/blueprint/src/content.tex index dd3e71acf..870e6accd 100644 --- a/blueprint/src/content.tex +++ b/blueprint/src/content.tex @@ -20,12 +20,22 @@ \chapter{Oracle Reductions}\label{chap:oracle_reductions} \input{oracle_reductions/composition} +\input{oracle_reductions/fiat_shamir} + \chapter{Proof Systems}\label{chap:proof_systems} \input{proof_systems/simple_protocols} \input{proof_systems/sumcheck} +\input{proof_systems/binius} + +\input{proof_systems/spartan} + +\input{proof_systems/stir} + +\input{proof_systems/whir} + \section{The Spartan Protocol} \section{The Ligero Polynomial Commitment Scheme} @@ -49,4 +59,3 @@ \chapter{Supporting Theories}\label{chap:supporting_theories} \input{vcv/defs} \chapter{References}\label{chap:references} - diff --git 
a/blueprint/src/figures/spartan-description.png b/blueprint/src/figures/spartan-description.png new file mode 100644 index 000000000..122784fe6 Binary files /dev/null and b/blueprint/src/figures/spartan-description.png differ diff --git a/blueprint/src/macros/common.tex b/blueprint/src/macros/common.tex index 249d25712..67976e01f 100644 --- a/blueprint/src/macros/common.tex +++ b/blueprint/src/macros/common.tex @@ -4,7 +4,7 @@ % The theorem-like environments defined below are those that appear by default % in the dependency graph. See the README of leanblueprint if you need help to -% customize this. +% customize this. % The configuration below use the theorem counter for all those environments % (this is what the [theorem] arguments mean) and never resets it. % If you want for instance to number them within chapters then you can add @@ -21,9 +21,52 @@ \newcommand{\Type}{\mathsf{Type}} \newcommand{\Dir}{\mathsf{Dir}} \newcommand{\OI}{\mathsf{OI}} -\newcommand{\PSpec}{\mathsf{PSpec}} -\newcommand{\OSpec}{\mathsf{OSpec}} -\newcommand{\OComp}{\mathsf{OComp}} +\newcommand{\PSpec}{\mathsf{ProtocolSpec}} +\newcommand{\ProtocolSpec}{\mathsf{ProtocolSpec}} +\newcommand{\OracleInterface}{\mathsf{OracleInterface}} +\newcommand{\OracleSpec}{\mathsf{OracleSpec}} +\newcommand{\OracleComp}{\mathsf{OracleComp}} + +\newcommand{\pSpec}{\mathsf{pSpec}} +\newcommand{\oSpec}{\mathsf{oSpec}} + +% Custom command for quotient type notation +\newcommand{\quotient}{\mathbin{/\!/}} + +% Common type shortcuts +\newcommand{\Fin}{\mathsf{Fin}} +\newcommand{\Unit}{\mathsf{Unit}} +\newcommand{\Bool}{\mathsf{Bool}} +\newcommand{\Prop}{\mathsf{Prop}} + +% Protocol and oracle related shortcuts +\newcommand{\Direction}{\mathsf{Direction}} +\newcommand{\PtoV}{\mathsf{P\_to\_V}} +\newcommand{\VtoP}{\mathsf{V\_to\_P}} +\newcommand{\MessageIdx}{\mathsf{MessageIdx}} +\newcommand{\ChallengeIdx}{\mathsf{ChallengeIdx}} +\newcommand{\Message}{\mathsf{Message}} +\newcommand{\Challenge}{\mathsf{Challenge}} 
+\newcommand{\Messages}{\mathsf{Messages}} +\newcommand{\Challenges}{\mathsf{Challenges}} +\newcommand{\Transcript}{\mathsf{Transcript}} +\newcommand{\FullTranscript}{\mathsf{FullTranscript}} +\newcommand{\Prover}{\mathsf{Prover}} +\newcommand{\Verifier}{\mathsf{Verifier}} +\newcommand{\OracleVerifier}{\mathsf{OracleVerifier}} +\newcommand{\OracleProver}{\mathsf{OracleProver}} +\newcommand{\Reduction}{\mathsf{Reduction}} +\newcommand{\OracleReduction}{\mathsf{OracleReduction}} + +% Oracle computation shortcuts +\newcommand{\OracleQuery}{\mathsf{OracleQuery}} +\newcommand{\query}{\mathsf{query}} +\newcommand{\pure}{\mathsf{pure}} +\newcommand{\bind}{\mathsf{bind}} + +% Common mathematical operators and relations +\newcommand{\defeq}{\mathrel{:=}} +\newcommand{\eqdef}{\mathrel{=:}} \newcommand{\B}{\mathbb{B}} \newcommand{\F}{\mathbb{F}} @@ -33,6 +76,60 @@ \newcommand{\Q}{\mathbb{Q}} \newcommand{\C}{\mathbb{C}} +\newcommand{\bba}{\mathbbm{a}} +\newcommand{\bbb}{\mathbbm{b}} +\newcommand{\bbc}{\mathbbm{c}} +\newcommand{\bbd}{\mathbbm{d}} +\newcommand{\bbe}{\mathbbm{e}} +\newcommand{\bbf}{\mathbbm{f}} +\newcommand{\bbg}{\mathbbm{g}} +\newcommand{\bbh}{\mathbbm{h}} +\newcommand{\bbi}{\mathbbm{i}} +\newcommand{\bbj}{\mathbbm{j}} +\newcommand{\bbk}{\mathbbm{k}} +\newcommand{\bbl}{\mathbbm{l}} +\newcommand{\bbm}{\mathbbm{m}} +\newcommand{\bbn}{\mathbbm{n}} +\newcommand{\bbo}{\mathbbm{o}} +\newcommand{\bbp}{\mathbbm{p}} +\newcommand{\bbq}{\mathbbm{q}} +\newcommand{\bbr}{\mathbbm{r}} +\newcommand{\bbs}{\mathbbm{s}} +\newcommand{\bbt}{\mathbbm{t}} +\newcommand{\bbu}{\mathbbm{u}} +\newcommand{\bbv}{\mathbbm{v}} +\newcommand{\bbw}{\mathbbm{w}} +\newcommand{\bbx}{\mathbbm{x}} +\newcommand{\bby}{\mathbbm{y}} +\newcommand{\bbz}{\mathbbm{z}} + +\newcommand{\bbA}{\mathbb{A}} +\newcommand{\bbB}{\mathbb{B}} +\newcommand{\bbC}{\mathbb{C}} +\newcommand{\bbD}{\mathbb{D}} +\newcommand{\bbE}{\mathbb{E}} +\newcommand{\bbF}{\mathbb{F}} +\newcommand{\bbG}{\mathbb{G}} 
+\newcommand{\bbH}{\mathbb{H}} +\newcommand{\bbI}{\mathbb{I}} +\newcommand{\bbJ}{\mathbb{J}} +\newcommand{\bbK}{\mathbb{K}} +\newcommand{\bbL}{\mathbb{L}} +\newcommand{\bbM}{\mathbb{M}} +\newcommand{\bbN}{\mathbb{N}} +\newcommand{\bbO}{\mathbb{O}} +\newcommand{\bbP}{\mathbb{P}} +\newcommand{\bbQ}{\mathbb{Q}} +\newcommand{\bbR}{\mathbb{R}} +\newcommand{\bbS}{\mathbb{S}} +\newcommand{\bbT}{\mathbb{T}} +\newcommand{\bbU}{\mathbb{U}} +\newcommand{\bbV}{\mathbb{V}} +\newcommand{\bbW}{\mathbb{W}} +\newcommand{\bbX}{\mathbb{X}} +\newcommand{\bbY}{\mathbb{Y}} +\newcommand{\bbZ}{\mathbb{Z}} + \newcommand{\calA}{\mathcal{A}} \newcommand{\calB}{\mathcal{B}} \newcommand{\calC}{\mathcal{C}} @@ -59,3 +156,103 @@ \newcommand{\calX}{\mathcal{X}} \newcommand{\calY}{\mathcal{Y}} \newcommand{\calZ}{\mathcal{Z}} + +% Additional protocol and oracle shortcuts from Basic.lean +\newcommand{\ProverState}{\mathsf{ProverState}} +\newcommand{\ProverIn}{\mathsf{ProverIn}} +\newcommand{\ProverRound}{\mathsf{ProverRound}} +\newcommand{\ProverOut}{\mathsf{ProverOut}} +\newcommand{\NonAdaptive}{\mathsf{NonAdaptive}} + +% Protocol specification components +\newcommand{\MessagesUpTo}{\mathsf{MessagesUpTo}} +\newcommand{\ChallengesUpTo}{\mathsf{ChallengesUpTo}} +\newcommand{\MessageIdxUpTo}{\mathsf{MessageIdxUpTo}} +\newcommand{\ChallengeIdxUpTo}{\mathsf{ChallengeIdxUpTo}} + +% Non-interactive protocol components +\newcommand{\NonInteractiveProver}{\mathsf{NonInteractiveProver}} +\newcommand{\NonInteractiveVerifier}{\mathsf{NonInteractiveVerifier}} +\newcommand{\NonInteractiveReduction}{\mathsf{NonInteractiveReduction}} + +% Common Lean types and functions +\newcommand{\List}{\mathsf{List}} +\newcommand{\Option}{\mathsf{Option}} +\newcommand{\Sum}{\mathsf{Sum}} +\newcommand{\Prod}{\mathsf{Prod}} +% \Sigma and \Pi already defined as mathematical symbols +\newcommand{\LeanSigma}{\mathsf{Sigma}} +\newcommand{\LeanPi}{\mathsf{Pi}} + +% Oracle computation related 
+\newcommand{\simulateQ}{\mathsf{simulateQ}} +\newcommand{\liftM}{\mathsf{liftM}} + +% Handle \mathbbm for plastex compatibility +% plastex doesn't support \mathbbm, so we ensure it falls back to \mathbb +\providecommand{\mathbbm}{\mathbb} + +% Note: For PDF compilation, bbm package in packages.tex will override this +% For web compilation with plastex, this provides the fallback + +% Protocol flow notation (from Basic.lean) +\newcommand{\PtoVmsg}[1]{\mathcal{P}——⟦#1⟧⟶\mathcal{V}} +\newcommand{\VtoPmsg}[1]{\mathcal{P}⟵⟦#1⟧——\mathcal{V}} + +% Security properties +\newcommand{\completeness}{\mathsf{completeness}} +\newcommand{\soundness}{\mathsf{soundness}} +\newcommand{\knowledgeSoundness}{\mathsf{knowledgeSoundness}} +\newcommand{\rbrSoundness}{\mathsf{rbrSoundness}} +\newcommand{\rbrKnowledgeSoundness}{\mathsf{rbrKnowledgeSoundness}} +\newcommand{\StateFunction}{\mathsf{StateFunction}} + +% Direction notation shortcuts +\newcommand{\PtoVdir}{\mathsf{P \!\!\rightarrow\!\! V}} +\newcommand{\VtoPdir}{\mathsf{V \!\!\rightarrow\!\! 
P}} + +\newcommand{\Statement}{\mathsf{Statement}} +\newcommand{\StmtIn}{\mathsf{StmtIn}} +\newcommand{\StmtOut}{\mathsf{StmtOut}} +\newcommand{\Witness}{\mathsf{Witness}} +\newcommand{\WitIn}{\mathsf{WitIn}} +\newcommand{\WitOut}{\mathsf{WitOut}} +\newcommand{\OStatement}{\mathsf{OStatement}} +\newcommand{\OStmtIn}{\mathsf{OStmtIn}} +\newcommand{\OStmtOut}{\mathsf{OStmtOut}} + +% Composition and lifting +\newcommand{\append}{\mathsf{append}} +\newcommand{\compose}{\mathsf{compose}} +\newcommand{\liftContext}{\mathsf{liftContext}} +\newcommand{\ContextLens}{\mathsf{Context.Lens}} +\newcommand{\StatementLens}{\mathsf{Statement.Lens}} +\newcommand{\WitnessLens}{\mathsf{Witness.Lens}} +\newcommand{\OracleContextLens}{\mathsf{OracleContext.Lens}} +\newcommand{\projStmt}{\mathsf{projStmt}} +\newcommand{\liftStmt}{\mathsf{liftStmt}} +\newcommand{\projWit}{\mathsf{projWit}} +\newcommand{\liftWit}{\mathsf{liftWit}} +\newcommand{\ContextLensIsComplete}{\mathsf{Context.Lens.IsComplete}} +\newcommand{\StatementLensIsSound}{\mathsf{Statement.Lens.IsSound}} +\newcommand{\StatementLensIsRBRSound}{\mathsf{Statement.Lens.IsSound}} +\newcommand{\ContextLensIsKnowledgeSound}{\mathsf{Context.Lens.IsKnowledgeSound}} +\newcommand{\ContextLensIsRBRKnowledgeSound}{\mathsf{Context.Lens.IsRBRKnowledgeSound}} + +% coding theory related parameters +\newcommand{\code}{\mathcal{C}} +\newcommand{\listcode}{\mathsf{List}} +\newcommand{\field}{\mathbb{F}} +\newcommand{\evaldomain}{\mathcal{L}} +\newcommand{\degree}{d} +\newcommand{\rscode}{\mathrm{RS}} +\newcommand{\crscode}{\mathrm{CRS}} +\newcommand{\rate}{\rho} +\newcommand{\distance}{\delta} +\newcommand{\err}{\mathsf{err}} +\newcommand{\bound}{\mathsf{B}} +\newcommand{\polydeg}{\mathsf{deg}} +\newcommand{\combine}{\mathsf{Combine}} +\newcommand{\degcorr}{\mathsf{DegCor}} +\newcommand{\parl}{\mathsf{par}\ell} +\newcommand{\gen}{\mathsf{Gen}} diff --git a/blueprint/src/macros/packages.tex b/blueprint/src/macros/packages.tex index 
aeb2fdafb..a9f92a022 100644 --- a/blueprint/src/macros/packages.tex +++ b/blueprint/src/macros/packages.tex @@ -1,4 +1,6 @@ \usepackage{amssymb, amsthm, mathrsfs, mathtools} +\usepackage{bbm} +\usepackage{dsfont} % [unicode,colorlinks=true,linkcolor=blue,urlcolor=magenta, citecolor=blue] \usepackage{hyperref} \usepackage{cleveref} @@ -7,5 +9,6 @@ \usepackage{listings} \usepackage{tikz} \usepackage{tikz-cd} +\usepackage{xspace} % \usepackage{mathpartir} -\usepackage{comment} \ No newline at end of file +\usepackage{comment} diff --git a/blueprint/src/oracle_reductions/composition.tex b/blueprint/src/oracle_reductions/composition.tex index 5bc2530c2..39ac2d3d5 100644 --- a/blueprint/src/oracle_reductions/composition.tex +++ b/blueprint/src/oracle_reductions/composition.tex @@ -1,171 +1,495 @@ -\section{A Program Logic for Oracle Reductions} - -In this section, we describe a program logic for reasoning about oracle reductions. In other words, we define a number of rules (or theorems) that govern how oracle reductions can be composed to form larger reductions, and how the resulting reduction inherits the security properties of the components. - -The first group of rules changes relations and shared oracles. - -\subsection{Changing Relations and Oracles} - -Here we express the consequence rule. Namely, if we have an oracle reduction for -\(\mathcal{R}_1 \implies \mathcal{R}_2\), along with -\(\mathcal{R}_1' \implies \mathcal{R}_1\) and \(\mathcal{R}_2 \implies \mathcal{R}_2'\), -then we obtain an oracle reduction for \(\mathcal{R}_1' \implies \mathcal{R}_2'\). 
- -\[ -\frac{% - \begin{array}{c} - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\}\; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle^{\mathcal{O}} : \tau \;\{\!\!\{\mathcal{R}_2; \,\mathsf{St};\, \epsilon\}\!\!\} \\[1.5ex] - \mathcal{R}_1' \implies \mathcal{R}_1 \\[1.5ex] - \mathcal{R}_2 \implies \mathcal{R}_2' - \end{array}% -}{% - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1'\}\; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle^{\mathcal{O}} : \tau \;\{\!\!\{\mathcal{R}_2'; \,\mathsf{St};\, \epsilon\}\!\!\} -} \quad \text{(Conseq)} -\] - -% Lifting shared oracles: if $\mathcal{O}_1 \subset \mathcal{O}_2$, then reductions with $\mathcal{O}_1$ lift to reductions with $\mathcal{O}_2$ with the same security properties - -% Frame rule -\[ -\frac{% - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} -}{% - \Psi; \Theta; \Sigma \vdash \{\mathcal{R} \times \mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R} \times \mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} -} \quad \text{(Frame)} -\] - -% Oracle lifting rule -\[ -\frac{% - \begin{array}{c} - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}_1} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} \\[1.5ex] - \mathcal{O}_1 \subset \mathcal{O}_2 - \end{array}% -}{% - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}_2} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} -} \quad \text{(Oracle-Lift)} -\] - -TODO: figure out how the state function needs to change for these rules (they are basically the same, but not exactly) - -\subsection{Sequential Composition} - -The reason why we consider interactive (oracle) reductions at the core of our formalism is that we -can 
\emph{compose} these reductions to form larger reductions. Equivalently, we can take a complex -\emph{interactive (oracle) proof} (which differs only in that it reduces a relation to the -\emph{trivial} relation that always outputs true) and break it down into a series of smaller -reductions. The advantage of this approach is that we can prove security properties (completeness -and soundness) for each of the smaller reductions, and these properties will automatically transfer -to the larger reductions. - -This section is devoted to the composition of interactive (oracle) reductions, and proofs that the -resulting reductions inherit the security properties of the two (or more) constituent reductions. - -Sequential composition can be expressed as the folowing rule: - -\[ -\frac{% - \begin{array}{c} - \Psi; \Theta; \varSigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}_1, \mathcal{V}_1, \mathcal{E}_1\rangle^{\mathcal{O}} : \tau_1 \; \{\!\!\{\mathcal{R}_2; \mathsf{St}_1; \epsilon_1\}\!\!\} \\[1.5ex] - \Psi; (\Theta :: \tau_1) ; \varSigma \vdash \{\mathcal{R}_2\} \; \langle\mathcal{P}_2, \mathcal{V}_2, \mathcal{E}_2\rangle^{\mathcal{O}} : \tau_2 \; \{\!\!\{\mathcal{R}_3; \mathsf{St}_2; \epsilon_2\}\!\!\} - \end{array}% -}{% - \Psi; (\Theta :: \tau_1 :: \tau_2) \ ; \varSigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}_1 \circ \mathcal{P}_2, \mathcal{V}_1 \circ \mathcal{V}_2, \mathcal{E}_1 \circ_{\mathcal{V}_2} \mathcal{E}_2\rangle^{\mathcal{O}} : \tau_1 \oplus \tau_2 \; \{\!\!\{\mathcal{R}_3; \mathsf{St}_1 \oplus \mathsf{St}_2; \epsilon_1 \oplus \epsilon_2\}\!\!\} -} \quad \text{(Seq-Comp)} -\] - - -% \begin{definition}[Sequential Composition of Protocol Type Signatures] -% \label{def:protocol_spec_composition} -% \lean{ProtocolSpec.append} -% \end{definition} +\section{Composition of Oracle Reductions}\label{sec:composition_oracle_reductions} -% \begin{definition}[Sequential Composition of Provers] -% \label{def:prover_composition} -% \lean{Prover.append} -% 
\end{definition} +In this section, we describe a suite of composition operators for building secure oracle reductions from simpler secure components. In other words, we define a number of definitions that govern how oracle reductions can be composed to form larger reductions, and how the resulting reduction inherits the security properties of the components. -% \begin{definition}[Sequential Composition of Oracle Verifiers] -% \label{def:oracle_verifier_composition} -% \lean{OracleVerifier.append} -% \end{definition} +% The first group of rules changes relations and shared oracles. -% \begin{definition}[Sequential Composition of Oracle Reductions] -% \label{def:oracle_reduction_composition} -% \lean{Reduction.append} -% \end{definition} +% \subsection{Changing Relations and Oracles} -\subsection{Virtualization} +% Here we express the consequence rule. Namely, if we have an oracle reduction for +% \(\mathcal{R}_1 \implies \mathcal{R}_2\), along with +% \(\mathcal{R}_1' \implies \mathcal{R}_1\) and \(\mathcal{R}_2 \implies \mathcal{R}_2'\), +% then we obtain an oracle reduction for \(\mathcal{R}_1' \implies \mathcal{R}_2'\). -Another tool we will repeatedly use is the ability to change the context of an oracle reduction. This is often needed when we want to adapt an oracle reduction in a simple context into one for a more complex context. +% \[ +% \frac{% +% \begin{array}{c} +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\}\; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle^{\mathcal{O}} : \tau \;\{\!\!\{\mathcal{R}_2; \,\mathsf{St};\, \epsilon\}\!\!\} \\[1.5ex] +% \mathcal{R}_1' \implies \mathcal{R}_1 \\[1.5ex] +% \mathcal{R}_2 \implies \mathcal{R}_2' +% \end{array}% +% }{% +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1'\}\; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle^{\mathcal{O}} : \tau \;\{\!\!\{\mathcal{R}_2'; \,\mathsf{St};\, \epsilon\}\!\!\} +% } \quad \text{(Conseq)} +% \] -See the section on sum-check~\ref{sec:sumcheck} for an example. 
+% % Lifting shared oracles: if $\mathcal{O}_1 \subset \mathcal{O}_2$, then reductions with $\mathcal{O}_1$ lift to reductions with $\mathcal{O}_2$ with the same security properties -\begin{definition}[Mapping into Virtual Context] - \label{def:virtual_context_mapping} - In order to apply an oracle reduction on virtual data, we will need to provide a mapping from the current context to the virtual context. This includes: - \begin{itemize} - \item A mapping from the current public inputs to the virtual public inputs. - \item A simulation of the oracle inputs for the virtual context using the public and oracle - inputs for the current context. - \item A mapping from the current private inputs to the virtual private inputs. - \item A simulation of the shared oracle for the virtual context using the shared oracle for - the current context. - \end{itemize} +% % Frame rule +% \[ +% \frac{% +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} +% }{% +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R} \times \mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R} \times \mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} +% } \quad \text{(Frame)} +% \] + +% % Oracle lifting rule +% \[ +% \frac{% +% \begin{array}{c} +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}_1} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} \\[1.5ex] +% \mathcal{O}_1 \subset \mathcal{O}_2 +% \end{array}% +% }{% +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}_2} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} +% } \quad \text{(Oracle-Lift)} +% \] + +% TODO: figure out how the state function needs to change for these rules (they are basically the same, but not exactly) 
+ +% \subsection{Sequential Composition} + +% The reason why we consider interactive (oracle) reductions at the core of our formalism is that we +% can \emph{compose} these reductions to form larger reductions. Equivalently, we can take a complex +% \emph{interactive (oracle) proof} (which differs only in that it reduces a relation to the +% \emph{trivial} relation that always outputs true) and break it down into a series of smaller +% reductions. The advantage of this approach is that we can prove security properties (completeness +% and soundness) for each of the smaller reductions, and these properties will automatically transfer +% to the larger reductions. + +% This section is devoted to the composition of interactive (oracle) reductions, and proofs that the +% resulting reductions inherit the security properties of the two (or more) constituent reductions. + +% Sequential composition can be expressed as the following rule: + +% \[ +% \frac{% +% \begin{array}{c} +% \Psi; \Theta; \varSigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}_1, \mathcal{V}_1, \mathcal{E}_1\rangle^{\mathcal{O}} : \tau_1 \; \{\!\!\{\mathcal{R}_2; \mathsf{St}_1; \epsilon_1\}\!\!\} \\[1.5ex] +% \Psi; (\Theta :: \tau_1) ; \varSigma \vdash \{\mathcal{R}_2\} \; \langle\mathcal{P}_2, \mathcal{V}_2, \mathcal{E}_2\rangle^{\mathcal{O}} : \tau_2 \; \{\!\!\{\mathcal{R}_3; \mathsf{St}_2; \epsilon_2\}\!\!\} +% \end{array}% +% }{% +% \Psi; (\Theta :: \tau_1 :: \tau_2) \ ; \varSigma \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}_1 \circ \mathcal{P}_2, \mathcal{V}_1 \circ \mathcal{V}_2, \mathcal{E}_1 \circ_{\mathcal{V}_2} \mathcal{E}_2\rangle^{\mathcal{O}} : \tau_1 \oplus \tau_2 \; \{\!\!\{\mathcal{R}_3; \mathsf{St}_1 \oplus \mathsf{St}_2; \epsilon_1 \oplus \epsilon_2\}\!\!\} +% } \quad \text{(Seq-Comp)} +% \] + +% % \begin{definition}[Sequential Composition of Protocol Type Signatures] +% % \label{def:protocol_spec_composition} +% % \lean{ProtocolSpec.append} +% % \end{definition} + +% % 
\begin{definition}[Sequential Composition of Provers] +% % \label{def:prover_composition} +% % \lean{Prover.append} +% % \end{definition} + +% % \begin{definition}[Sequential Composition of Oracle Verifiers] +% % \label{def:oracle_verifier_composition} +% % \lean{OracleVerifier.append} +% % \end{definition} + +% % \begin{definition}[Sequential Composition of Oracle Reductions] +% % \label{def:oracle_reduction_composition} +% % \lean{Reduction.append} +% % \end{definition} + +\subsection{Sequential Composition}\label{sec:sequential_composition} + +Sequential composition allows us to chain together oracle reductions where the output context of one reduction becomes the input context of the next reduction. This is fundamental for building complex protocols from simpler components. + +\subsubsection{Composition of Protocol Specifications} + +We begin by defining how to compose protocol specifications and their associated structures. + +\begin{definition}[Protocol Specification Append] + \label{def:protocol_spec_append} + Given two protocol specifications $\pSpec_1 : \ProtocolSpec\ m$ and $\pSpec_2 : \ProtocolSpec\ n$, their sequential composition is: + \[ \pSpec_1 \mathrel{++_\mathsf{p}} \pSpec_2 : \ProtocolSpec\ (m + n) \] + \lean{ProtocolSpec.append} + \uses{def:protocol_spec} +\end{definition} + +\begin{definition}[Full Transcript Append] + \label{def:transcript_append} + Given full transcripts $T_1 : \FullTranscript\ \pSpec_1$ and $T_2 : \FullTranscript\ \pSpec_2$, their sequential composition is: + \[ T_1 \mathrel{++_\mathsf{t}} T_2 : \FullTranscript\ (\pSpec_1 \mathrel{++_\mathsf{p}} \pSpec_2) \] + \lean{ProtocolSpec.FullTranscript.append} + \uses{def:transcript} \end{definition} -\begin{definition}[Virtual Oracle Reduction] - \label{def:virtual_oracle_reduction} - Given a suitable mapping into a virtual context, we may define an oracle reduction via the following construction: +\subsubsection{Composition of Provers and Verifiers} + +\begin{definition}[Prover 
Append] + \label{def:prover_append} + Given provers $P_1 : \Prover\ \pSpec_1\ \oSpec\ \StmtIn_1\ \WitIn_1\ \StmtOut_1\ \WitOut_1$ and $P_2 : \Prover\ \pSpec_2\ \oSpec\ \StmtOut_1\ \WitOut_1\ \StmtOut_2\ \WitOut_2$, their sequential composition is: + \[ P_1.\append\ P_2 : \Prover\ (\pSpec_1 \mathrel{++_\mathsf{p}} \pSpec_2)\ \oSpec\ \StmtIn_1\ \WitIn_1\ \StmtOut_2\ \WitOut_2 \] + + The composed prover works by: \begin{itemize} - \item The prover first applies the mappings to obtain the virtual context. The verifier does the same, but only for the non-private inputs. - \item The prover and verifier then run the virtual oracle reduction on the virtual context. + \item Running $P_1$ on the input context to produce an intermediate context + \item Using this intermediate context as input to $P_2$ + \item Outputting the final context from $P_2$ \end{itemize} + \lean{Prover.append} + \uses{def:prover, def:protocol_spec_append} \end{definition} -We will show security properties for this virtualization process. One can see that completeness and soundness are inherited from the completeness and soundness of the virtual oracle reduction. However, (round-by-round) knowledge soundness is more tricky; this is because we must extract back to the witness of the original context from the virtual context. +\begin{definition}[Verifier Append] + \label{def:verifier_append} + Given verifiers $V_1 : \Verifier\ \pSpec_1\ \oSpec\ \StmtIn_1\ \StmtOut_1$ and $V_2 : \Verifier\ \pSpec_2\ \oSpec\ \StmtOut_1\ \StmtOut_2$, their sequential composition is: + \[ V_1.\append\ V_2 : \Verifier\ (\pSpec_1 \mathrel{++_\mathsf{p}} \pSpec_2)\ \oSpec\ \StmtIn_1\ \StmtOut_2 \] -% virtual-ctx rule of the form: if we have an oracle reduction from R1 to R2 for context prime, a mapping `f` from ctx to ctx prime, and an inverse mapping `g` from witness of ctx prime to witness of ctx, then we have an oracle reduction from (R1 \circ f) to (R2 \circ g) for ctx. 
The prover & verifier & the state function are composed with `f`, and the extractor needs to be composed with both `f` and `g`. + The composed verifier first runs $V_1$ on the first part of the transcript, then runs $V_2$ on the second part using the intermediate statement from $V_1$. + \lean{Verifier.append} + \uses{def:verifier, def:protocol_spec_append} +\end{definition} -% Virtualization rule -\[ -\frac{% - \begin{array}{c} - \Psi'; \Theta'; \Sigma' \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} \\[1.5ex] - f : (\Psi, \Theta, \Sigma) \to (\Psi', \Theta', \Sigma') \\[1.5ex] - g : \Psi' \to \Psi \\[1.5ex] - f.\mathsf{fst} \circ g = \mathsf{id} - \end{array}% -}{% - \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1 \circ f\} \; \langle\mathcal{P} \circ f, \mathcal{V} \circ f, \mathcal{E} \circ (f, g)\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2 \circ f; \mathsf{St} \circ f; \epsilon\}\!\!\} -} \quad \text{(Virtual-Ctx)} -\] +\begin{definition}[Reduction Append] + \label{def:reduction_append} + Sequential composition of reductions combines the corresponding provers and verifiers: + \[ R_1.\append\ R_2 : \Reduction\ (\pSpec_1 \mathrel{++_\mathsf{p}} \pSpec_2)\ \oSpec\ \StmtIn_1\ \WitIn_1\ \StmtOut_2\ \WitOut_2 \] + \lean{Reduction.append} + \uses{def:reduction, def:prover_append, def:verifier_append} +\end{definition} -\subsection{Substitution} +\begin{definition}[Oracle Reduction Append] + \label{def:oracle_reduction_append} + Sequential composition extends naturally to oracle reductions by composing the oracle provers and oracle verifiers. + \lean{OracleReduction.append} + \uses{def:oracle_reduction, def:prover_append, def:verifier_append} +\end{definition} + +\subsubsection{General Sequential Composition} + +For composing an arbitrary number of reductions, we provide a general composition operation. 
+ +\begin{definition}[General Protocol Specification Composition] + \label{def:protocol_spec_compose} + Given a family of protocol specifications $\pSpec : \forall i : \Fin(m+1), \ProtocolSpec\ (n\ i)$, their composition is: + \[ \compose\ m\ n\ \pSpec : \ProtocolSpec\ (\sum_{i} n\ i) \] + \lean{ProtocolSpec.seqCompose} + \uses{def:protocol_spec} +\end{definition} + +\begin{definition}[General Prover Composition] + \label{def:prover_compose} + \lean{Prover.seqCompose} + \uses{def:prover, def:protocol_spec_compose} +\end{definition} + +\begin{definition}[General Verifier Composition] + \label{def:verifier_compose} + \lean{Verifier.seqCompose} + \uses{def:verifier, def:protocol_spec_compose} +\end{definition} + +\begin{definition}[General Reduction Composition] + \label{def:reduction_compose} + \lean{Reduction.seqCompose} + \uses{def:reduction, def:prover_compose, def:verifier_compose} +\end{definition} + +\subsubsection{Security Properties of Sequential Composition} + +The key insight is that security properties are preserved under sequential composition. + +\begin{theorem}[Completeness Preservation under Append] + \label{thm:completeness_append} + If reductions $R_1$ and $R_2$ satisfy completeness with compatible relations and respective errors $\epsilon_1$ and $\epsilon_2$, then their sequential composition $R_1.\append\ R_2$ satisfies completeness with error $\epsilon_1 + \epsilon_2$. + \lean{Reduction.completeness_append} + \uses{def:completeness, def:reduction_append} +\end{theorem} -Finally, we need a transformation / inference rule that allows us to change the message type in a given round of an oracle reduction. In other words, we substitute a value in the round with another value, followed by a reduction establishing the relationship between the new and old values. 
+\begin{theorem}[Perfect Completeness Preservation under Append] + \label{thm:perfect_completeness_append} + If reductions $R_1$ and $R_2$ satisfy perfect completeness with compatible relations, then their sequential composition also satisfies perfect completeness. + \lean{Reduction.perfectCompleteness_append} + \uses{def:perfect_completeness, def:reduction_append} +\end{theorem} -% May need multiple rules for different types of substitutions (i.e. whether in context or in protocol type, where replacing oracle / public message with oracle / public / private message, etc.) +\begin{theorem}[Soundness Preservation under Append] + \label{thm:soundness_append} + If verifiers $V_1$ and $V_2$ satisfy soundness with respective errors $\epsilon_1$ and $\epsilon_2$, then their sequential composition satisfies soundness with error $\epsilon_1 + \epsilon_2$. + \lean{Verifier.append_soundness} + \uses{def:soundness, def:verifier_append} +\end{theorem} -Examples include: -\begin{enumerate} - \item Substituting an oracle input by a public input: +\begin{theorem}[Knowledge Soundness Preservation under Append] + \label{thm:knowledge_soundness_append} + If verifiers $V_1$ and $V_2$ satisfy knowledge soundness with respective errors $\epsilon_1$ and $\epsilon_2$, then their sequential composition satisfies knowledge soundness with error $\epsilon_1 + \epsilon_2$. + \lean{Verifier.append_knowledgeSoundness} + \uses{def:knowledge_soundness, def:verifier_append} +\end{theorem} + +\begin{theorem}[Round-by-Round Soundness Preservation under Append] + \label{thm:rbr_soundness_append} + If verifiers $V_1$ and $V_2$ satisfy round-by-round soundness, then their sequential composition also satisfies round-by-round soundness. 
+ \lean{Verifier.append_rbrSoundness} + \uses{def:round_by_round_soundness, def:verifier_append} +\end{theorem} + +\begin{theorem}[Round-by-Round Knowledge Soundness Preservation under Append] + \label{thm:rbr_knowledge_soundness_append} + If verifiers $V_1$ and $V_2$ satisfy round-by-round knowledge soundness, then their sequential composition also satisfies round-by-round knowledge soundness. + \lean{Verifier.append_rbrKnowledgeSoundness} + \uses{def:round_by_round_knowledge_soundness, def:verifier_append} +\end{theorem} + +Similar preservation theorems hold for the general composition of multiple reductions: + +\begin{theorem}[General Completeness Preservation] + \label{thm:completeness_compose} + \lean{Reduction.completeness_seqCompose} + \uses{def:completeness, def:reduction_compose} +\end{theorem} + +\begin{theorem}[General Soundness Preservation] + \label{thm:soundness_compose} + \lean{Verifier.seqCompose_soundness} + \uses{def:soundness, def:verifier_compose} +\end{theorem} + +\begin{theorem}[General Knowledge Soundness Preservation] + \label{thm:knowledge_soundness_compose} + \lean{Verifier.seqCompose_knowledgeSoundness} + \uses{def:knowledge_soundness, def:verifier_compose} +\end{theorem} + +\subsection{Lifting Contexts}\label{sec:lifting_contexts} + +Another essential tool for modular oracle reductions is the ability to adapt reductions from one context to another. This allows us to apply reductions designed for simple contexts to more complex scenarios. + +\subsubsection{Context Lenses} + +The fundamental abstraction for context adaptation is a \emph{context lens}, which provides bidirectional mappings between outer and inner contexts. 
+ +\begin{definition}[Statement Lens] + \label{def:statement_lens} + A statement lens between outer context types $(\StmtIn_{\mathsf{outer}}, \StmtOut_{\mathsf{outer}})$ and inner context types $(\StmtIn_{\mathsf{inner}}, \StmtOut_{\mathsf{inner}})$ consists of: \begin{itemize} - \item Often by just revealing the underlying data. This has no change on the prover, and for - the verifier, this means that any query to the oracle input can be locally computed. - \item A variant of this is when the oracle input consists of a data along with a proof that - the data satisfies some predicate. In this case, the verifier needs to additionally check - that the predicate holds for the substituted data. - \item Another common substitution is to replace a vector with its Merkle commitment, or a - polynomial with its polynomial commitment. + \item $\projStmt : \StmtIn_{\mathsf{outer}} \to \StmtIn_{\mathsf{inner}}$ (projection to inner context) + \item $\liftStmt : \StmtIn_{\mathsf{outer}} \times \StmtOut_{\mathsf{inner}} \to \StmtOut_{\mathsf{outer}}$ (lifting back to outer context) \end{itemize} - \item Substituting an oracle input by another oracle input, followed by a reduction for each - oracle query the verifier makes to the old oracle: + \lean{Statement.Lens} +\end{definition} + +\begin{definition}[Witness Lens] + \label{def:witness_lens} + A witness lens between outer witness types $(\WitIn_{\mathsf{outer}}, \WitOut_{\mathsf{outer}})$ and inner witness types $(\WitIn_{\mathsf{inner}}, \WitOut_{\mathsf{inner}})$ consists of: \begin{itemize} - \item This is also a variant of the previous case, where we do not fully substitute with a - public input, but do a ``half-substitution'' by substituting with another oracle input. This - happens e.g. when using a polynomial commitment scheme that is itself based on a vector - commitment scheme. One can cast protocols like Ligero / Brakedown / FRI / STIR in this - two-step process. 
+ \item $\projWit : \WitIn_{\mathsf{outer}} \to \WitIn_{\mathsf{inner}}$ (projection to inner context) + \item $\liftWit : \WitIn_{\mathsf{outer}} \times \WitOut_{\mathsf{inner}} \to \WitOut_{\mathsf{outer}}$ (lifting back to outer context) \end{itemize} -\end{enumerate} \ No newline at end of file + \lean{Witness.Lens} +\end{definition} + +\begin{definition}[Context Lens] + \label{def:context_lens} + A context lens combines a statement lens and a witness lens for adapting complete reduction contexts. + \lean{Context.Lens} + \uses{def:statement_lens, def:witness_lens} +\end{definition} + +\begin{definition}[Oracle Context Lens] + \label{def:oracle_context_lens} + For oracle reductions, we additionally need lenses for oracle statements that can simulate oracle access between contexts. + \lean{OracleContext.Lens} + \uses{def:context_lens, def:oracle_interface} +\end{definition} + +\subsubsection{Lifting Reductions} + +Given a context lens, we can lift reductions from inner contexts to outer contexts. + +\begin{definition}[Prover Context Lifting] + \label{def:prover_lift_context} + Given a prover $P$ for the inner context and a context lens, the lifted prover works by: + \begin{itemize} + \item Projecting the outer input to the inner context + \item Running the inner prover + \item Lifting the output back to the outer context + \end{itemize} + \lean{Prover.liftContext} + \uses{def:prover, def:context_lens} +\end{definition} + +\begin{definition}[Verifier Context Lifting] + \label{def:verifier_lift_context} + \lean{Verifier.liftContext} + \uses{def:verifier, def:statement_lens} +\end{definition} + +\begin{definition}[Reduction Context Lifting] + \label{def:reduction_lift_context} + \lean{Reduction.liftContext} + \uses{def:reduction, def:prover_lift_context, def:verifier_lift_context} +\end{definition} + +\subsubsection{Conditions for Security Preservation} + +For lifting to preserve security properties, the context lens must satisfy certain conditions. 
+ +\begin{definition}[Completeness-Preserving Context Lens] + \label{def:context_lens_is_complete} + A context lens preserves completeness if it maintains relation satisfaction under projection and lifting. + \lean{Context.Lens.IsComplete} + \uses{def:context_lens} +\end{definition} + +\begin{definition}[Soundness-Preserving Statement Lens] + \label{def:statement_lens_is_sound} + A statement lens preserves soundness if it maps invalid statements to invalid statements. + \lean{Statement.Lens.IsSound} + \uses{def:statement_lens} +\end{definition} + +\begin{definition}[RBR Soundness-Preserving Statement Lens] + \label{def:statement_lens_is_rbr_sound} + For round-by-round soundness, we need a slightly relaxed soundness condition. + \lean{Statement.Lens.IsSound} + \uses{def:statement_lens} +\end{definition} + +\begin{definition}[Knowledge Soundness-Preserving Context Lens] + \label{def:context_lens_is_knowledge_sound} + A context lens preserves knowledge soundness if it maintains witness extractability. + \lean{Extractor.Lens.IsKnowledgeSound} + \uses{def:context_lens} +\end{definition} + +\subsubsection{Security Preservation Theorems for Context Lifting} + +\begin{theorem}[Completeness Preservation under Context Lifting] + \label{thm:lift_context_completeness} + If a reduction satisfies completeness and the context lens is completeness-preserving, then the lifted reduction also satisfies completeness. + \lean{Reduction.liftContext_completeness} + \uses{def:completeness, def:reduction_lift_context, def:context_lens_is_complete} +\end{theorem} + +\begin{theorem}[Soundness Preservation under Context Lifting] + \label{thm:lift_context_soundness} + If a verifier satisfies soundness and the statement lens is soundness-preserving, then the lifted verifier also satisfies soundness. 
+ \lean{Verifier.liftContext_soundness} + \uses{def:soundness, def:verifier_lift_context, def:statement_lens_is_sound} +\end{theorem} + +\begin{theorem}[Knowledge Soundness Preservation under Context Lifting] + \label{thm:lift_context_knowledge_soundness} + If a verifier satisfies knowledge soundness and the context lens is knowledge soundness-preserving, then the lifted verifier also satisfies knowledge soundness. + \lean{Verifier.liftContext_knowledgeSoundness} + \uses{def:knowledge_soundness, def:verifier_lift_context, def:context_lens_is_knowledge_sound} +\end{theorem} + +\begin{theorem}[RBR Soundness Preservation under Context Lifting] + \label{thm:lift_context_rbr_soundness} + If a verifier satisfies round-by-round soundness and the statement lens is RBR soundness-preserving, then the lifted verifier also satisfies round-by-round soundness. + \lean{Verifier.liftContext_rbr_soundness} + \uses{def:round_by_round_soundness, def:verifier_lift_context, def:statement_lens_is_rbr_sound} +\end{theorem} + +\begin{theorem}[RBR Knowledge Soundness Preservation under Context Lifting] + \label{thm:lift_context_rbr_knowledge_soundness} + If a verifier satisfies round-by-round knowledge soundness and the context lens is knowledge soundness-preserving, then the lifted verifier also satisfies round-by-round knowledge soundness. + \lean{Verifier.liftContext_rbr_knowledgeSoundness} + \uses{def:round_by_round_knowledge_soundness, def:verifier_lift_context, def:context_lens_is_knowledge_sound} +\end{theorem} + +\subsubsection{Extractors and State Functions} + +Context lifting also applies to extractors and state functions used in knowledge soundness and round-by-round soundness. 
+ +\begin{definition}[Straightline Extractor Lifting] + \label{def:straightline_extractor_lift_context} + \lean{Extractor.Straightline.liftContext} + \uses{def:straightline_extractor, def:context_lens} +\end{definition} + +\begin{definition}[Round-by-Round Extractor Lifting] + \label{def:rbr_extractor_lift_context} + \lean{Extractor.RoundByRound.liftContext} + \uses{def:rbr_extractor, def:context_lens} +\end{definition} + +\begin{definition}[State Function Lifting] + \label{def:state_function_lift_context} + \lean{Verifier.StateFunction.liftContext} + \uses{def:state_function, def:statement_lens} +\end{definition} + +These composition and lifting operators provide the essential building blocks for constructing complex oracle reductions from simpler components while preserving their security properties. + +% \subsection{Virtualization} + +% Another tool we will repeatedly use is the ability to change the context of an oracle reduction. This is often needed when we want to adapt an oracle reduction in a simple context into one for a more complex context. + +% See the section on sum-check~\ref{sec:sumcheck} for an example. + +% \begin{definition}[Mapping into Virtual Context] +% \label{def:virtual_context_mapping} +% In order to apply an oracle reduction on virtual data, we will need to provide a mapping from the current context to the virtual context. This includes: +% \begin{itemize} +% \item A mapping from the current public inputs to the virtual public inputs. +% \item A simulation of the oracle inputs for the virtual context using the public and oracle +% inputs for the current context. +% \item A mapping from the current private inputs to the virtual private inputs. +% \item A simulation of the shared oracle for the virtual context using the shared oracle for +% the current context. 
+% \end{itemize} +% \end{definition} + +% \begin{definition}[Virtual Oracle Reduction] +% \label{def:virtual_oracle_reduction} +% Given a suitable mapping into a virtual context, we may define an oracle reduction via the following construction: +% \begin{itemize} +% \item The prover first applies the mappings to obtain the virtual context. The verifier does the same, but only for the non-private inputs. +% \item The prover and verifier then run the virtual oracle reduction on the virtual context. +% \end{itemize} +% \end{definition} + +% We will show security properties for this virtualization process. One can see that completeness and soundness are inherited from the completeness and soundness of the virtual oracle reduction. However, (round-by-round) knowledge soundness is more tricky; this is because we must extract back to the witness of the original context from the virtual context. + +% % virtual-ctx rule of the form: if we have an oracle reduction from R1 to R2 for context prime, a mapping `f` from ctx to ctx prime, and an inverse mapping `g` from witness of ctx prime to witness of ctx, then we have an oracle reduction from (R1 \circ f) to (R2 \circ g) for ctx. The prover & verifier & the state function are composed with `f`, and the extractor needs to be composed with both `f` and `g`. 
+ +% % Virtualization rule +% \[ +% \frac{% +% \begin{array}{c} +% \Psi'; \Theta'; \Sigma' \vdash \{\mathcal{R}_1\} \; \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} \\[1.5ex] +% f : (\Psi, \Theta, \Sigma) \to (\Psi', \Theta', \Sigma') \\[1.5ex] +% g : \Psi' \to \Psi \\[1.5ex] +% f.\mathsf{fst} \circ g = \mathsf{id} +% \end{array}% +% }{% +% \Psi; \Theta; \Sigma \vdash \{\mathcal{R}_1 \circ f\} \; \langle\mathcal{P} \circ f, \mathcal{V} \circ f, \mathcal{E} \circ (f, g)\rangle^{\mathcal{O}} : \tau \; \{\!\!\{\mathcal{R}_2 \circ f; \mathsf{St} \circ f; \epsilon\}\!\!\} +% } \quad \text{(Virtual-Ctx)} +% \] + +% \subsection{Substitution} + +% Finally, we need a transformation / inference rule that allows us to change the message type in a given round of an oracle reduction. In other words, we substitute a value in the round with another value, followed by a reduction establishing the relationship between the new and old values. + +% % May need multiple rules for different types of substitutions (i.e. whether in context or in protocol type, where replacing oracle / public message with oracle / public / private message, etc.) + +% Examples include: +% \begin{enumerate} +% \item Substituting an oracle input by a public input: +% \begin{itemize} +% \item Often by just revealing the underlying data. This has no change on the prover, and for +% the verifier, this means that any query to the oracle input can be locally computed. +% \item A variant of this is when the oracle input consists of a data along with a proof that +% the data satisfies some predicate. In this case, the verifier needs to additionally check +% that the predicate holds for the substituted data. +% \item Another common substitution is to replace a vector with its Merkle commitment, or a +% polynomial with its polynomial commitment. 
+% \end{itemize} +% \item Substituting an oracle input by another oracle input, followed by a reduction for each +% oracle query the verifier makes to the old oracle: +% \begin{itemize} +% \item This is also a variant of the previous case, where we do not fully substitute with a +% public input, but do a ``half-substitution'' by substituting with another oracle input. This +% happens e.g. when using a polynomial commitment scheme that is itself based on a vector +% commitment scheme. One can cast protocols like Ligero / Brakedown / FRI / STIR in this +% two-step process. +% \end{itemize} +% \end{enumerate} diff --git a/blueprint/src/oracle_reductions/defs.tex b/blueprint/src/oracle_reductions/defs.tex index b86a65243..dc13e23c2 100644 --- a/blueprint/src/oracle_reductions/defs.tex +++ b/blueprint/src/oracle_reductions/defs.tex @@ -1,51 +1,143 @@ -\section{Definitions} +\section{Definitions}\label{sec:oracle_reductions_defs} In this section, we give the basic definitions of a public-coin interactive oracle reduction -(henceforth called an oracle reduction or IOR). In particular, we will define its building blocks, -and various security properties. +(henceforth called an oracle reduction or IOR). We will define its building blocks, and various +security properties. -\subsection{Format} +\subsection{Format}\label{sec:oracle_reductions_defs_format} -An \textbf{(interactive) oracle reduction} is an interactive protocol between two parties, a \emph{prover} $\mathcal{P}$ -and a \emph{verifier} $\mathcal{V}$. It takes place in the following setting: +An \textbf{(interactive) oracle reduction (IOR)} is an interactive protocol between two parties, a +\emph{prover} $\mathcal{P}$ and a \emph{verifier} $\mathcal{V}$. In ArkLib, IORs are defined in the +following setting: \begin{enumerate} \item We work in an ambient dependent type theory (in our case, Lean). 
+
  \item The protocol flow is fixed and defined by a given \emph{type signature}, which describes in each round which party sends a message to the other, and the type of that message.
-  \item The prover and verifier has access to some inputs (called the \emph{context}) at the beginning of the protocol. These inputs are classified as follows:
+
+  \item The prover and verifier have access to some inputs (called the \emph{(oracle) context}) at
+  the beginning of the protocol. These inputs are classified as follows:
  \begin{itemize}
-    \item \emph{Public inputs}: available to both parties;
-    \item \emph{Private inputs} (or \emph{witness}): available only to the prover;
-    \item \emph{Oracle inputs}: the underlying data is available to the prover, but it's only
-    exposed as an oracle to the verifier. An oracle interface for such inputs consists of a query type, a response type, and a function that takes in the underlying input, a query, and outputs a response.
-    \item \emph{Shared oracle}: the oracle is available to both parties via an interface; in most cases, it is either empty, a probabilistic sampling oracle, or a random oracle. See~\Cref{sec:vcvio} for more information on oracle computations.
+    \item \emph{Public inputs} (or \emph{statement}) $\mathbbm{x}$: available to both parties;
+    \item \emph{Private inputs} (or \emph{witness}) $\mathbbm{w}$: available only to the prover;
+    \item \emph{Oracle inputs} (or \emph{oracle statement}) $\mathbbm{ox}$: the underlying data
+    is available to the prover, but it's only exposed as an oracle to the verifier. See~\Cref{def:oracle_interface} for more information.
+    \item \emph{Shared oracle} $\mathcal{O}$: the oracle is available to both parties via an
+    interface; in most cases, it is either empty, a probabilistic sampling oracle, a random
+    oracle, or a group oracle (for the Algebraic Group Model). See~\Cref{sec:vcvio} for more
+    information on oracle computations.
 \end{itemize}
+
  \item The messages sent from the prover may either: 1) be seen directly by the verifier, or 2) only be available to a verifier through an \emph{oracle interface} (which specifies the type for the query and response, and the oracle's behavior given the underlying message).
-  \item All messages from $\mathcal{V}$ are chosen uniformly at random from the finite type corresponding to that round. This property is called \emph{public-coin} in the literature.
+
+  Currently, in the oracle reduction setting, we \emph{only} allow messages sent to be available
+  through oracle interfaces. In the (non-oracle) reduction setting, all messages are available
+  directly. Future extensions may allow for mixed visibility for prover's messages.
+
+  \item $\mathcal{V}$ is assumed to be \emph{public-coin}, meaning that its challenges are chosen
+  uniformly at random from the finite type corresponding to that round, and it uses no randomness
+  otherwise, except from those coming from the shared oracle.
+
+  \item At the end of the protocol, the prover and verifier output a new (oracle) context, which consists of:
+  \begin{itemize}
+    \item The verifier takes in the input statement and the challenges, performs an \emph{oracle} computation on the input oracle statements and the oracle messages, and outputs a new output statement.
+
+    The verifier also outputs the new oracle statement in an implicit manner, by specifying a
+    subset of the input oracle statements \& the oracle messages. Future extensions may allow for more flexibility in specifying output oracle statements (i.e. not just a subset, but a linear combination, or any other function).
+    \item The prover takes in some final private state (maintained during protocol execution), and outputs a new output statement, new output oracle statement, and new output witness.
+  \end{itemize}
 \end{enumerate}
-We now go into more details on these objects.
+\begin{remark}[Literature Comparison] +In the literature, our definition corresponds to the notion of \emph{functional} IORs. Historically, +(vector) IOPs were the first notion to be introduced by~\cite{IOPs}; these are IORs where the output +statement is true/false, all oracle statements and messages are vectors over some alphabet $\Sigma$, +and the oracle interfaces are for querying specific positions in the vector. More recent works have +considered other oracle interfaces, e.g., polynomial oracles~\cite{Marlin, DARK}, generalized proofs +to reductions~\cite{ARoK, WARP, Arc, fics-facs}, and considered general oracle +interfaces~\cite{WHIR}. Most of the IOP theory has been distilled in the +textbook~\cite{ChiesaYogev2024}. + +We have not seen any work that considers our most general setting, of IORs with arbitrary oracle interfaces. +\end{remark} + +We now go into more details on these objects, and how they are represented in Lean. Our description will aim to be as close as possible to the Lean code, and hence may differ somewhat from ``mainstream'' mathematical \& cryptographic notation. -\begin{definition}[Type Signature of an Oracle Reduction] - \label{def:oracle_reduction_type_signature} - A protocol specification $\rho : \mathsf{PSpec}\; n$ of an oracle reduction is parametrized by a fixed number of messages sent in total. For each step of interaction, it specifies a direction for the message (prover to verifier, or vice versa), and a type for the message. If a message from the prover to the verifier is further marked as oracle, then we also expect an oracle interface for that message. In Lean, we handle these oracle interfaces via the $\mathsf{OracleInterface}$ type class. 
- \lean{ProtocolSpec, OracleInterface} +\begin{definition}[Oracle Interface] + \label{def:oracle_interface} + An oracle interface for an underlying data type $\mathsf{D}$ consists of the following: + \begin{itemize} + \item A type $\mathsf{Q}$ for queries to the oracle, + \item A type $\mathsf{R}$ for responses from the oracle, + \item A function $\mathsf{oracle} : \mathsf{D} \to \mathsf{Q} \to \mathsf{R}$ that specifies + the oracle's behavior given the underlying data and a query. + \end{itemize} + \lean{OracleInterface} \end{definition} -\begin{definition}[Context]\label{def:context} - In an oracle reduction, its \emph{context} (denoted $\Gamma$) consists of a list of public inputs, a list of witness inputs, a list of oracle inputs, and a shared oracle (possibly represented as a list of lazily sampled query-response pairs). These inputs have the expected visibility. +See \texttt{OracleInterface.lean} for common instances of $\mathsf{OracleInterface}$. -For simplicity, we imagine the context as \emph{append-only}, as we add new messages from the protocol execution. + +\begin{definition}[Context] + \label{def:context} + In an (oracle) reduction, its \emph{(oracle) context} consists of a statement type, a witness + type, and (in the oracle case) an indexed list of oracle statement types. + + Currently, we do not abstract out / bundle the context as a separate structure, but rather + specifies the types explicitly. This may change in the future. +\end{definition} + +\begin{definition}[Protocol Specification] + \label{def:protocol_spec} + A protocol specification for an $n$-message (oracle) reduction, is an element of the following type: + \begin{align*} + \ProtocolSpec\ n &:= \Fin\ n \to \Direction \times \Type. + \end{align*} + In the above, $\Direction := \{ \PtoVdir, \VtoPdir \}$ is the type of possible directions of messages, and $\Fin\ n := \{ i : \bbN \quotient i < n \}$ is the type of all natural numbers less than $n$. 
+ + In other words, for each step $i$ of interaction, the protocol specification describes the \emph{direction} of the message sent in that step, i.e., whether it is from the prover or from the verifier. It also describes the \emph{type} of that message. + + In the oracle setting, we also expect an oracle interface for each message from the prover to the verifier. + \lean{ProtocolSpec} \end{definition} We define some supporting definitions for a protocol specification. -\begin{definition}[Transcript \& Related Definitions] - \label{def:transcript_and_related_defs} - Given protocol specification $\rho : \mathsf{PSpec}\; n$, its \emph{transcript} up to round $i$ is an element of type $\rho_{[:i]} ::= \prod_{j < i} \rho\; j$. We define the type of all challenges sent by the verifier as $\rho.\mathsf{Chals} ::= \prod_{i \text{ s.t. } (\rho\; i).\mathsf{fst} = \mathsf{V2P}} (\rho\; i).\mathsf{snd}$. +\begin{definition}[Protocol Specification Components] + \label{def:protocol_spec_components} + Given a protocol spec $\pSpec : \ProtocolSpec\ n$, we define: + \begin{itemize} + \item $\pSpec.\Dir\ i := (\pSpec\ i).\mathsf{fst}$ extracts the direction of the $i$-th message. + \item $\pSpec.\Type\ i := (\pSpec\ i).\mathsf{snd}$ extracts the type of the $i$-th message. + \item $\pSpec.\MessageIdx := \{i : \Fin\ n \quotient \pSpec.\Dir\ i = \PtoVdir\}$ is the subtype of indices corresponding to prover messages. + \item $\pSpec.\ChallengeIdx := \{i : \Fin\ n \quotient \pSpec.\Dir\ i = \VtoPdir\}$ is the subtype of indices corresponding to verifier challenges. + \item $\pSpec.\mathsf{Message}\ i := (i : \pSpec.\MessageIdx) \to \pSpec.\Type\ i.\mathsf{val}$ is an indexed family of message types in the protocol. + \item $\pSpec.\mathsf{Challenge}\ i := (i : \pSpec.\ChallengeIdx) \to \pSpec.\Type\ i.\mathsf{val}$ is an indexed family of challenge types in the protocol. 
+ \end{itemize} + \lean{ProtocolSpec.getDir, ProtocolSpec.getType, ProtocolSpec.MessageIdx, ProtocolSpec.ChallengeIdx, ProtocolSpec.Message, ProtocolSpec.Challenge} + \uses{def:protocol_spec} +\end{definition} + +\begin{definition}[Protocol Transcript] + \label{def:transcript} + Given protocol specification $\pSpec : \ProtocolSpec\ n$, we define: + \begin{itemize} + \item A \emph{transcript} up to round $k : \Fin\ (n + 1)$ is an element of type + \[ \Transcript\ k\ \pSpec := (i : \Fin\ k) \to \pSpec.\Type\ (\uparrow i : \Fin\ n) \] + where $\uparrow i : \Fin\ n$ denotes casting $i : \Fin\ k$ to $\Fin\ n$ (valid since $k \leq n + 1$). + + \item A \emph{full transcript} is $\FullTranscript\ \pSpec := (i : \Fin\ n) \to \pSpec.\Type\ i$. + + \item The type of all \emph{messages} from prover to verifier is + \[ \pSpec.\Messages := \prod_{i : \pSpec.\MessageIdx} \pSpec.\Message\ i \] + + \item The type of all \emph{challenges} from verifier to prover is + \[ \pSpec.\Challenges := \prod_{i : \pSpec.\ChallengeIdx} \pSpec.\Challenge\ i \] + \end{itemize} \lean{ProtocolSpec.Transcript, ProtocolSpec.Message, ProtocolSpec.Challenge} + \uses{def:protocol_spec, def:protocol_spec_components} \end{definition} % In the interactive protocols we consider, both parties $P$ and $V$ may have access to a shared @@ -61,17 +153,45 @@ \subsection{Format} \end{remark} \begin{definition}[Type Signature of a Prover] - \label{def:prover_type_signature} - A prover $\mathcal{P}$ in an oracle reduction, given a context, is a stateful oracle computation - that at each step of the protocol, either takes in a new message from the verifier, or sends a - new message to the verifier. - \lean{Prover} + \label{def:prover} + A prover $\mathcal{P}$ in a reduction consists of the following components: + + \begin{itemize} + \item \textbf{Prover State}: A family of types $\mathsf{PrvState} : \Fin(n+1) \to \Type$ representing the prover's internal state at each round of the protocol. 
+ + \item \textbf{Input Processing}: A function + \[ \mathsf{input} : \StmtIn \to \WitIn \to \mathsf{PrvState}(0) \] + that initializes the prover's state from the input statement and witness. + + \item \textbf{Message Sending}: For each message index $i : \pSpec.\MessageIdx$, a function + \[ \mathsf{sendMessage}_i : \mathsf{PrvState}(i.\mathsf{val}.\mathsf{castSucc}) \to \OracleComp(\oSpec, \pSpec.\Message(i) \times \mathsf{PrvState}(i.\mathsf{val}.\mathsf{succ})) \] + that generates the message and updates the prover's state. + + \item \textbf{Challenge Processing}: For each challenge index $i : \pSpec.\ChallengeIdx$, a function + \[ \mathsf{receiveChallenge}_i : \mathsf{PrvState}(i.\mathsf{val}.\mathsf{castSucc}) \to \pSpec.\Challenge(i) \to \mathsf{PrvState}(i.\mathsf{val}.\mathsf{succ}) \] + that updates the prover's state upon receiving a challenge. + + \item \textbf{Output Generation}: A function + \[ \mathsf{output} : \mathsf{PrvState}(\Fin.\mathsf{last}(n)) \to \StmtOut \times \WitOut \] + that produces the final output statement and witness from the prover's final state. + \end{itemize} + \lean{Prover, ProverState, ProverIn, ProverRound, ProverOut} +\end{definition} + +\begin{definition}[Type Signature of an Oracle Prover] + \label{def:oracle_prover} + An oracle prover is a prover whose input statement includes the underlying data for oracle statements, and whose output includes oracle statements. 
Formally, it is a prover with input statement type $\StmtIn \times (\forall i : \iota_{\mathsf{si}}, \OStmtIn(i))$ and output statement type $\StmtOut \times (\forall i : \iota_{\mathsf{so}}, \OStmtOut(i))$, where: + \begin{itemize} + \item $\OStmtIn : \iota_{\mathsf{si}} \to \Type$ are the input oracle statement types + \item $\OStmtOut : \iota_{\mathsf{so}} \to \Type$ are the output oracle statement types + \end{itemize} + \lean{OracleProver} \end{definition} -Our modeling of oracle reductions only consider \emph{public-coin} verifiers; that is, verifiers who -only outputs uniformly random challenges drawn from the (finite) types, and uses no other -randomness. Because of this fixed functionality, we can bake the verifier's behavior in the -interaction phase directly into the protocol execution semantics. +% Our modeling of oracle reductions only consider \emph{public-coin} verifiers; that is, verifiers who +% only outputs uniformly random challenges drawn from the (finite) types, and uses no other +% randomness. Because of this fixed functionality, we can bake the verifier's behavior in the +% interaction phase directly into the protocol execution semantics. After the interaction phase, the verifier may then run some verification procedure to check the validity of the prover's responses. In this procedure, the verifier gets access to the public part @@ -82,81 +202,93 @@ \subsection{Format} % \emph{interactive reduction (IR)}. \begin{definition}[Type Signature of a Verifier] - \label{def:verifier_type_signature} - A verifier $\mathcal{V}$ in an oracle reduction is an oracle computation that may perform a - series of checks (i.e. `Bool`-valued, or `Option Unit`) on the given context. 
+ \label{def:verifier} + A verifier $\mathcal{V}$ in a reduction is specified by a single function: + \[ \mathsf{verify} : \StmtIn \to \FullTranscript(\pSpec) \to \OracleComp(\oSpec, \StmtOut) \] + + This function takes the input statement and the complete transcript of the protocol interaction, and performs an oracle computation (potentially querying the shared oracle $\oSpec$) to produce an output statement. + + The verifier is assumed to be \emph{public-coin}, meaning it only sends uniformly random challenges and uses no other randomness beyond what is provided by the shared oracle. \lean{Verifier} \end{definition} -% \begin{definition}[Type Signature of an Oracle Verifier] -% \label{def:oracle_verifier_type_signature} -% \lean{OracleVerifier} -% \end{definition} +\begin{definition}[Type Signature of an Oracle Verifier] + \label{def:oracle_verifier} + An oracle verifier $\mathcal{V}$ consists of the following components: + + \begin{itemize} + \item \textbf{Verification Logic}: A function + \[ \mathsf{verify} : \StmtIn \to \pSpec.\Challenges \to \OracleComp(\oSpec \mathrel{++_\mathsf{o}} ([\OStmtIn]_\mathsf{o} \mathrel{++_\mathsf{o}} [\pSpec.\Message]_\mathsf{o}), \StmtOut) \] + that takes the input statement and verifier challenges, and performs oracle queries to the shared oracle, input oracle statements, and prover messages to produce an output statement. + + \item \textbf{Output Oracle Embedding}: An injective function + \[ \mathsf{embed} : \iota_{\mathsf{so}} \hookrightarrow \iota_{\mathsf{si}} \oplus \pSpec.\MessageIdx \] + that specifies how each output oracle statement is derived from either an input oracle statement or a prover message. 
+ + \item \textbf{Type Compatibility}: A proof term + \[ \mathsf{hEq} : \forall i : \iota_{\mathsf{so}}, \OStmtOut(i) = \begin{cases} + \OStmtIn(j) & \text{if } \mathsf{embed}(i) = \mathsf{inl}(j) \\ + \pSpec.\Message(k) & \text{if } \mathsf{embed}(i) = \mathsf{inr}(k) + \end{cases} \] + ensuring that output oracle statement types match their sources. + \end{itemize} + + This design ensures that output oracle statements are always a subset of the available input oracle statements and prover messages. + \lean{OracleVerifier} +\end{definition} + +\begin{definition}[Oracle Verifier to Verifier Conversion] + \label{def:oracle_verifier_to_verifier} + An oracle verifier can be converted to a standard verifier through a natural simulation process. The key insight is that while an oracle verifier only has oracle access to certain data (input oracle statements and prover messages), a standard verifier can be given the actual underlying data directly. + + The conversion works as follows: when the oracle verifier needs to make an oracle query to some data, the converted verifier can respond to this query immediately using the actual underlying data it possesses. This is accomplished through the \texttt{OracleInterface} type class, which specifies for each data type how to respond to queries given the underlying data. 
+ + Specifically, given an oracle verifier $\mathcal{V}_{\text{oracle}}$: + \begin{itemize} + \item The converted verifier $\mathcal{V}_{\text{oracle}}.\mathsf{toVerifier}$ takes as input both the statement \emph{and} the actual underlying data for all oracle statements + \item When $\mathcal{V}_{\text{oracle}}$ attempts to query an oracle statement or prover message, the converted verifier uses the corresponding \texttt{OracleInterface} instance to compute the response from the actual data + \item The output oracle statements are constructed according to the embedding specification, selecting the appropriate subset of input oracle statements and prover messages + \end{itemize} + \lean{OracleVerifier.toVerifier} + \uses{def:oracle_verifier} +\end{definition} An oracle reduction then consists of a type signature for the interaction, and a pair of prover and verifier for that type signature. -% \begin{definition}[Interactive Reduction] -% \label{def:interactive_reduction} -% \lean{Reduction} -% An interactive reduction is a combination of a type signature \verb|ProtocolSpec|, a prover for \verb|ProtocolSpec|, and a verifier for \verb|ProtocolSpec|. -% \end{definition} -l +\begin{definition}[Interactive Reduction] + \label{def:reduction} + An interactive reduction for protocol specification $\pSpec : \ProtocolSpec(n)$ and oracle specification $\oSpec$ consists of: + \begin{itemize} + \item A \textbf{prover} $\mathcal{P} : \Prover(\pSpec, \oSpec, \StmtIn, \WitIn, \StmtOut, \WitOut)$ + \item A \textbf{verifier} $\mathcal{V} : \Verifier(\pSpec, \oSpec, \StmtIn, \StmtOut)$ + \end{itemize} + + The reduction establishes a relationship between input relations on $(\StmtIn, \WitIn)$ and output relations on $(\StmtOut, \WitOut)$ through the interactive protocol defined by $\pSpec$. 
+ \lean{Reduction} + \uses{def:prover, def:verifier} +\end{definition} + \begin{definition}[Interactive Oracle Reduction] - \label{def:interactive_oracle_reduction} + \label{def:oracle_reduction} + An interactive oracle reduction for protocol specification $\pSpec : \ProtocolSpec(n)$ with oracle interfaces for all prover messages, and oracle specification $\oSpec$, consists of: + \begin{itemize} + \item An \textbf{oracle prover} $\mathcal{P} : \OracleProver(\pSpec, \oSpec, \StmtIn, \WitIn, \StmtOut, \WitOut, \OStmtIn, \OStmtOut)$ + \item An \textbf{oracle verifier} $\mathcal{V} : \OracleVerifier(\pSpec, \oSpec, \StmtIn, \StmtOut, \OStmtIn, \OStmtOut)$ + \end{itemize} + + where: + \begin{itemize} + \item $\OStmtIn : \iota_{\mathsf{si}} \to \Type$ are the input oracle statement types with oracle interfaces + \item $\OStmtOut : \iota_{\mathsf{so}} \to \Type$ are the output oracle statement types + \end{itemize} + + The oracle reduction allows the verifier to access prover messages and oracle statements only through specified oracle interfaces, enabling more flexible and composable protocol designs. \lean{OracleReduction} - An interactive oracle reduction for a given context $\Gamma$ is a combination a prover and a verifier of the types specified above. - \uses{def:oracle_reduction_type_signature} -\end{definition} - -\textbf{PL Formalization.} We write our definitions in PL notation in~\Cref{fig:type-defs}. The set of types $\Type$ is the same as Lean's dependent type theory (omitting universe levels); in particular, we care about basic dependent types (Pi and Sigma), finite natural numbers, finite fields, lists, vectors, and polynomials. 
- -\begin{figure}[t] - \[\begin{array}{rcl} - % Basic types - \mathsf{Type} &::=& \mathsf{Unit} \mid \mathsf{Bool} \mid \mathbb{N} \mid \mathsf{Fin}\; n \mid \mathbb{F}_q \mid \mathsf{List}\;(\alpha : \mathsf{Type}) \mid (i : \iota) \to \alpha\; i \mid (i : \iota) \times \alpha\; i \mid \dots \\[1em] - % Protocol message types - \mathsf{Dir} &::=& \mathsf{P2V.Pub} \mid \mathsf{P2V.Orac} \mid \mathsf{V2P} \\ - \mathsf{OI}\; (\mathrm{M} : \Type) &::=& \langle \mathrm{Q}, \mathrm{R}, \mathrm{M} \to \mathrm{Q} \to \mathrm{R} \rangle \\ - % Protocol type signature - \mathsf{PSpec}\; (n : \mathbb{N}) &::=& \mathsf{Fin}\; n \to (d : \mathsf{Dir}) \times (M : \Type) \times (\mathsf{if}\; d = \mathsf{P2V.Orac} \; \mathsf{then} \; \mathsf{OI}(M) \; \mathsf{else} \; \mathsf{Unit}) \\ - % Oracle type signature - \mathsf{OSpec} \; (\iota : \mathsf{Type}) &::=& (i : \iota) \to \mathsf{dom}\; i \times \mathsf{range}\; i \\[1em] - % Contexts - \varSigma &::=& \emptyset \mid \varSigma \times \Type \\ - \Omega &::=& \emptyset \mid \Omega \times \langle \mathrm{M} : \Type, \mathsf{OI}(\mathrm{M}) \rangle \\ - \Psi &::=& \emptyset \mid \Psi \times \Type\\ - \end{array}\] - \[\begin{array}{rcl} - \Gamma &::=& (\Psi; \Omega; \varSigma; \rho; \mathcal{O})\\ - \mathsf{OComp}^{\mathcal{O}}\; (\alpha : \Type) &::=& \mid\; \mathsf{pure}\; (a : \alpha) \\ - && \mid\; \mathsf{queryBind}\;(i : \iota)\; (q : \mathsf{dom}\; i)\; (k : \mathsf{range}\; i \to \mathsf{OComp}^{\mathcal{O}}\; \alpha) \\ - && \mid\; \mathsf{fail} \\[1em] - \tau_{\mathsf{P}}(\Gamma) &::=& (i : \mathsf{Fin}\; n) \to (h : (\rho \; i).\mathsf{fst} = \mathsf{P2V}) \to \\ - && \varSigma \to \Omega \to \Psi \to \rho_{[:i]} \to \mathsf{OComp}^{\mathcal{O}}\;\left( (\rho \; i).\mathsf{snd}\right) \\[1em] - - \tau_{\mathsf{V}}(\Gamma) &::=& \varSigma \to (\rho.\mathsf{Chals}) \to \mathsf{OComp}^{\mathcal{O} :: \OI(\Omega) :: \OI(\rho.\mathsf{Msg.Orac})}\; \mathsf{Unit} \\[1em] - \tau_{\mathsf{E}}(\Gamma) &::=& \varSigma \to 
\Omega \to \rho.\mathsf{Transcript} \to \calO.\mathsf{QueryLog} \to \Psi - \end{array}\] - \caption{Type definitions for interactive oracle reductions} - \label{fig:type-defs} -\end{figure} - -Using programming language notation, we can express an interactive oracle reduction as a typing judgment: -\[ - \Gamma := (\Psi; \Theta; \varSigma; \rho; \mathcal{O}) \vdash \mathcal{P} : \tau_{\mathsf{P}}(\Gamma), \; \mathcal{V} : \tau_{\mathsf{V}}(\Gamma) -\] -where: -\begin{itemize} - \item $\Psi$ represents the witness (private) inputs - \item $\Theta$ represents the oracle inputs - \item $\varSigma$ represents the public inputs (i.e. statements) - \item $\mathcal{O} : \mathsf{OSpec}\; \iota$ represents the shared oracle - \item $\rho : \mathsf{PSpec}\; n$ represents the protocol type signature - \item $\mathcal{P}$ and $\mathcal{V}$ are the prover and verifier, respectively, being of the given types $\tau_{\mathsf{P}}(\Gamma)$ and $\tau_{\mathsf{V}}(\Gamma)$. -\end{itemize} + \uses{def:oracle_prover, def:oracle_verifier} +\end{definition} -To exhibit valid elements for the prover and verifier types, we will use existing functions in the ambient programming language (e.g. Lean). +\subsection{Execution Semantics}\label{sec:execution_semantics} We now define what it means to execute an oracle reduction. This is essentially achieved by first executing the prover, interspersed with oracle queries to get the verifier's challenges (these will @@ -165,22 +297,103 @@ \subsection{Format} about the execution, such as the log of oracle queries for the shared oracles, for analysis purposes (i.e. feeding information into the extractor). 
-\begin{definition}[Execution of an Oracle Reduction] - \label{def:oracle_reduction_execution} - \lean{OracleReduction.run} +\begin{definition}[Prover Execution to Round] + \label{def:prover_run_to_round} + The execution of a prover up to round $i : \Fin(n+1)$ is defined inductively: + + \[ \mathsf{Prover}.\mathsf{runToRound}(i, \mathsf{stmt}, \mathsf{wit}) := \] + \[ \mathsf{Fin}.\mathsf{induction}( \] + \[ \quad \mathsf{pure}(\langle \mathsf{default}, \mathsf{prover}.\mathsf{input}(\mathsf{stmt}, \mathsf{wit}) \rangle), \] + \[ \quad \mathsf{prover}.\mathsf{processRound}, \] + \[ \quad i \] + \[ ) \] + + where $\mathsf{processRound}$ handles individual rounds by either: + \begin{itemize} + \item \textbf{Verifier Challenge} ($\pSpec.\mathsf{getDir}(j) = \mathsf{V\_to\_P}$): Query for a challenge and update prover state + \item \textbf{Prover Message} ($\pSpec.\mathsf{getDir}(j) = \mathsf{P\_to\_V}$): Generate message via $\mathsf{sendMessage}$ and update state + \end{itemize} + + Returns the transcript up to round $i$ and the prover's state after round $i$. + \lean{Prover.runToRound, Prover.processRound} + \uses{def:prover, def:protocol_spec, def:transcript} \end{definition} -\begin{remark}[More efficient representation of oracle reductions] - The presentation of oracle reductions as protocols on an append-only context is useful for - reasoning, but it does not lead to the most efficient implementation for the prover and - verifier. In particular, the prover cannot keep intermediate state, and thus needs to recompute - everything from scratch for each new message. +\begin{definition}[Complete Prover Execution] + \label{def:prover_run} + The complete execution of a prover is defined as: - To fix this mismatch, we will also define a stateful variant of the prover, and define a notion - of observational equivalence between the stateless and stateful reductions. 
-\end{remark} + \[ \mathsf{Prover}.\mathsf{run}(\mathsf{stmt}, \mathsf{wit}) := \mathsf{do} \; \{ \] + \[ \quad \langle \mathsf{transcript}, \mathsf{state} \rangle \leftarrow \mathsf{prover}.\mathsf{runToRound}(\Fin.\mathsf{last}(n), \mathsf{stmt}, \mathsf{wit}) \] + \[ \quad \langle \mathsf{stmtOut}, \mathsf{witOut} \rangle := \mathsf{prover}.\mathsf{output}(\mathsf{state}) \] + \[ \quad \mathsf{return} \; \langle \mathsf{stmtOut}, \mathsf{witOut}, \mathsf{transcript} \rangle \] + \[ \} \] + + Returns the output statement, output witness, and complete transcript. + \lean{Prover.run} + \uses{def:prover, def:prover_run_to_round} +\end{definition} + +\begin{definition}[Verifier Execution] + \label{def:verifier_run} + The execution of a verifier is simply the application of its verification function: + + \[ \mathsf{Verifier}.\mathsf{run}(\mathsf{stmt}, \mathsf{transcript}) := \mathsf{verifier}.\mathsf{verify}(\mathsf{stmt}, \mathsf{transcript}) \] + + This takes the input statement and full transcript, and returns the output statement via an oracle computation. + \lean{Verifier.run} + \uses{def:verifier} +\end{definition} + +\begin{definition}[Oracle Verifier Execution] + \label{def:oracle_verifier_run} + The execution of an oracle verifier is defined as: + + \[ \mathsf{OracleVerifier}.\mathsf{run}(\mathsf{stmt}, \mathsf{oStmtIn}, \mathsf{transcript}) := \mathsf{do} \; \{ \] + \[ \quad \mathsf{f} := \mathsf{simOracle2}(\oSpec, \mathsf{oStmtIn}, \mathsf{transcript}.\mathsf{messages}) \] + \[ \quad \mathsf{stmtOut} \leftarrow \mathsf{simulateQ}(\mathsf{f}, \mathsf{verifier}.\mathsf{verify}(\mathsf{stmt}, \mathsf{transcript}.\mathsf{challenges})) \] + \[ \quad \mathsf{return} \; \mathsf{stmtOut} \] + \[ \} \] + + This simulates the oracle access to input oracle statements and prover messages, then executes the verification logic. 
+ \lean{OracleVerifier.run} + \uses{def:oracle_verifier, def:oracle_interface} +\end{definition} + +\begin{definition}[Interactive Reduction Execution] + \label{def:reduction_run} + The execution of an interactive reduction consists of running the prover followed by the verifier: + + \[ \mathsf{Reduction}.\mathsf{run}(\mathsf{stmt}, \mathsf{wit}) := \mathsf{do} \; \{ \] + \[ \quad \langle \mathsf{prvStmtOut}, \mathsf{witOut}, \mathsf{transcript} \rangle \leftarrow \mathsf{reduction}.\mathsf{prover}.\mathsf{run}(\mathsf{stmt}, \mathsf{wit}) \] + \[ \quad \mathsf{stmtOut} \leftarrow \mathsf{reduction}.\mathsf{verifier}.\mathsf{run}(\mathsf{stmt}, \mathsf{transcript}) \] + \[ \quad \mathsf{return} \; ((\mathsf{prvStmtOut}, \mathsf{witOut}), \mathsf{stmtOut}, \mathsf{transcript}) \] + \[ \} \] + + Returns both the prover's output (statement and witness) and the verifier's output statement, along with the complete transcript. + \lean{Reduction.run} + \uses{def:reduction, def:prover_run, def:verifier_run} +\end{definition} + +\begin{definition}[Oracle Reduction Execution] + \label{def:oracle_reduction_run} + The execution of an interactive oracle reduction is similar to a standard reduction but includes logging of oracle queries: -\subsection{Security properties} + \[ \mathsf{OracleReduction}.\mathsf{run}(\mathsf{stmt}, \mathsf{wit}, \mathsf{oStmt}) := \mathsf{do} \; \{ \] + \[ \quad \langle \langle \mathsf{prvStmtOut}, \mathsf{witOut}, \mathsf{transcript} \rangle, \mathsf{proveQueryLog} \rangle \leftarrow \] + \[ \qquad (\mathsf{simulateQ}(\mathsf{loggingOracle}, \mathsf{reduction}.\mathsf{prover}.\mathsf{run}(\langle \mathsf{stmt}, \mathsf{oStmt} \rangle, \mathsf{wit}))).\mathsf{run} \] + \[ \quad \langle \mathsf{stmtOut}, \mathsf{verifyQueryLog} \rangle \leftarrow \] + \[ \qquad (\mathsf{simulateQ}(\mathsf{loggingOracle}, \mathsf{reduction}.\mathsf{verifier}.\mathsf{run}(\mathsf{stmt}, \mathsf{oStmt}, \mathsf{transcript}))).\mathsf{run} \] + \[ \quad \mathsf{return} 
\; ((\mathsf{prvStmtOut}, \mathsf{witOut}), \mathsf{stmtOut}, \mathsf{transcript}, \mathsf{proveQueryLog}, \mathsf{verifyQueryLog}) \] + \[ \} \] + + Returns the same outputs as a standard reduction, plus logs of all oracle queries made by both the prover and verifier. + \lean{OracleReduction.run} + \uses{def:oracle_reduction, def:prover_run, def:oracle_verifier_run} +\end{definition} + + +\subsection{Security Properties}\label{sec:security} We can now define properties of interactive reductions. The two main properties we consider in this project are completeness and various notions of soundness. We will cover zero-knowledge at a later @@ -194,13 +407,106 @@ \subsection{Security properties} \begin{definition}[Completeness] \label{def:completeness} + A reduction satisfies \textbf{completeness} with error $\epsilon \geq 0$ and with respect to + input relation $R_{\text{in}}$ and output relation $R_{\text{out}}$, if for all valid statement-witness pair + $(x_{\text{in}}, w_{\text{in}})$ for $R_{\text{in}}$, the execution between the honest prover and the honest verifier + will result in a tuple $((x_{\text{out}}^P, w_{\text{out}}), x_{\text{out}}^V)$ such that: + \begin{itemize} + \item $R_{\text{out}}(x_{\text{out}}^V, w_{\text{out}}) = \text{True}$ (the output statement-witness pair is valid), and + \item $x_{\text{out}}^P = x_{\text{out}}^V$ (the output statements are the same from both prover and verifier) + \end{itemize} + except with probability $\epsilon$. \lean{Reduction.completeness} - \uses{def:interactive_oracle_reduction} + \uses{def:reduction, def:reduction_run} +\end{definition} + +\begin{definition}[Perfect Completeness] + \label{def:perfect_completeness} + A reduction satisfies \textbf{perfect completeness} if it satisfies completeness with error $0$. + This means that the probability of the reduction outputting a valid statement-witness pair is + \emph{exactly} 1 (instead of at least $1 - 0$). 
+ \lean{Reduction.perfectCompleteness} + \uses{def:completeness} \end{definition} Almost all oracle reductions we consider actually satisfy \emph{perfect completeness}, which simplifies the proof obligation. In particular, this means we only need to show that no matter what challenges are chosen, the verifier will always accept given messages from the honest prover. +\subsubsection{Extractors} + +For knowledge soundness, we need to consider different types of extractors that can recover witnesses from malicious provers. + +\begin{definition}[Straightline Extractor] + \label{def:straightline_extractor} + A \textbf{straightline, deterministic, non-oracle-querying extractor} takes in: + \begin{itemize} + \item the output witness $w_{\text{out}}$, + \item the initial statement $x_{\text{in}}$, + \item the IOR transcript $\tau$, + \item the query logs from the prover and verifier + \end{itemize} + and returns a corresponding initial witness $w_{\text{in}}$. + + Note that the extractor does not need to take in the output statement, since it can be derived + via re-running the verifier on the initial statement, the transcript, and the verifier's query + log. + + This form of extractor suffices for proving knowledge soundness of most hash-based IOPs. + \lean{Extractor.Straightline} + \uses{def:transcript} +\end{definition} + +\begin{definition}[Round-by-Round Extractor] + \label{def:rbr_extractor} + A \textbf{round-by-round extractor} with index $m$ is given: + \begin{itemize} + \item the input statement $x_{\text{in}}$, + \item a partial transcript of length $m$, + \item the prover's query log + \end{itemize} + and returns a witness to the statement. + + Note that the RBR extractor does not need to take in the output statement or witness. 
+ \lean{Extractor.RoundByRound} + \uses{def:transcript} +\end{definition} + +\begin{definition}[Rewinding Extractor] + \label{def:rewinding_extractor} + A \textbf{rewinding extractor} consists of: + \begin{itemize} + \item An extractor state type + \item Simulation oracles for challenges and oracle queries for the prover + \item A function that runs the extractor with the prover's oracle interface, allowing for calling the prover multiple times + \end{itemize} + This allows the extractor to rewind the prover to earlier states and try different challenges. + \lean{Extractor.Rewinding} + \uses{def:prover, def:oracle_interface} +\end{definition} + +\subsubsection{Adversarial Provers} + +% \begin{definition}[Adaptive Prover] +% \label{def:adaptive_prover} +% An \textbf{adaptive prover} extends the basic prover type with the ability to choose the input statement adaptively based on oracle access. This models stronger adversaries that can choose their statements after seeing some oracle responses. +% \lean{Prover.Adaptive} +% \uses{def:prover} +% \end{definition} + +\begin{definition}[State-Restoration Prover] + \label{def:sr_prover} + A \textbf{state-restoration prover} is a modified prover that has query access to challenge oracles that can return the $i$-th challenge, for all $i$, given the input statement and the transcript up to that point. + + It takes in the input statement and witness, and outputs a full transcript of interaction, + along with the output statement and witness. + + This models adversaries in the state-restoration setting where challenges can be queried programmably. + \lean{Prover.StateRestoration} + \uses{def:transcript} +\end{definition} + +\subsubsection{Soundness Definitions} + For soundness, we need to consider different notions. These notions differ in two main aspects: \begin{itemize} \item Whether we consider the plain soundness, or knowledge soundness. 
The latter relies on the
@@ -216,39 +522,240 @@ \subsection{Security properties}
 \begin{definition}[Soundness]
   \label{def:soundness}
+ A reduction satisfies \textbf{soundness} with error $\epsilon \geq 0$ and with respect to input
+ language $L_{\text{in}} \subseteq \text{Statement}_{\text{in}}$ and output language $L_{\text{out}} \subseteq \text{Statement}_{\text{out}}$ if:
+ \begin{itemize}
+ \item for all (malicious) provers with arbitrary witness types,
+ \item for all arbitrary input witnesses,
+ \item for all input statements $x_{\text{in}} \notin L_{\text{in}}$,
+ \end{itemize}
+ the execution between the prover and the honest verifier will result in an output statement
+ $x_{\text{out}} \in L_{\text{out}}$ with probability at most $\epsilon$.
   \lean{Verifier.soundness}
-  \uses{def:interactive_oracle_reduction}
+  \uses{def:verifier, def:prover_run}
 \end{definition}
-A (straightline) extractor for knowledge soundness is a deterministic algorithm that takes in the output public context after executing the oracle reduction, the side information (i.e. log of oracle queries from the malicious prover) observed during execution, and outputs the witness for the input context.
-
-Note that since we assume the context is append-only, and we append only the public (or oracle)
-messages obtained during protocol execution, it follows that the witness stays the same throughout
-the execution.
- \begin{definition}[Knowledge Soundness] \label{def:knowledge_soundness} + A reduction satisfies \textbf{(straightline) knowledge soundness} with error $\epsilon \geq 0$ and + with respect to input relation $R_{\text{in}}$ and output relation $R_{\text{out}}$ if: + \begin{itemize} + \item there exists a straightline extractor $E$, such that + \item for all input statement $x_{\text{in}}$, witness $w_{\text{in}}$, and (malicious) prover, + \item if the execution with the honest verifier results in a pair $(x_{\text{out}}, w_{\text{out}})$, + \item and the extractor produces some $w'_{\text{in}}$, + \end{itemize} + then the probability that $(x_{\text{in}}, w'_{\text{in}})$ is not valid for $R_{\text{in}}$ and yet $(x_{\text{out}}, w_{\text{out}})$ is valid for $R_{\text{out}}$ is at most $\epsilon$. + + A (straightline) extractor for knowledge soundness is a deterministic algorithm that takes in the output public context after executing the oracle reduction, the side information (i.e. log of oracle queries from the malicious prover) observed during execution, and outputs the witness for the input context. + + Note that since we assume the context is append-only, and we append only the public (or oracle) + messages obtained during protocol execution, it follows that the witness stays the same throughout + the execution. \lean{Verifier.knowledgeSoundness} - \uses{def:interactive_oracle_reduction} + \uses{def:verifier, def:reduction_run, def:straightline_extractor} \end{definition} +\subsubsection{Round-by-Round Security} + To define round-by-round (knowledge) soundness, we need to define the notion of a \emph{state function}. This is a (possibly inefficient) function $\mathsf{StateF}$ that, for every challenge sent by the verifier, takes in the transcript of the protocol so far and outputs whether the state is doomed or not. 
Roughly speaking, the requirement of round-by-round soundness is that, for any (possibly malicious) prover $P$, if the state function outputs that the state is doomed on some partial transcript of the protocol, then the verifier will reject with high probability. \begin{definition}[State Function] \label{def:state_function} + A \textbf{(deterministic) state function} for a verifier, with respect to input language $L_{\text{in}}$ and + output language $L_{\text{out}}$, consists of a function that maps partial transcripts to boolean values, satisfying: + \begin{itemize} + \item For all input statements not in the language, the state function is false for the empty transcript + \item If the state function is false for a partial transcript, and the next message is from the + prover to the verifier, then the state function is also false for the new partial transcript + regardless of the message + \item If the state function is false for a full transcript, the verifier will not output a statement + in the output language + \end{itemize} \lean{Verifier.StateFunction} \end{definition} +\begin{definition}[Knowledge State Function] + \label{def:knowledge_state_function} + A \textbf{knowledge state function} for a verifier, with respect to input relation $R_{\text{in}}$, output + relation $R_{\text{out}}$, and intermediate witness types, extends the basic state function to track + witness validity throughout the protocol execution. This is used to define round-by-round knowledge soundness. 
+ \lean{Verifier.KnowledgeStateFunction} +\end{definition} + \begin{definition}[Round-by-Round Soundness] \label{def:round_by_round_soundness} + A protocol with verifier $\mathcal{V}$ satisfies \textbf{round-by-round soundness} with respect to input language + $L_{\text{in}}$, output language $L_{\text{out}}$, and error function $\epsilon: \text{ChallengeIdx} \to \mathbb{R}_{\geq 0}$ if: + \begin{itemize} + \item there exists a state function for the verifier and the input/output languages, such that + \item for all initial statements $x_{\text{in}} \notin L_{\text{in}}$, + \item for all initial witnesses, + \item for all provers, + \item for all challenge rounds $i$, + \end{itemize} + the probability that: + \begin{itemize} + \item the state function is false for the partial transcript output by the prover + \item the state function is true for the partial transcript appended by next challenge (chosen randomly) + \end{itemize} + is at most $\epsilon(i)$. \lean{Verifier.rbrSoundness} - \uses{def:interactive_oracle_reduction} + \uses{def:verifier, def:state_function, def:prover_run_to_round} \end{definition} \begin{definition}[Round-by-Round Knowledge Soundness] \label{def:round_by_round_knowledge_soundness} + A protocol with verifier $\mathcal{V}$ satisfies \textbf{round-by-round knowledge soundness} with respect to input + relation $R_{\text{in}}$, output relation $R_{\text{out}}$, and error function $\epsilon: \text{ChallengeIdx} \to \mathbb{R}_{\geq 0}$ if: + \begin{itemize} + \item there exists a knowledge state function for the verifier and the languages of the input/output relations, + \item there exists a round-by-round extractor, + \item for all initial statements, + \item for all initial witnesses, + \item for all provers, + \item for all challenge rounds $i$, + \end{itemize} + the probability that: + \begin{itemize} + \item the extracted witness does not satisfy the input relation + \item the state function is false for the partial transcript output by 
the prover
+ \item the state function is true for the partial transcript appended by next challenge (chosen randomly)
+ \end{itemize}
+ is at most $\epsilon(i)$.
   \lean{Verifier.rbrKnowledgeSoundness}
-  \uses{def:interactive_oracle_reduction}
+  \uses{def:verifier, def:knowledge_state_function, def:rbr_extractor, def:prover_run_to_round}
+\end{definition}
+
+% \begin{remark}[Alternative Formulations of RBR Knowledge Soundness]
+% There are different ways to formulate round-by-round knowledge soundness, differing in whether
+% the extractor's failure to produce a valid witness is included as part of the security condition.
+% Some formulations condition on the extractor producing an invalid witness while the state function
+% transitions from false to true, while others may condition on the state function transition
+% regardless of extractor success. The current formalization includes the extractor failure as
+% part of the security condition.
+% \end{remark}
+
+\subsubsection{Extractor Properties}
+
+These definitions are highly experimental and may change in the future. The goal is to put some conditions on the extractor in order to prove that sequential composition preserves knowledge soundness.
+
+\begin{definition}[Monotone Straightline Extractor]
+  \label{def:monotone_straightline_extractor}
+  An extractor is \textbf{monotone} if its success probability on a given query log is the same as
+  the success probability on any extension of that query log. This property ensures that the extractor's
+  performance does not degrade when given more information.
+  \lean{Verifier.Extractor.Straightline.IsMonotone}
+  \uses{def:straightline_extractor}
+\end{definition}
+
+\begin{definition}[Monotone RBR Extractor]
+  \label{def:monotone_rbr_extractor}
+  A round-by-round extractor is \textbf{monotone} if its success probability on a given query log
+  is the same as the success probability on any extension of that query log.
+ \lean{Extractor.RoundByRound.IsMonotone} + \uses{def:rbr_extractor} +\end{definition} + +\subsubsection{Implications Between Security Notions} + +We have a lattice of security notions, with knowledge and round-by-round being two strengthenings of soundness. + +\begin{theorem}[Knowledge Soundness Implies Soundness] + \label{thm:knowledge_soundness_implies_soundness} + Knowledge soundness with knowledge error $\epsilon < 1$ implies soundness with the same + soundness error $\epsilon$, and for the corresponding input and output languages. + \lean{Verifier.knowledgeSoundness_implies_soundness} + \uses{def:knowledge_soundness, def:soundness} +\end{theorem} + +\begin{theorem}[RBR Soundness Implies Soundness] + \label{thm:rbr_soundness_implies_soundness} + Round-by-round soundness with error function $\epsilon$ implies soundness with error + $\sum_i \epsilon(i)$, where the sum is over all challenge rounds $i$. + \lean{Verifier.rbrSoundness_implies_soundness} + \uses{def:round_by_round_soundness, def:soundness} +\end{theorem} + +\begin{theorem}[RBR Knowledge Soundness Implies RBR Soundness] + \label{thm:rbr_knowledge_soundness_implies_rbr_soundness} + Round-by-round knowledge soundness with error function $\epsilon$ implies round-by-round + soundness with the same error function $\epsilon$. + \lean{Verifier.rbrKnowledgeSoundness_implies_rbrSoundness} + \uses{def:round_by_round_knowledge_soundness, def:round_by_round_soundness} +\end{theorem} + +\begin{theorem}[RBR Knowledge Soundness Implies Knowledge Soundness] + \label{thm:rbr_knowledge_soundness_implies_knowledge_soundness} + Round-by-round knowledge soundness with error function $\epsilon$ implies knowledge soundness + with error $\sum_i \epsilon(i)$, where the sum is over all challenge rounds $i$. 
+ \lean{Verifier.rbrKnowledgeSoundness_implies_knowledgeSoundness} + \uses{def:round_by_round_knowledge_soundness, def:knowledge_soundness} +\end{theorem} + +\subsubsection{Zero-Knowledge} + +\begin{definition}[Simulator] + \label{def:simulator} + A \textbf{simulator} consists of: + \begin{itemize} + \item Oracle simulation capabilities for the shared oracles + \item A prover simulation function that takes an input statement and produces a transcript + \end{itemize} + The simulator should have programming access to the shared oracles and be able to generate + transcripts that are indistinguishable from real protocol executions. + \lean{Reduction.Simulator} +\end{definition} + +\begin{remark}[Zero-Knowledge Definition] + We define honest-verifier zero-knowledge as follows: There exists a simulator such that for all + (malicious) verifiers, the distributions of transcripts generated by the simulator and the + interaction between the verifier and the prover are (statistically) indistinguishable. + A full definition will be provided in future versions. +\end{remark} + +\subsubsection{Oracle-Specific Security} + +For oracle reductions, the security definitions are analogous to those for standard reductions, but adapted to work with oracle interfaces: + +\begin{definition}[Oracle Reduction Completeness] + \label{def:oracle_reduction_completeness} + Completeness of an oracle reduction is the same as for non-oracle reductions, but applied to the + converted reduction where oracle statements are handled through their interfaces. + \lean{OracleReduction.completeness} + \uses{def:oracle_reduction, def:completeness, def:oracle_verifier_to_verifier} +\end{definition} + +\begin{definition}[Oracle Verifier Soundness] + \label{def:oracle_verifier_soundness} + Soundness of an oracle verifier is defined by converting it to a standard verifier and applying + the standard soundness definition. 
+ \lean{OracleVerifier.soundness} + \uses{def:oracle_verifier, def:soundness, def:oracle_verifier_to_verifier} +\end{definition} + +\begin{definition}[Oracle Verifier Knowledge Soundness] + \label{def:oracle_verifier_knowledge_soundness} + Knowledge soundness of an oracle verifier is defined by converting it to a standard verifier + and applying the standard knowledge soundness definition. + \lean{OracleVerifier.knowledgeSoundness} + \uses{def:oracle_verifier, def:knowledge_soundness, def:oracle_verifier_to_verifier} +\end{definition} + +\begin{definition}[Oracle Verifier RBR Soundness] + \label{def:oracle_verifier_rbr_soundness} + Round-by-round soundness of an oracle verifier is defined by converting it to a standard verifier + and applying the standard round-by-round soundness definition. + \lean{OracleVerifier.rbrSoundness} + \uses{def:oracle_verifier, def:round_by_round_soundness, def:oracle_verifier_to_verifier} +\end{definition} + +\begin{definition}[Oracle Verifier RBR Knowledge Soundness] + \label{def:oracle_verifier_rbr_knowledge_soundness} + Round-by-round knowledge soundness of an oracle verifier is defined by converting it to a standard + verifier and applying the standard round-by-round knowledge soundness definition. + \lean{OracleVerifier.rbrKnowledgeSoundness} + \uses{def:oracle_verifier, def:round_by_round_knowledge_soundness, def:oracle_verifier_to_verifier} \end{definition} By default, the properties we consider are perfect completeness and (straightline) round-by-round knowledge soundness. 
We can encapsulate these properties into the following typing judgement: @@ -256,3 +763,155 @@ \subsection{Security properties} \[ \Gamma := (\Psi; \Theta; \varSigma; \rho; \mathcal{O}) \vdash \{\mathcal{R}_1\} \quad \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle \quad \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} \] + +\subsubsection{State-Restoration Security} + +\begin{definition}[State-Restoration Soundness] + \label{def:sr_soundness} + \textbf{State-restoration soundness} is a security notion where the adversarial prover has access to + challenge oracles that can return the $i$-th challenge for any round $i$, given the input statement + and the transcript up to that point. This models stronger adversaries in the programmable random + oracle model or when challenges can be computed deterministically. + + A verifier satisfies state-restoration soundness if for all input statements not in the language, + for all witnesses, and for all state-restoration provers, the probability that the verifier + outputs a statement in the output language is bounded by the soundness error. + + \emph{Note: This definition is currently under development in the Lean formalization.} + % \lean{Verifier.srSoundness} +\end{definition} + +\begin{definition}[State-Restoration Knowledge Soundness] + \label{def:sr_knowledge_soundness} + \textbf{State-restoration knowledge soundness} extends state-restoration soundness with the + requirement that there exists a straightline extractor that can recover valid witnesses from + any state-restoration prover that convinces the verifier. + + \emph{Note: This definition is currently under development in the Lean formalization.} + % \lean{Verifier.srKnowledgeSoundness} +\end{definition} + + +% We can now define properties of interactive reductions. The two main properties we consider in this +% project are completeness and various notions of soundness. We will cover zero-knowledge at a later +% stage. 
+ +% First, for completeness, this is essentially probabilistic Hoare-style conditions on the execution +% of the oracle reduction (with the honest prover and verifier). In other words, given a predicate on +% the initial context, and a predicate on the final context, we require that if the initial predicate +% holds, then the final predicate holds with high probability (except for some \emph{completeness} +% error). + +% \begin{definition}[Completeness] +% \label{def:completeness} +% \lean{Reduction.completeness} +% \uses{def:oracle_reduction} +% \end{definition} + +% Almost all oracle reductions we consider actually satisfy \emph{perfect completeness}, which +% simplifies the proof obligation. In particular, this means we only need to show that no matter what challenges are chosen, the verifier will always accept given messages from the honest prover. + +% For soundness, we need to consider different notions. These notions differ in two main aspects: +% \begin{itemize} +% \item Whether we consider the plain soundness, or knowledge soundness. The latter relies on the +% notion of an \emph{extractor}. +% \item Whether we consider plain, state-restoration, round-by-round, or rewinding notion of +% soundness. +% \end{itemize} + +% We note that state-restoration knowledge soundness is necessary for the security of the SNARK +% protocol obtained from the oracle reduction after composing with a commitment scheme and applying +% the Fiat-Shamir transform. It in turn is implied by either round-by-round knowledge soundness, or +% special soundness (via rewinding). At the moment, we only care about non-rewinding soundness, so mostly we will care about round-by-round knowledge soundness. 
+ +% \begin{definition}[Soundness] +% \label{def:soundness} +% \lean{Verifier.soundness} +% \uses{def:oracle_reduction} +% \end{definition} + +% A (straightline) extractor for knowledge soundness is a deterministic algorithm that takes in the output public context after executing the oracle reduction, the side information (i.e. log of oracle queries from the malicious prover) observed during execution, and outputs the witness for the input context. + +% Note that since we assume the context is append-only, and we append only the public (or oracle) +% messages obtained during protocol execution, it follows that the witness stays the same throughout +% the execution. + +% \begin{definition}[Knowledge Soundness] +% \label{def:knowledge_soundness} +% \lean{Verifier.knowledgeSoundness} +% \uses{def:oracle_reduction} +% \end{definition} + +% To define round-by-round (knowledge) soundness, we need to define the notion of a \emph{state function}. This is a (possibly inefficient) function $\mathsf{StateF}$ that, for every challenge sent by the verifier, takes in the transcript of the protocol so far and outputs whether the state is doomed or not. Roughly speaking, the requirement of round-by-round soundness is that, for any (possibly malicious) prover $P$, if the state function outputs that the state is doomed on some partial transcript of the protocol, then the verifier will reject with high probability. 
+ +% \begin{definition}[State Function] +% \label{def:state_function} +% \lean{Verifier.StateFunction} +% \end{definition} + +% \begin{definition}[Round-by-Round Soundness] +% \label{def:round_by_round_soundness} +% \lean{Verifier.rbrSoundness} +% \uses{def:oracle_reduction} +% \end{definition} + +% \begin{definition}[Round-by-Round Knowledge Soundness] +% \label{def:round_by_round_knowledge_soundness} +% \lean{Verifier.rbrKnowledgeSoundness} +% \uses{def:oracle_reduction} +% \end{definition} + +% \textbf{PL Formalization.} We write our definitions in PL notation in~\Cref{fig:type-defs}. The set of types $\Type$ is the same as Lean's dependent type theory (omitting universe levels); in particular, we care about basic dependent types (Pi and Sigma), finite natural numbers, finite fields, lists, vectors, and polynomials. + +% \begin{figure}[t] +% \[\begin{array}{rcl} +% % Basic types +% \mathsf{Type} &::=& \mathsf{Unit} \mid \mathsf{Bool} \mid \mathbb{N} \mid \mathsf{Fin}\; n \mid \mathbb{F}_q \mid \mathsf{List}\;(\alpha : \mathsf{Type}) \mid (i : \iota) \to \alpha\; i \mid (i : \iota) \times \alpha\; i \mid \dots \\[1em] +% % Protocol message types +% \mathsf{Dir} &::=& \mathsf{P2V.Pub} \mid \mathsf{P2V.Orac} \mid \mathsf{V2P} \\ +% \mathsf{OI}\; (\mathrm{M} : \Type) &::=& \langle \mathrm{Q}, \mathrm{R}, \mathrm{M} \to \mathrm{Q} \to \mathrm{R} \rangle \\ +% % Protocol type signature +% \pSpec\; (n : \mathbb{N}) &::=& \mathsf{Fin}\; n \to (d : \mathsf{Dir}) \times (M : \Type) \times (\mathsf{if}\; d = \mathsf{P2V.Orac} \; \mathsf{then} \; \mathsf{OI}(M) \; \mathsf{else} \; \mathsf{Unit}) \\ +% % Oracle type signature +% \oSpec \; (\iota : \mathsf{Type}) &::=& (i : \iota) \to \mathsf{dom}\; i \times \mathsf{range}\; i \\[1em] +% % Contexts +% \varSigma &::=& \emptyset \mid \varSigma \times \Type \\ +% \Omega &::=& \emptyset \mid \Omega \times \langle \mathrm{M} : \Type, \mathsf{OI}(\mathrm{M}) \rangle \\ +% \Psi &::=& \emptyset \mid \Psi \times \Type\\ +% 
\end{array}\] +% \[\begin{array}{rcl} +% \Gamma &::=& (\Psi; \Omega; \varSigma; \rho; \mathcal{O})\\ +% \mathsf{OComp}^{\mathcal{O}}\; (\alpha : \Type) &::=& \mid\; \mathsf{pure}\; (a : \alpha) \\ +% && \mid\; \mathsf{queryBind}\;(i : \iota)\; (q : \mathsf{dom}\; i)\; (k : \mathsf{range}\; i \to \mathsf{OComp}^{\mathcal{O}}\; \alpha) \\ +% && \mid\; \mathsf{fail} \\[1em] +% \tau_{\mathsf{P}}(\Gamma) &::=& (i : \mathsf{Fin}\; n) \to (h : (\rho \; i).\mathsf{fst} = \mathsf{P2V}) \to \\ +% && \varSigma \to \Omega \to \Psi \to \rho_{[:i]} \to \mathsf{OComp}^{\mathcal{O}}\;\left( (\rho \; i).\mathsf{snd}\right) \\[1em] + +% \tau_{\mathsf{V}}(\Gamma) &::=& \varSigma \to (\rho.\mathsf{Chals}) \to \mathsf{OComp}^{\mathcal{O} :: \OI(\Omega) :: \OI(\rho.\mathsf{Msg.Orac})}\; \mathsf{Unit} \\[1em] +% \tau_{\mathsf{E}}(\Gamma) &::=& \varSigma \to \Omega \to \rho.\mathsf{Transcript} \to \calO.\mathsf{QueryLog} \to \Psi +% \end{array}\] +% \caption{Type definitions for interactive oracle reductions} +% \label{fig:type-defs} +% \end{figure} + +% Using programming language notation, we can express an interactive oracle reduction as a typing judgment: +% \[ +% \Gamma := (\Psi; \Theta; \varSigma; \rho; \mathcal{O}) \vdash \mathcal{P} : \tau_{\mathsf{P}}(\Gamma), \; \mathcal{V} : \tau_{\mathsf{V}}(\Gamma) +% \] +% where: +% \begin{itemize} +% \item $\Psi$ represents the witness (private) inputs +% \item $\Theta$ represents the oracle inputs +% \item $\varSigma$ represents the public inputs (i.e. statements) +% \item $\mathcal{O} : \oSpec\; \iota$ represents the shared oracle +% \item $\rho : \pSpec\; n$ represents the protocol type signature +% \item $\mathcal{P}$ and $\mathcal{V}$ are the prover and verifier, respectively, being of the given types $\tau_{\mathsf{P}}(\Gamma)$ and $\tau_{\mathsf{V}}(\Gamma)$. +% \end{itemize} + +% To exhibit valid elements for the prover and verifier types, we will use existing functions in the ambient programming language (e.g. Lean). 
+ +% By default, the properties we consider are perfect completeness and (straightline) round-by-round knowledge soundness. We can encapsulate these properties into the following typing judgement: + +% \[ +% \Gamma := (\Psi; \Theta; \varSigma; \rho; \mathcal{O}) \vdash \{\mathcal{R}_1\} \quad \langle\mathcal{P}, \mathcal{V}, \mathcal{E}\rangle \quad \{\!\!\{\mathcal{R}_2; \mathsf{St}; \epsilon\}\!\!\} +% \] diff --git a/blueprint/src/oracle_reductions/fiat_shamir.tex b/blueprint/src/oracle_reductions/fiat_shamir.tex new file mode 100644 index 000000000..72eed5480 --- /dev/null +++ b/blueprint/src/oracle_reductions/fiat_shamir.tex @@ -0,0 +1,160 @@ +\section{The Fiat-Shamir Transformation}\label{sec:fiat_shamir} + +(NOTE: generated by Claude 4 Sonnet, will need to be cleaned up) + +The Fiat-Shamir transformation is a fundamental cryptographic technique that converts a public-coin interactive reduction into a non-interactive reduction by replacing verifier challenges with queries to a random oracle. This transformation removes the need for interaction while preserving important security properties under certain assumptions. + +In our formalization, the Fiat-Shamir transformation takes an interactive reduction $R$ and produces a non-interactive reduction where the prover computes all messages at once, and the verifier derives the challenges using queries to a hash function (modeled as a random oracle) applied to the statement and the messages up to each challenge round. + +\subsection{Oracle Interface for Fiat-Shamir Challenges}\label{sec:fiat_shamir_oracle_interface} + +The key insight of the Fiat-Shamir transformation is to replace interactive challenges with deterministic computations based on the protocol messages so far. 
+ +\begin{definition}[Fiat-Shamir Challenge Oracle Interface] + \label{def:fiat_shamir_challenge_oracle_interface} + For a protocol specification $\pSpec$ and input statement type $\StmtIn$, the Fiat-Shamir challenge oracle interface for the $i$-th challenge is defined as follows: + \begin{itemize} + \item \textbf{Query type}: $\StmtIn \times \pSpec.\MessagesUpTo \;i.\mathsf{val}.\mathsf{castSucc}$ + \item \textbf{Response type}: $\pSpec.\Challenge \;i$ + \item \textbf{Oracle behavior}: Returns the challenge (which is determined by the random oracle) + \end{itemize} + + The query consists of the input statement and all prover messages sent up to (but not including) round $i$. + % \lean{instChallengeOracleInterfaceFiatShamir} +\end{definition} + +\begin{definition}[Fiat-Shamir Oracle Specification] + \label{def:fiat_shamir_spec} + The Fiat-Shamir oracle specification for a protocol $\pSpec$ with input statement type $\StmtIn$ is: + \[ \mathsf{fiatShamirSpec} \;\pSpec \;\StmtIn : \OracleSpec \;\pSpec.\ChallengeIdx \] + where for each challenge index $i$, the oracle domain is $\StmtIn \times \pSpec.\MessagesUpTo \;i.\mathsf{val}.\mathsf{castSucc}$ and the range is $\pSpec.\Challenge \;i$. + + This specification defines a family of oracles, one for each challenge round, that deterministically computes challenges based on the statement and messages up to that round. + \lean{ProtocolSpec.fiatShamirSpec} +\end{definition} + +\subsection{Fiat-Shamir Transformation for Provers}\label{sec:fiat_shamir_prover} + +The Fiat-Shamir transformation modifies the prover's execution to compute all messages non-interactively while simulating the verifier's challenges using oracle queries. 
+ +\begin{definition}[Fiat-Shamir Round Processing] + \label{def:prover_process_round_fs} + The modified round processing function for Fiat-Shamir maintains the prover messages (but not challenges) and the input statement throughout execution: + + \[ \mathsf{processRoundFS} \;j \;\mathsf{prover} \;\mathsf{currentResult} \] + + For each round $j$: + \begin{itemize} + \item If $j$ is a challenge round: Query the Fiat-Shamir oracle with the statement and messages so far, then update the prover state with the received challenge + \item If $j$ is a message round: Generate the message using the prover's $\mathsf{sendMessage}$ function and append it to the message history + \end{itemize} + + The key difference from standard execution is that challenges are derived via oracle queries rather than received from an interactive verifier. + \lean{Prover.processRoundFS} + \uses{def:fiat_shamir_spec} +\end{definition} + +\begin{definition}[Fiat-Shamir Prover Execution] + \label{def:prover_run_to_round_fs} + The Fiat-Shamir prover execution up to round $i$ is defined as: + + \[ \mathsf{runToRoundFS} \;i \;\mathsf{stmt} \;\mathsf{prover} \;\mathsf{state} \] + + This executes the prover inductively using $\mathsf{processRoundFS}$, starting from the initial state and accumulating messages and the statement. Returns the messages up to round $i$, the input statement, and the prover's final state. 
+ \lean{Prover.runToRoundFS} + \uses{def:prover_process_round_fs} +\end{definition} + +\begin{definition}[Fiat-Shamir Prover Transformation] + \label{def:prover_fiat_shamir} + Given an interactive prover $P$ for protocol $\pSpec$, the Fiat-Shamir transformation produces a non-interactive prover: + + \[ P.\mathsf{fiatShamir} : \NonInteractiveProver \;(\forall i, \pSpec.\Message \;i) \;(\oSpec \mathrel{++_\mathsf{o}} \mathsf{srChallengeOracle} \;\pSpec \;\StmtIn) \;\StmtIn \;\WitIn \;\StmtOut \;\WitOut \] + + The transformed prover: + \begin{itemize} + \item Has state type that combines the statement with the original prover's state at round 0, and uses the final state type for subsequent rounds + \item On input, stores both the statement and initializes the original prover's state + \item Sends a single message containing all of the original prover's messages, computed via $\mathsf{runToRoundFS}$ + \item Never receives challenges (since it's non-interactive) + \item Outputs using the original prover's output function + \end{itemize} + \lean{Prover.fiatShamir} + \uses{def:prover_run_to_round_fs} +\end{definition} + +\subsection{Transcript Derivation and Verifier Transformation}\label{sec:fiat_shamir_verifier} + +The Fiat-Shamir verifier must reconstruct the full interactive transcript from the prover's messages in order to run the original verification logic. 
+ +\begin{definition}[Fiat-Shamir Transcript Derivation] + \label{def:derive_transcript_fs} + Given a collection of prover messages and an input statement, the function $\mathsf{deriveTranscriptFS}$ reconstructs the full protocol transcript up to round $k$: + + \[ \mathsf{messages}.\mathsf{deriveTranscriptFS} \;\mathsf{stmt} \;k : \OracleComp \;(\oSpec \mathrel{++_\mathsf{o}} \mathsf{srChallengeOracle} \;\pSpec \;\StmtIn) \;(\pSpec.\Transcript \;k) \] + + This is computed inductively: + \begin{itemize} + \item For challenge rounds: Query the Fiat-Shamir oracle with the statement and messages up to that point + \item For message rounds: Use the corresponding message from the prover + \end{itemize} + + The result is a complete transcript that includes both prover messages and verifier challenges. + \lean{ProtocolSpec.Messages.deriveTranscriptFS} + \uses{def:fiat_shamir_spec} +\end{definition} + +\begin{definition}[Fiat-Shamir Verifier Transformation] + \label{def:verifier_fiat_shamir} + Given an interactive verifier $V$ for protocol $\pSpec$, the Fiat-Shamir transformation produces a non-interactive verifier: + + \[ V.\mathsf{fiatShamir} : \NonInteractiveVerifier \;(\forall i, \pSpec.\Message \;i) \;(\oSpec \mathrel{++_\mathsf{o}} \mathsf{srChallengeOracle} \;\pSpec \;\StmtIn) \;\StmtIn \;\StmtOut \] + + The transformed verifier: + \begin{itemize} + \item Takes the input statement and a proof consisting of all prover messages + \item Derives the full transcript using $\mathsf{deriveTranscriptFS}$ + \item Runs the original verifier's verification logic on the reconstructed transcript + \end{itemize} + \lean{Verifier.fiatShamir} + \uses{def:derive_transcript_fs} +\end{definition} + +\subsection{Fiat-Shamir Transformation for Reductions}\label{sec:fiat_shamir_reduction} + +\begin{definition}[Fiat-Shamir Reduction Transformation] + \label{def:reduction_fiat_shamir} + Given an interactive reduction $R$ for protocol $\pSpec$, the Fiat-Shamir transformation produces a 
non-interactive reduction: + + \[ R.\mathsf{fiatShamir} : \NonInteractiveReduction \;(\forall i, \pSpec.\Message \;i) \;(\oSpec \mathrel{++_\mathsf{o}} \mathsf{srChallengeOracle} \;\pSpec \;\StmtIn) \;\StmtIn \;\WitIn \;\StmtOut \;\WitOut \] + + This transformation simply applies the Fiat-Shamir transformation to both the prover and verifier components of the reduction. + \lean{Reduction.fiatShamir} + \uses{def:prover_fiat_shamir, def:verifier_fiat_shamir} +\end{definition} + +\subsection{Security Properties}\label{sec:fiat_shamir_security} + +The Fiat-Shamir transformation preserves important security properties of the original interactive reduction, under appropriate assumptions about the random oracle. + +\begin{theorem}[Fiat-Shamir Preserves Completeness] + \label{thm:fiat_shamir_completeness} + Let $R$ be an interactive reduction with completeness error $\epsilon$ with respect to input relation $R_{\text{in}}$ and output relation $R_{\text{out}}$. Then the Fiat-Shamir transformed reduction $R.\mathsf{fiatShamir}$ also satisfies completeness with error $\epsilon$ with respect to the same relations. + + Formally: $R.\mathsf{completeness} \;R_{\text{in}} \;R_{\text{out}} \;\epsilon \to (R.\mathsf{fiatShamir}).\mathsf{completeness} \;R_{\text{in}} \;R_{\text{out}} \;\epsilon$ + \lean{fiatShamir_completeness} + \uses{def:reduction_fiat_shamir} +\end{theorem} + +\begin{remark}[Additional Security Properties] + While completeness is straightforward to establish, soundness properties require more careful analysis. In particular: + \begin{itemize} + \item State-restoration knowledge soundness of the original reduction implies knowledge soundness of the Fiat-Shamir transformed reduction + \item Honest-verifier zero-knowledge of the original reduction implies zero-knowledge of the transformed reduction + \end{itemize} + These results require the random oracle model and careful handling of the oracle programming needed for simulation and extraction. 
The formal statements and proofs of these results are currently under development. +\end{remark} + +\begin{remark}[Implementation Considerations] + Our formalization models the ``theoretical'' version of Fiat-Shamir where the entire statement and transcript prefix are hashed to derive each challenge. In practice, more efficient variants use cryptographic sponges or other techniques to incrementally absorb transcript elements and squeeze out challenges. Our theoretical model provides the foundation for analyzing these practical variants. +\end{remark} diff --git a/blueprint/src/print.pdf b/blueprint/src/print.pdf index 5d674290b..f2ae8557e 100644 Binary files a/blueprint/src/print.pdf and b/blueprint/src/print.pdf differ diff --git a/blueprint/src/proof_systems/binius.tex b/blueprint/src/proof_systems/binius.tex new file mode 100644 index 000000000..aa0d712c0 --- /dev/null +++ b/blueprint/src/proof_systems/binius.tex @@ -0,0 +1,63 @@ +\section{Binius}\label{sec:binius} + +This section documents our formalization of the Binius commitment scheme protocols. These protocols are built upon the unique hierarchical structure of binary tower fields, which enables highly efficient arithmetic. We first describe the primitives used in the protocols, and then describe the protocols themselves. + +\subsection{Binary Tower Fields} + +We define the binary tower fields~\cite{DP25}, originally introduced as iterated quadratic extensions by Wiedemann~\cite{Wie88}. These fields, denoted $(\mathcal{T}_{\iota})_{\iota \in \mathbb{N}}$, provide a chain of nested field extensions for efficient arithmetic, particularly for operations involving subfields, by leveraging a highly compatible basis structure across the tower.
+ +\begin{definition}[Binary Tower Field] + \lean{BinaryTower.BTField} + \label{def:binary_tower_field} + A binary tower field $\mathcal{T}_{\iota}$ for $\iota \in \mathbb{N}$ is defined inductively as the $\iota$-th field in the sequence of quadratic extensions over the ground field $\mathbb{F}_2$. + \begin{itemize} + \item $\mathcal{T}_{0} := \mathbb{F}_{2}$ + \item $\forall \iota > 0$, $\mathcal{T}_{\iota} := \mathcal{T}_{\iota-1}[X_{\iota-1}] / (X_{\iota-1}^{2}+X_{\iota-2} \cdot X_{\iota-1}+1)$, where we conventionally set $X_{-1} := 1$. + \end{itemize} +\end{definition} + +\begin{theorem}[Irreducible defining polynomial] + \lean{BinaryTower.polyIrreducible} + \label{thm:binary_tower_field_irreducible} + The defining polynomial $X_{\iota-1}^{2}+X_{\iota-2} \cdot X_{\iota-1}+1$ of $\mathcal{T}_{\iota}$ is irreducible over $\mathcal{T}_{\iota-1}$ for all $\iota > 0$. +\end{theorem} + +\begin{theorem}[Binary Tower Fields are fields] + \lean{BinaryTower.BTFieldIsField, BinaryTower.BTFieldCard, BinaryTower.BTFieldChar2} + \uses{def:binary_tower_field, thm:binary_tower_field_irreducible} + We prove that the binary tower fields are finite fields: + \label{thm:binary_tower_fields_are_fields} + \begin{itemize} + \item The ground field $\mathcal{T}_{0} := \mathbb{F}_2$ is a field. For all $\iota > 0$, $\mathcal{T}_{\iota}$ is the quotient ring of $\mathcal{T}_{\iota-1}[X_{\iota-1}]$ by the irreducible polynomial $X_{\iota-1}^{2}+X_{\iota-2} \cdot X_{\iota-1}+1$, therefore $\mathcal{T}_{\iota}$ is a field extension of $\mathcal{T}_{\iota-1}$. + \item For all $\iota \in \mathbb{N}$, the cardinality of $\mathcal{T}_{\iota}$ is $2^{2^{\iota}}$. + \item For all $\iota \in \mathbb{N}$, the characteristic of $\mathcal{T}_{\iota}$ is 2. + \end{itemize} +\end{theorem} + +The structure of the tower provides a natural way to represent elements using a consistent set of variables. This leads to a family of multilinear bases that are compatible across different levels of the tower. 
+ +\begin{definition}[Multilinear Bases for Tower Fields] + \uses{def:binary_tower_field, thm:binary_tower_fields_are_fields} + \label{def:multilinear_basis} + For any tower field $\mathcal{T}_\iota$, we define its canonical bases as follows: + \begin{itemize} + \item \textbf{$\mathbb{F}_2$-Basis of $\mathcal{T}_\iota$}: The set of multilinear monomials in the variables $\{X_0, \dots, X_{\iota-1}\}$ forms a basis for $\mathcal{T}_\iota$ as a $2^\iota$-dimensional vector space over $\mathbb{F}_2$. An element is typically stored as a $2^\iota$-bit string corresponding to this basis. + \item \textbf{$\mathcal{T}_\iota$-Basis of $\mathcal{T}_{\iota+\kappa}$}: For any $\kappa \ge 0$, the set of multilinear monomials in the variables $\{X_\iota, \dots, X_{\iota+\kappa-1}\}$ forms a basis for $\mathcal{T}_{\iota+\kappa}$ as a $2^\kappa$-dimensional vector space over the subfield $\mathcal{T}_\iota$. + \end{itemize} +\end{definition} + +\begin{definition}[Computable Binary Tower Fields] + \lean{BinaryTower.ConcreteDefinition.instFieldConcrete} + \uses{def:binary_tower_field, def:multilinear_basis, thm:binary_tower_fields_are_fields} + \label{def:computable_binary_tower_field} + Building upon the abstract definition of binary tower fields, we define a concrete, computable representation of binary tower fields. This construction, which underpins our formalization, represents each element of the field $\mathcal{T}_\iota$ as a bit vector of length $2^\iota$ corresponding to the coefficients of the multilinear $\mathbb{F}_2$-basis. + + The arithmetic operations on these bit-vector representations are defined as follows: + \begin{itemize} + \item \textbf{Addition}: The sum of two elements is defined as the bitwise XOR of their corresponding bit-vector representations. + + \item \textbf{Multiplication (in $\mathcal{T}_\iota$)}: The product of two elements within the same field $\mathcal{T}_\iota$ is defined via a recursive Karatsuba-based algorithm~\cite{FP97}. 
The complexity of this operation is $\Theta(2^{\log_2(3) \cdot \iota})$. + + \item \textbf{Cross-Level Multiplication}: The product of an element $\alpha \in \mathcal{T}_{\iota+\kappa}$ by a scalar $b \in \mathcal{T}_\iota$ is defined by representing $\alpha$ via its $2^\kappa$ coefficients $(a_u)_{u \in \{0,1\}^\kappa}$ in the $\mathcal{T}_\iota$-basis and performing the multiplication component-wise on those coefficients in the subfield $\mathcal{T}_\iota$. The total complexity is $2^\kappa \cdot \Theta(2^{\log_2(3) \cdot \iota})$. + \end{itemize} +\end{definition} \ No newline at end of file diff --git a/blueprint/src/proof_systems/simple_protocols.tex b/blueprint/src/proof_systems/simple_protocols.tex index 96f12470c..8ed64f778 100644 --- a/blueprint/src/proof_systems/simple_protocols.tex +++ b/blueprint/src/proof_systems/simple_protocols.tex @@ -1,65 +1,316 @@ \section{Simple Oracle Reductions} -We start by introducing a number of simple oracle reductions. +We start by introducing a number of simple oracle reductions that serve as fundamental building blocks for more complex proof systems. These components can be composed together to construct larger protocols. -\subsection{Polynomial Equality Testing} +\subsection{Trivial Reduction} -Context: two univariate polynomials $P, Q \in \mathbb{F}[X]$ of degree at most $d$, available as polynomial evaluation oracles +The simplest possible oracle reduction is one that performs no computation at all. Both the prover and verifier simply pass their inputs through unchanged. While seemingly trivial, this reduction serves as an important identity element for composition and provides a base case for lifting and lens operations. 
-Input relation: $P = Q$ as polynomials +\begin{definition}[DoNothing Reduction] + \label{def:donothing_reduction} + The DoNothing reduction is a zero-round protocol with the following components: + \begin{itemize} + \item \textbf{Protocol specification}: $\pSpec := []$ (empty protocol, no messages exchanged) + \item \textbf{Prover}: Simply stores the input statement and witness, and outputs them unchanged + \item \textbf{Verifier}: Takes the input statement and outputs it directly + \item \textbf{Input relation}: Any relation $R_{\mathsf{in}} : \StmtIn \to \WitIn \to \Prop$ + \item \textbf{Output relation}: The same relation $R_{\mathsf{out}} := R_{\mathsf{in}}$ + \end{itemize} + \lean{DoNothing.reduction, DoNothing.oracleReduction} +\end{definition} -Protocol type: a single message of type $\mathbb{F}$ from the verifier to the prover. +\begin{theorem}[DoNothing Perfect Completeness] + The DoNothing reduction satisfies perfect completeness for any input relation. + \lean{DoNothing.reduction_completeness, DoNothing.oracleReduction_completeness} + \uses{def:donothing_reduction} +\end{theorem} -Honest prover: does nothing +The oracle version of DoNothing handles oracle statements by passing them through unchanged as well. The prover receives both non-oracle and oracle statements as input, and outputs them in the same form to the verifier. -Honest verifier: checks that $P(r) = Q(r)$ +\subsection{Sending the Witness} -Output relation: $P(r) = Q(r)$ +A fundamental building block in many proof systems is the ability for the prover to transmit witness information to the verifier. The SendWitness reduction provides this functionality in both direct and oracle settings. 
-Extractor: trivial since there is no witness +\begin{definition}[SendWitness Reduction] + \label{def:sendwitness_reduction} + The SendWitness reduction is a one-round protocol where the prover sends the complete witness to the verifier: + \begin{itemize} + \item \textbf{Protocol specification}: $\pSpec := [(\PtoVdir, \WitIn)]$ (single message from prover to verifier) + \item \textbf{Prover}: Sends the witness $\mathbbm{w}$ as its single message + \item \textbf{Verifier}: Receives the witness and combines it with the input statement to form the output + \item \textbf{Input relation}: $R_{\mathsf{in}} : \StmtIn \to \WitIn \to \Prop$ + \item \textbf{Output relation}: $R_{\mathsf{out}} : (\StmtIn \times \WitIn) \to \Unit \to \Prop$ defined by $((\mathsf{stmt}, \mathsf{wit}), ()) \mapsto R_{\mathsf{in}}(\mathsf{stmt}, \mathsf{wit})$ + \end{itemize} + \lean{SendWitness.reduction} + \uses{def:reduction} +\end{definition} -Completenes: trivial +\begin{theorem}[SendWitness Perfect Completeness] + The SendWitness reduction satisfies perfect completeness. 
+ \lean{SendWitness.reduction_completeness} + \uses{def:sendwitness_reduction} +\end{theorem} -Round-by-round state function: corresponds precisely to input and output relation +In the oracle setting, we consider two variants: -\newcommand{\abs}[1]{\lvert #1 \rvert} +\begin{definition}[SendWitness Oracle Reduction] + \label{def:sendwitness_oracle_reduction} + The oracle version handles witnesses that are indexed families of types with oracle interfaces: + \begin{itemize} + \item \textbf{Witness type}: $\WitIn : \iota_w \to \Type$ where each $\WitIn(i)$ has an oracle interface + \item \textbf{Protocol specification}: $\pSpec := [(\PtoVdir, \forall i, \WitIn(i))]$ + \item \textbf{Output oracle statements}: Combination of input oracle statements and the transmitted witness + \end{itemize} + % \lean{SendWitness.oracleReduction} + \uses{def:oracle_reduction} +\end{definition} -Round-by-round error: $d / \abs{\mathbb{F}}$ +\begin{definition}[SendSingleWitness Oracle Reduction] + \label{def:sendsinglewitness_oracle_reduction} + A specialized variant for a single witness with oracle interface: + \begin{itemize} + \item \textbf{Witness type}: $\WitIn : \Type$ with oracle interface + \item \textbf{Protocol specification}: $\pSpec := [(\PtoVdir, \WitIn)]$ + \item \textbf{Conversion}: Implicitly converts to indexed family $\WitIn : \Fin(1) \to \Type$ + \end{itemize} + \lean{SendSingleWitness.oracleReduction} +\end{definition} -Round-by-round knowledge soundness: follows from Schwartz-Zippel +\begin{theorem}[SendSingleWitness Perfect Completeness] + The SendSingleWitness oracle reduction satisfies perfect completeness. 
+ \lean{SendSingleWitness.oracleReduction_completeness} + \uses{def:sendsinglewitness_oracle_reduction} +\end{theorem} -To summarize, we have the following judgment: +\subsection{Oracle Equality Testing} -\[ - \begin{array}{c} - \Psi := (); \; \Theta := (P, Q); \; \Sigma := (); \; \tau := (\mathsf{V2P}, \mathbb{F}) \vdash \\[1ex] - \{P = Q\} \quad \left( - \begin{array}{l} - \mathcal{P} := (), \\ - \mathcal{V} := (P,Q,r) \mapsto [P(r) \stackrel{?}{=} Q(r)], \\ - \mathcal{E} := () - \end{array} - \right)^{\emptyset} \quad \{\!\!\{P(r) = Q(r); \mathsf{St}_{P,Q}; \frac{d}{\abs{\mathbb{F}}}\}\!\!\} - \end{array} -\] -where $\mathsf{St}(i) = \begin{cases} P \stackrel{?}{=} Q \text{ if } i = 0 \\ P(r) \stackrel{?}{=} Q(r) \text{ if } i = 1 \end{cases}$ +One of the most fundamental oracle reductions is testing whether two oracles of the same type are equal. This is achieved through random sampling from the query space. -\subsection{Batching Polynomial Evaluation Claims} +\begin{definition}[RandomQuery Oracle Reduction] + \label{def:randomquery_oracle_reduction} + The RandomQuery reduction tests equality between two oracles by random querying: + \begin{itemize} + \item \textbf{Input}: Two oracles $\mathsf{a}, \mathsf{b}$ of the same type with oracle interface + \item \textbf{Protocol specification}: $\pSpec := [(\VtoPdir, \mathsf{Query})]$ (single challenge from verifier) + \item \textbf{Input relation}: $R_{\mathsf{in}}((), (\mathsf{a}, \mathsf{b}), ()) := (\mathsf{a} = \mathsf{b})$ + \item \textbf{Verifier}: Samples random query $q$ and sends it to prover + \item \textbf{Prover}: Receives query $q$, performs no computation + \item \textbf{Output relation}: $R_{\mathsf{out}}((q, (\mathsf{a}, \mathsf{b})), ()) := (\mathsf{oracle}(\mathsf{a}, q) = \mathsf{oracle}(\mathsf{b}, q))$ + \end{itemize} + \lean{RandomQuery.oracleReduction} + \uses{def:oracle_reduction} +\end{definition} -Context: $n$-tuple of values $v = (v_1, \ldots, v_n) \in \mathbb{F}^n$ 
+\begin{theorem}[RandomQuery Perfect Completeness] + The RandomQuery oracle reduction satisfies perfect completeness: if two oracles are equal, they will agree on any random query. + \lean{RandomQuery.oracleReduction_completeness} + \uses{def:randomquery_oracle_reduction} +\end{theorem} -Protocol type: one message of type $\mathbb{F}^k$ from the verifier to the prover, and another message of type $\mathbb{F}$ from the prover to the verifier +The key security property of RandomQuery depends on the notion of oracle distance: -Auxiliary function: a polynomial map $E : \mathbb{F}^k \to \mathbb{F}^n$ +\begin{definition}[Oracle Distance] + \label{def:oracle_distance} + For oracles $\mathsf{a}, \mathsf{b}$ of the same type, we define their distance as: + \[ \mathsf{distance}(\mathsf{a}, \mathsf{b}) := |\{q : \mathsf{Query} \mid \mathsf{oracle}(\mathsf{a}, q) \neq \mathsf{oracle}(\mathsf{b}, q)\}| \] -Honest prover: given $r \gets \mathbb{F}^k$ from the verifier's message, computes $\langle E(r), v\rangle := E(r)_1 \cdot v_1 + \cdots + E(r)_n \cdot v_n$ and sends it to the verifier + We say an oracle type has distance bound $d$ if for any two distinct oracles $\mathsf{a} \neq \mathsf{b}$, we have $\mathsf{distance}(\mathsf{a}, \mathsf{b}) \geq |\mathsf{Query}| - d$. +\end{definition} -Honest verifier: checks that the received value $v'$ is equal to $\langle E(r), v\rangle$ +\begin{theorem}[RandomQuery Knowledge Soundness] + If the oracle type has distance bound $d$, then the RandomQuery oracle reduction satisfies round-by-round knowledge soundness with error probability $\frac{d}{|\mathsf{Query}|}$. 
+ \lean{RandomQuery.rbr_knowledge_soundness} + \uses{def:randomquery_oracle_reduction, def:oracle_distance} +\end{theorem} -Extractor: trivial since there is no witness +\begin{definition}[RandomQueryWithResponse Variant] + \label{def:randomquery_with_response} + A variant of RandomQuery where the second oracle is replaced with an explicit response: + \begin{itemize} + \item \textbf{Input}: Single oracle $\mathsf{a}$ and target response $r$ + \item \textbf{Output relation}: $R_{\mathsf{out}}(((q, r), \mathsf{a}), ()) := (\mathsf{oracle}(\mathsf{a}, q) = r)$ + \end{itemize} + This variant is useful when one wants to verify a specific query-response pair rather than oracle equality. + % \lean{RandomQueryWithResponse} + \uses{def:randomquery_oracle_reduction} +\end{definition} -Security: depends on the degree \& non-degeneracy of the polynomial map $E$ +We mention two special cases of RandomQuery that are useful for specific applications. -% State function: TODO +\subsubsection{Polynomial Equality Testing} -% Round-by-round error: TODO \ No newline at end of file +A common application of oracle reductions is testing equality between polynomial oracles. This is a specific instance of the RandomQuery reduction applied to polynomial evaluation oracles. + +\begin{definition}[Polynomial Equality Testing] + \label{def:polynomial_equality_testing} + Consider two univariate polynomials $P, Q \in \mathbb{F}[X]$ of degree at most $d$, available as polynomial evaluation oracles. 
The polynomial equality testing reduction is defined as: + \begin{itemize} + \item \textbf{Input relation}: $P = Q$ as polynomials + \item \textbf{Protocol specification}: Single challenge of type $\mathbb{F}$ from verifier to prover + \item \textbf{Honest prover}: Receives the random field element $r$ but performs no computation + \item \textbf{Honest verifier}: Checks that $P(r) = Q(r)$ by querying both polynomial oracles + \item \textbf{Output relation}: $P(r) = Q(r)$ for the sampled point $r$ + \end{itemize} +\end{definition} + +\begin{theorem}[Polynomial Equality Testing Completeness] + The polynomial equality testing reduction satisfies perfect completeness: if $P = Q$ as polynomials, then $P(r) = Q(r)$ for any field element $r$. +\end{theorem} + +\begin{theorem}[Polynomial Equality Testing Soundness] + The polynomial equality testing reduction satisfies round-by-round knowledge soundness with error probability $\frac{d}{|\mathbb{F}|}$, where $d$ is the maximum degree bound. This follows from the Schwartz-Zippel lemma: distinct polynomials of degree at most $d$ can agree on at most $d$ points. +\end{theorem} + +The state function for this reduction corresponds precisely to the input and output relations, transitioning from checking polynomial equality to checking evaluation equality at the sampled point. + +\subsubsection{Batching Polynomial Evaluation Claims} + +Another important building block is the ability to batch multiple polynomial evaluation claims into a single check using random linear combinations. + +TODO: express this as a lifted version of $\mathsf{RandomQuery}$ over a virtual polynomial whose +variables are the random linear combination coefficients. + +\begin{definition}[Batching Polynomial Evaluation Claims] + \label{def:batching_polynomial_evaluation} + Consider an $n$-tuple of values $v = (v_1, \ldots, v_n) \in \mathbb{F}^n$ and a polynomial map $E : \mathbb{F}^k \to \mathbb{F}^n$. 
The batching reduction is defined as: + \begin{itemize} + \item \textbf{Protocol specification}: Two messages: + \begin{enumerate} + \item Verifier sends random $r \in \mathbb{F}^k$ to prover + \item Prover sends $\langle E(r), v \rangle := \sum_{i=1}^n E(r)_i \cdot v_i$ to verifier + \end{enumerate} + \item \textbf{Honest prover}: Computes the inner product $\langle E(r), v \rangle$ and sends it + \item \textbf{Honest verifier}: Verifies that the received value equals the expected inner product + \item \textbf{Extractor}: Trivial since there is no witness to extract + \end{itemize} +\end{definition} + +\begin{theorem}[Batching Completeness] + The batching polynomial evaluation reduction satisfies perfect completeness. +\end{theorem} + +\begin{remark}[Batching Security] + The security of this reduction depends on the degree and non-degeneracy properties of the polynomial map $E$. The specific error bounds depend on the structure of $E$ and require careful analysis of the polynomial's properties. +\end{remark} + +\subsection{Sending a Claim} + +The SendClaim reduction enables a prover to transmit a claim (oracle statement) to the verifier, who then verifies a relationship between the original and transmitted claims. 
+ +\begin{definition}[SendClaim Oracle Reduction] + \label{def:sendclaim_oracle_reduction} + The SendClaim reduction is a one-round protocol for claim transmission: + \begin{itemize} + \item \textbf{Protocol specification}: $\pSpec := [(\PtoVdir, \OStmtIn)]$ (single oracle message) + \item \textbf{Input}: Statement and single oracle statement (via $\mathsf{Unique}$ index type) + \item \textbf{Prover}: Sends the input oracle statement as protocol message + \item \textbf{Verifier}: Executes oracle computation $\mathsf{relComp} : \StmtIn \to \OracleComp[\OStmtIn]_{\mathcal{O}} \Unit$ + \item \textbf{Output oracle statements}: Sum type $\OStmtIn \oplus \OStmtIn$ containing both original and transmitted claims + \item \textbf{Output relation}: $R_{\mathsf{out}}((), \mathsf{oracles}) := \mathsf{oracles}(\mathsf{inl}) = \mathsf{oracles}(\mathsf{inr})$ + \end{itemize} + \lean{SendClaim.oracleReduction} + \uses{def:oracle_reduction} +\end{definition} + +\begin{theorem}[SendClaim Perfect Completeness] + The SendClaim oracle reduction satisfies perfect completeness when the input relation matches the oracle computation requirement. + \lean{SendClaim.completeness} + \uses{def:sendclaim_oracle_reduction} +\end{theorem} + +\begin{remark}[SendClaim Development Status] + The SendClaim reduction is currently under active development in the Lean formalization. Several components including the verifier embedding and completeness proof require further implementation. The current version represents a specialized case that may be generalized in future iterations. + % TODO: Complete implementation as noted in the Lean code +\end{remark} + +\subsection{Claim Reduction} + +A fundamental building block for constructing complex proof systems is the ability to locally reduce one type of claim to another. The ReduceClaim reduction provides this functionality through mappings between statement and witness types. 
+ +\begin{definition}[ReduceClaim Reduction] + \label{def:reduceclaim_reduction} + The ReduceClaim reduction is a zero-round protocol that transforms claims via explicit mappings: + \begin{itemize} + \item \textbf{Protocol specification}: $\pSpec := []$ (no messages exchanged) + \item \textbf{Statement mapping}: $\mathsf{mapStmt} : \StmtIn \to \StmtOut$ + \item \textbf{Witness mapping}: $\mathsf{mapWit} : \WitIn \to \WitOut$ + \item \textbf{Prover}: Applies both mappings to input statement and witness + \item \textbf{Verifier}: Applies statement mapping to input statement + \item \textbf{Input relation}: $R_{\mathsf{in}} : \StmtIn \to \WitIn \to \Prop$ + \item \textbf{Output relation}: $R_{\mathsf{out}} : \StmtOut \to \WitOut \to \Prop$ + \item \textbf{Relation condition}: $R_{\mathsf{in}}(\mathsf{stmt}, \mathsf{wit}) \iff R_{\mathsf{out}}(\mathsf{mapStmt}(\mathsf{stmt}), \mathsf{mapWit}(\mathsf{wit}))$ + \end{itemize} + \lean{ReduceClaim.reduction} + \uses{def:reduction} +\end{definition} + +\begin{theorem}[ReduceClaim Perfect Completeness] + The ReduceClaim reduction satisfies perfect completeness when the relation condition holds. 
+ \lean{ReduceClaim.reduction_completeness} + \uses{def:reduceclaim_reduction} +\end{theorem} + +\begin{definition}[ReduceClaim Oracle Reduction] + \label{def:reduceclaim_oracle_reduction} + The oracle version additionally handles oracle statements through an embedding: + \begin{itemize} + \item \textbf{Oracle statement mapping}: Embedding $\mathsf{embedIdx} : \iota_{\mathsf{out}} \hookrightarrow \iota_{\mathsf{in}}$ + \item \textbf{Type compatibility}: $\OStmtIn(\mathsf{embedIdx}(i)) = \OStmtOut(i)$ for all $i$ + \item \textbf{Oracle embedding}: Maps output oracle indices to corresponding input oracle indices + \end{itemize} + \lean{ReduceClaim.oracleReduction} + \uses{def:oracle_reduction, def:reduceclaim_reduction} +\end{definition} + +\begin{remark}[ReduceClaim Oracle Completeness] + The oracle version's completeness proof is currently under development in the Lean formalization. + % TODO: Complete when oracleReduction_completeness is implemented +\end{remark} + +\subsection{Claim Verification} + +Another essential building block is the ability to verify that a given predicate holds for a statement without requiring additional witness information. 
+ +\begin{definition}[CheckClaim Reduction] + \label{def:checkclaim_reduction} + The CheckClaim reduction is a zero-round protocol that verifies predicates: + \begin{itemize} + \item \textbf{Protocol specification}: $\pSpec := []$ (no messages exchanged) + \item \textbf{Predicate}: $\mathsf{pred} : \StmtIn \to \Prop$ (decidable) + \item \textbf{Prover}: Simply stores and outputs the input statement with unit witness + \item \textbf{Verifier}: Checks $\mathsf{pred}(\mathsf{stmt})$ and outputs statement if successful + \item \textbf{Input relation}: $R_{\mathsf{in}}(\mathsf{stmt}, ()) := \mathsf{pred}(\mathsf{stmt})$ + \item \textbf{Output relation}: $R_{\mathsf{out}}(\mathsf{stmt}, ()) := \mathsf{True}$ (trivial after verification) + \end{itemize} + \lean{CheckClaim.reduction} + \uses{def:reduction} +\end{definition} + +\begin{theorem}[CheckClaim Perfect Completeness] + The CheckClaim reduction satisfies perfect completeness. + \lean{CheckClaim.reduction_completeness} + \uses{def:checkclaim_reduction} +\end{theorem} + +\begin{definition}[CheckClaim Oracle Reduction] + \label{def:checkclaim_oracle_reduction} + The oracle version handles predicates that require oracle access: + \begin{itemize} + \item \textbf{Oracle predicate}: $\mathsf{pred} : \StmtIn \to \OracleComp[\OStmtIn]_{\mathcal{O}} \Prop$ + \item \textbf{Never-fails condition}: $\mathsf{pred}(\mathsf{stmt})$ never fails for any statement + \item \textbf{Oracle computation}: Verifier executes oracle computation to check predicate + \item \textbf{Input relation}: Defined via oracle simulation of the predicate + \end{itemize} + \lean{CheckClaim.oracleReduction} + \uses{def:oracle_reduction, def:checkclaim_reduction} +\end{definition} + +\begin{theorem}[CheckClaim Oracle Perfect Completeness] + The CheckClaim oracle reduction satisfies perfect completeness. 
+ \lean{CheckClaim.oracleReduction_completeness} + \uses{def:checkclaim_oracle_reduction} +\end{theorem} + +\begin{remark}[CheckClaim Security Analysis] + The round-by-round knowledge soundness proofs for both reduction and oracle versions are currently under development in the Lean formalization. +\end{remark} diff --git a/blueprint/src/proof_systems/spartan.tex b/blueprint/src/proof_systems/spartan.tex new file mode 100644 index 000000000..d354a5723 --- /dev/null +++ b/blueprint/src/proof_systems/spartan.tex @@ -0,0 +1,53 @@ +\section{The Spartan Protocol}\label{sec:spartan} + +\subsection{Preliminaries} + +The Spartan protocol is designed to prove the satisfiability of a Rank-1 Constraint System (R1CS). +An R1CS instance is defined by a set of matrices $(A, B, C)$ over a field $\mathbb{F}$ (more generally the definition makes sense over any semiring $\mathsf{R}$), and it is satisfied by a public input $x$ and a private witness $w$ if the combined vector $z = (x, w)$ satisfies the relation: +\[ (A \cdot z) \circ (B \cdot z) = (C \cdot z) \] +where $\circ$ denotes the Hadamard (entry-wise) product. +Our formalization follows the definition in \texttt{ArkLib/ProofSystem/ConstraintSystem/R1CS.lean}. + +\subsection{Description in Paper} + +\Cref{fig:spartan_protocol} is the description of the Spartan protocol from the original +paper~\cite{spartan}. Note that in this section, we only formalize the \emph{Polynomial IOP (PIOP)} +aspect of Spartan. In the PIOP model, the prover does not commit to polynomials using a Polynomial +Commitment Scheme (PCS). Instead, the verifier is given oracle access to the polynomials. Therefore, +steps involving \texttt{PCS.Setup}, \texttt{PCS.Commit}, and \texttt{PCS.Eval} are replaced by simple oracle interactions.
+ +\begin{figure}[ht] + \centering + \includegraphics{figures/spartan-description.png} + \caption{The Spartan protocol description, from the original paper.} + \label{fig:spartan_protocol} +\end{figure} + +After stripping away the polynomial commitment scheme, the protocol has the following structure: + +\begin{description} + \item[Setup:] This step is part of the polynomial commitment scheme and is not part of the PIOP we formalize. + \item[Interaction:] The interaction is between a prover $\mathcal{P}$ with witness $w$ and a verifier $\mathcal{V}$ with public inputs $(\mathbb{F}, A, B, C, \text{io}, m, n)$. + \begin{enumerate} + \item $\mathcal{P}$: Sends oracle access to the multilinear extension of the witness, $\tilde{w}$, to $\mathcal{V}$. In the original paper, this is a commitment $(C,S) \leftarrow \text{PC.Commit}(\text{pp}, \tilde{w})$. + \item $\mathcal{V}$: Samples a random challenge $\tau \in \mathbb{F}^{\log m}$ and sends it to $\mathcal{P}$. + \item Let $T_1 = 0$, $\mu_1 = \log m$, $\ell_1 = 3$. These are parameters for the first sum-check protocol. + \item $\mathcal{V}$: Samples a random challenge $r_x \in \mathbb{F}^{\mu_1}$. + \item \textbf{Sum-check\#1.} A sum-check protocol is executed. The verifier receives the claimed evaluation $e_x$. The prover of the sum-check has oracle access to a polynomial $G_{io, \tau}$, and the verifier has oracle access to $r_x$. The parameters for this sub-protocol are $(\mu_1, \ell_1, T_1)$. + \item $\mathcal{P}$: Computes evaluations $v_A = \tilde{A}(r_x)$, $v_B = \tilde{B}(r_x)$, $v_C = \tilde{C}(r_x)$ and sends them to $\mathcal{V}$. $\tilde{A}, \tilde{B}, \tilde{C}$ are multilinear extensions of the matrices $A, B, C$. + \item $\mathcal{V}$: Aborts if $e_x \neq (v_A \cdot v_B - v_C) \cdot \tilde{\text{eq}}(r_x, \tau)$. This is the verifier's check for the first sum-check. + \item $\mathcal{V}$: Samples random challenges $r_A, r_B, r_C \in \mathbb{F}$ and sends them to $\mathcal{P}$. 
+ \item Let $T_2 = r_A \cdot v_A + r_B \cdot v_B + r_C \cdot v_C$, $\mu_2 = \log n$, $\ell_2 = 2$. These are parameters for the second sum-check protocol. Note: The image states $\mu_2 = \log m$, which is likely a typo and should be $\log n$. + \item $\mathcal{V}$: Samples a random challenge $r_y \in \mathbb{F}^{\mu_2}$. + \item \textbf{Sum-check\#2.} Another sum-check protocol is executed. The verifier receives the claimed evaluation $e_y$. + \item $\mathcal{P}$: Computes $v \leftarrow \tilde{w}(r_y[1..])$ and sends $v$ to $\mathcal{V}$. + \item This step involves a polynomial commitment evaluation proof. In our PIOP formalization, this check is not needed as the verifier has direct oracle access to $\tilde{w}$. + \item $\mathcal{V}$: This step is part of the evaluation proof check, so it is omitted. + \item $\mathcal{V}$: Computes $v_Z \leftarrow (1 - r_y[0]) \cdot \tilde{w}(r_y[1..]) + r_y[0] \cdot \widetilde{(\text{io}, 1)}(r_y[1..])$. This reconstructs the evaluation of the combined input-witness vector polynomial $\tilde{z}$. + \item $\mathcal{V}$: Queries oracles for $\tilde{A}, \tilde{B}, \tilde{C}$ at $(r_x, r_y)$ to get $v_1, v_2, v_3$. + \item $\mathcal{V}$: Aborts if $e_y \neq (r_A \cdot v_1 + r_B \cdot v_2 + r_C \cdot v_3) \cdot v_Z$. This is the final check. + \item $\mathcal{V}$: Outputs 1. + \end{enumerate} +\end{description} + +\subsection{Formalization using IOR Composition} diff --git a/blueprint/src/proof_systems/stir.tex b/blueprint/src/proof_systems/stir.tex new file mode 100644 index 000000000..fcfc93088 --- /dev/null +++ b/blueprint/src/proof_systems/stir.tex @@ -0,0 +1,278 @@ +% Copyright (c) 2025 ZKLib Contributors. All rights reserved. +% Released under Apache 2.0 license as described in the file LICENSE. 
+% Authors: Poulami Das (Least Authority) + +\section{Stir} + +\subsection{Tools for Reed Solomon codes} +\subsubsection{Random linear combination as a proximity generator}\label{sec:proximity_gap} + +\begin{theorem}\label{thm:proximity_gap} +\lean{proximity_gap} +\uses{def:distance_from_code,def:reed_solomon_code,def:list_close_codewords} + Let $\code := \rscode[\field,\evaldomain,\degree]$ be a Reed Solomon code with rate $\rate:=\frac{\degree}{|\evaldomain|}$ and let $B^*(\rate):=\sqrt{\rate}$. For every $\distance \in (0,1 - B^*(\rate))$ and functions $f_0,\ldots,f_{m-1} : \evaldomain \to \field$, if + \[ + \Pr_{r\leftarrow\field}\!\Bigl[ + \Delta\Bigl(\sum_{j=0}^{m-1} r^j\cdot {f_j},\rscode[\field,\evaldomain,\degree]\Bigr) + \le \delta + \Bigr]>\err^*(\degree,\rate,\distance,m), + \] + then there exists a subset $S \subseteq \evaldomain$ with $|S| \ge (1 - \delta)\cdot|L|$, + and for every $i \in [m]$, there exists $u \in \rscode[\field,\evaldomain,\degree]$ such that $f_i(S) = u(S)$. + + \medskip + + \noindent + Above, $\err^*(\degree,\rate,\distance,m)$ is defined as follows: + \begin{itemize} + \item if $\distance \in \left(0,\frac{1-\rate}{2}\right]$ then + \[ + \err^*(\degree,\rate,\distance,m)=\frac{(m-1)\cdot \degree}{\rate\cdot|\field|} + \] + \item if $\distance \in \Bigl(\frac{1-\rate}{2}, 1-\sqrt{\rate}\Bigr)$ then + \[ + \err^*(\degree,\rate,\distance,m)=\frac{(m-1)\cdot {\degree}^2}{|\field|\cdot{\Bigl(2\cdot\min\{1-\sqrt{\rate}-\distance,\frac{\sqrt{\rate}}{20}\}\Bigr)}^7} + \] + \end{itemize} +\end{theorem} + +\subsubsection{Univariate Function Quotienting}\label{sec:quotienting} + +In the following, we start by defining the \emph{quotient} of a univariate function. +\begin{definition}\label{def:quotient} +\lean{Quotienting.funcQuotient} + Let $f:\evaldomain\to\field$ be a function, $S\subseteq\field$ be a set, and $\mathsf{Ans},\mathsf{Fill}: S\rightarrow\field$ be functions. 
Let $\hat{\mathsf{Ans}}\in\field^{<|S|}[X]$ be the (unique) polynomial with $\hat{\mathsf{Ans}}(x)=\mathsf{Ans}(x)$ for every $x\in S$, and let $\hat{V}_S\in\field^{<|S|+1}[X]$ be the unique non-zero polynomial with $\hat{V}_S(x)=0$ for every $x\in S$. + The \emph{quotient function} $\mathsf{Quotient}\bigl(f,S,\mathsf{Ans},\mathsf{Fill}\bigr): \evaldomain\to\field$ + is defined as follows: + + \[ + \forall x \in \evaldomain, \quad + \mathsf{Quotient}\bigl(f, S, \mathsf{Ans}, \mathsf{Fill} \bigr)(x) + := + \left\{ + \begin{array}{ll} + \mathsf{Fill}(x) + & \text{if } x \in S \\[6pt] + \dfrac{f(x) - \hat{\mathsf{Ans}}(x)}{\hat{V}_S(x)} + & \text{otherwise} + \end{array} + \right. + \] + +\end{definition} + +Next we define the polynomial quotient operator, which quotients a polynomial relative to its output on evaluation points. The polynomial quotient is a polynomial of lower degree. + +\begin{definition}\label{def:poly_quotient} +\lean{Quotienting.polyQuotient} + Let $\hat{f}\in\field^{<\degree}[X]$ be a polynomial and $S\subseteq\field$ be a set, let $\hat{V}_S\in\field^{<|S|+1}[X]$ be the unique non-zero polynomial with $\hat{V}_S(x)=0$ for every $x\in S$. The \emph{polynomial quotient} $\mathsf{PolyQuotient}(\hat{f},S)\in\field^{\distance, + \] + where $T:=\{x\in\evaldomain\cap S: \hat{\mathsf{Ans}}(x)\neq f(x)\}$. +\end{lemma} + +\subsubsection{Out of domain sampling}\label{sec:out_of_domain_smpl} +\begin{lemma}\label{lemma:out_of_domain_smpl} +\lean{OutOfDomSmpl.out_of_dom_smpl_1, OutOfDomSmpl.out_of_dom_smpl_2} +\uses{def:reed_solomon_code,def:list_decodable,def:list_close_codewords} + Let $f:\evaldomain\rightarrow\field$ be a function, $\degree\in\N$ be a degree parameter, $s\in\N$ be a repetition parameter, and $\distance\in[0,1]$ be a distance parameter. 
If $\rscode[\field,\evaldomain,\degree]$ is $(\distance,l)$-list decodable then + \[ + \begin{array}{rcl} + \Pr_{\substack{r_0, \ldots, r_{s-1} \gets \field \setminus \evaldomain}} + \left[ + \begin{array}{c} + \exists \text{ distinct } u, u' \in \listcode(f, \degree, \distance): \\ + \forall i \in [s],\ \hat{u}(r_i) = \hat{u}'(r_i) + \end{array} + \right] + & \leq & \binom{l}{2} \cdot \left( \frac{\degree - 1}{|\field| - |\evaldomain|} \right)^s \\ + & \leq & \left( \frac{l^2}{2} \right) \cdot \left( \frac{\degree}{|\field| - |\evaldomain|} \right)^s + \end{array} + \] + + + +\end{lemma} + +\subsubsection{Folding univariate functions}\label{sec:folding_uf} +STIR relies on $k$-wise folding of functions and polynomials - this is similar to prior works, although presented in a slightly different form. As shown below, folding a function preserves proximity from the Reed-Solomon code with high probability. The folding operator is based on the following fact, decomposing univariate polynomials into bivariate ones. + +\begin{lemma}\label{fact:poly_folding} +\lean{Folding.exists_unique_bivariate,Folding.degree_bound_bivariate} +Given a polynomial $\hat{q}\in\field[X]$: +\begin{itemize} + \item For every univariate polynomial $\hat{f}\in\field[X]$, there exists a unique bivariate polynomial $\hat{Q}\in\field[X,Y]$ with: + \[ + \polydeg_X(\hat{Q}) := \left\lfloor \frac{\polydeg(\hat{f})}{\polydeg(\hat{q})} \right\rfloor,\quad \polydeg_Y(\hat{Q}) < \polydeg(\hat{q}) + \] + such that $\hat{f}(Z)=\hat{Q}(\hat{q}(Z),Z)$. Moreover, $\hat{Q}$ can be computed efficiently given $\hat{f}$ and $\hat{q}$. Observe that if $\polydeg(\hat{f})\err^*(\degree/k,\rate,\distance,k). + \] + Above, ${\mathsf{B}}^*$ and ${\err}^*$ are the proximity bound and error (respectively) described in Section~\ref{sec:proximity_gap}. 
+\end{lemma} + +\subsubsection{Combine functions of varying degrees}\label{sec:combine} +We show a new method for combining functions of varying degrees with minimal proximity requirements using geometric sums. We begin by recalling a fact about geometric sums. + +\begin{lemma}\label{fact:geometric_sum} +\lean{Combine.geometric_sum_units} + Let $\field$ be a field, $r\in\field$ be a field element, $a\in\N$ be a natural number. Then + \[ + \sum_{i=0}^{a} r^i := + \left\{ + \begin{array}{ll} + \displaystyle\left(\frac{1 - r^{a+1}}{1 - r}\right) + & \text{if } r \neq 1 \\[6pt] + a + 1 + & \text{if } r = 1 + \end{array} + \right. + \] + +\end{lemma} + +\begin{definition}\label{def:combine} +\lean{Combine.combineInterm,Combine.combineFinal} + Given target degree $\degree^*\in\N$, shifting parameter $r\in\field$, functions $f_0,\ldots,f_{m-1}:\evaldomain\rightarrow\field$, and degrees $0\leq \degree_0,\ldots,\degree_{m-1}\leq {\degree}^*$, we define $\combine(\degree^*,r,(f_0,\degree_0),\ldots,(f_{m-1},\degree_{m-1})):\evaldomain\rightarrow\field$ as follows: + \[ + \begin{array}{rcl} + \combine(\degree^*, r, (f_0, \degree_0), \ldots, (f_{m-1}, \degree_{m-1}))(x) + & := & \displaystyle\sum_{i=0}^{m-1} r_i \cdot f_i(x) \cdot \left( \sum_{l=0}^{\degree^* - \degree_i} (r \cdot x)^l \right) \\[10pt] + & = & \left\{ + \begin{array}{ll} + \displaystyle\sum_{i=0}^{m-1} r_i \cdot f_i(x) \cdot \left( \dfrac{1 - (x r)^{\degree^* - \degree_i + 1}}{1 - x r} \right) + & \text{if } x \cdot r \neq 1 \\[8pt] + \displaystyle\sum_{i=0}^{m-1} r_i \cdot f_i(x) \cdot (\degree^* - \degree_i + 1) + & \text{if } x \cdot r = 1 + \end{array} + \right. + \end{array} + \] + +Above, $r_i:=r^{i-1+\sum_{j\err^*(\degree^*,\rate,\distance,m\cdot(\degree^*+1)-\sum_{i=0}^{m-1}\degree_i), + \] + then there exists $S\subseteq \evaldomain$ with $|S|\geq(1-\distance)\cdot|\evaldomain|$, and + \[ + \forall i\in[m-1],\exists u\in\rscode[\field,\evaldomain,\degree_i], f_i(S)=u(S). 
+ \] + Note that this implies $\Delta(f_i,\rscode[\field,\evaldomain,\degree_i])<\distance$ for every $i$. Above, ${\mathsf{B}}^*$ and ${\err}^*$ are the proximity bound and error (respectively) described in the proximity gap theorem. +\end{lemma} + +\subsection{STIR Main Theorems} + +\begin{theorem}[STIR Main Theorem]\label{thm:stir} +\lean{StirIOP.stir_main} +\uses{def:reed_solomon_code,lemma:rnd_by_rnd_soundness} + Consider the following ingredients: + \begin{itemize} + \item A security parameter $\lambda\in\N$. + \item A Reed-Solomon code $\rscode[\field,\evaldomain,\degree]$ with $\rate:=\frac{\degree}{|\evaldomain|}$ where $\degree$ is a power of $2$, and $\evaldomain$ is a smooth domain. + \item A proximity parameter $\distance\in(0,1-1.05\cdot\sqrt{\rate})$. + \item A folding parameter $k\in\N$ that is a power of $2$ with $k\geq 4$. + \end{itemize} +If $|\field|=\Omega(\frac{\lambda\cdot2^\lambda\cdot\degree^2\cdot{|\evaldomain|}^2}{\log(1/\rate)})$, there is a public-coin IOPP for $\rscode[\field,\evaldomain,\degree]$ with the following parameters: +\begin{itemize} + \item Round-by-round soundness error $2^{-\lambda}$. + \item Round complexity: $M:=O(\log_k{\degree})$. + \item Proof length: $|\evaldomain|+O_k(\log{\degree})$. + \item Query complexity to the input: $\frac{\lambda}{-\log{(1-\distance)}}$. + \item Query complexity to the proof strings: $O_k(\log{\degree}+\lambda\cdot\log{\Big(\frac{\log{\degree}}{\log{1/\rate}}\Big)})$. 
+\end{itemize} +\end{theorem} + +\begin{lemma}\label{lemma:rnd_by_rnd_soundness} +\lean{StirIOP.stir_rbr_soundness} +\uses{def:reed_solomon_code,def:list_decodable,lemma:folding,lemma:out_of_domain_smpl,lemma:quotienting,lemma:combine} + Consider $(\field,M,\degree,k_0,\ldots,k_M,\evaldomain_0,\ldots,\evaldomain_M,t_0,\ldots,t_M)$ and for every $i\in\{0,\ldots,M\}$, let $\degree_i:=\frac{\degree}{\prod_{j err(\code,\parl,\delta), +\end{align*} +then there exists $S\subseteq \evaldomain$, $|S|>(1-\delta)\cdot|\evaldomain|$, and +$\forall i \in [0, (\parl-1)]$, $\exists u \in \code, \forall x \in S$, $f_i(x)=u(x)$. +\end{definition} + +\begin{theorem}\label{thm:proximity_gap_whir} +\lean{RSGenerator.proximityGapTheorem} +\uses{def:proximity_generator,def:reed_solomon_code} + Let $\code = \rscode[\field,\evaldomain,m]$ be a Reed Solomon code with rate $\rate = 2^m/|\evaldomain|$. $\gen(\alpha,\parl)=\{1,\alpha,\ldots,\alpha^{\parl-1}\}$ is a proximity generator for $\code$ with proximity bounds ${\bound}(\rate,\parl)=\sqrt{\rate}$ and $\err(C,\parl,\delta)$ defined below. + \begin{itemize} + \item if $\distance \in \left(0,\frac{1-\rate}{2}\right]$ then + \[ + \err(\code,\parl,\delta)=\frac{(m-1)\cdot \degree}{\rate\cdot|\field|} + \] + \item if $\distance \in \Bigl(\frac{1-\rate}{2}, 1-\sqrt{\rate}\Bigr)$ then + \[ + \err(\code,\parl,\delta)=\frac{(m-1)\cdot {\degree}^2}{|\field|\cdot{\Bigl(2\cdot\min\{1-\sqrt{\rate}-\distance,\frac{\sqrt{\rate}}{20}\}\Bigr)}^7} + \] + \end{itemize} +\end{theorem} + +\begin{definition}\label{def:gen_mutual_corr_agreement} + \lean{CorrelatedAgreement.genMutualCorrAgreement} + \uses{def:proximity_generator} + Let $\code$ be a linear code. We say that $\gen$ be a proximity generator with mutual correlated agreement with proximity bounds ${\bound}^\star$ and $\err^\star$, if for $f_0,\ldots,f_{\parl-1}:\evaldomain\rightarrow\field$ and $\delta\in(0,1-{\bound}^\star(\code,\parl))$ the following holds. 
+ \[ + \Pr_{(r_0, \ldots, r_{\parl-1}) \leftarrow \gen(\parl)} \left[ + \exists S \subseteq \evaldomain \;\; s.t.\;\; + \begin{array}{l} + |S| \geq (1 - \delta) \cdot |\evaldomain| \\ + \land\; \exists u \in \code, u(S) = \sum_{j \in [0,(\parl-1)]} r_j \cdot f_j(S) \\ + \land\; \exists i \in [0,(\parl-1)], \forall u' \in \code, u'(S) \neq f_i(S) + \end{array}\right] + \leq \err^\star(\code, \parl, \delta). + \] +\end{definition} + +\begin{lemma}\label{lemma:gen_mutual_corr_agreement} +\lean{CorrelatedAgreement.genMutualCorrAgreement_le_bound} +\uses{def:proximity_generator, def:gen_mutual_corr_agreement} + Let $\code$ be a linear code with minimum distance $\delta_{\code}$ and let $\gen$ be a proximity generator for $\code$ with proximity bound ${\bound}$ and error $\err$. Then $\gen$ has mutual correlated agreement with proximity bound ${\bound}^\star(\code, \parl) = \min\{1 - \delta_{\code}/2, \bound(\code, \parl)\}$ and error $\err^\star(\code, \parl, \delta) := \err(\code, \parl, \delta)$. +\end{lemma} + +\begin{lemma} +\lean{CorrelatedAgreement.genMutualCorrAgreement_rsc_le_bound} +\uses{lemma:gen_mutual_corr_agreement, def:reed_solomon_code} + Let $\code := \rscode[\field, \evaldomain, m]$ be a Reed Solomon code with rate $\rate$. The function $\gen(\parl; \alpha) = (1, \alpha, \ldots, \alpha^{\parl - 1})$ is a proximity generator for $\code$ with mutual correlated agreement with proximity bound ${\bound}^\star(\code, \parl) := \frac{1 + \rate}{2}$ and error $\err^\star(\code, \parl, \delta) = \frac{(\parl - 1) \cdot 2^m}{\rate \cdot |\field|}$. 
+\end{lemma} + +\begin{theorem}\label{conjecture:whir} +\lean{CorrelatedAgreement.genMutualCorrAgreement_le_johnsonBound,CorrelatedAgreement.genMutualCorrAgreement_le_capacity} +\uses{def:reed_solomon_code,lemma:gen_mutual_corr_agreement} + The function $\gen(\parl; \alpha) := (1, \alpha, \ldots, \alpha^{\parl - 1})$ is a proximity generator with mutual correlated agreement for every smooth Reed Solomon code $\code := \rscode[\field, \evaldomain, m]$ (with rate $\rate := 2^m / |\evaldomain|$). We give two conjectures, for the parameters of the proximity bound ${\bound}^\star$ and the error $\err^\star$: + \begin{enumerate} + \item \textit{Up to the Johnson bound:} ${\bound}^\star(\code, \parl) := \sqrt{\rate},$ \textit{and} + \[ + \err(\code, \parl, \delta) := \frac{(\parl - 1) \cdot 2^m}{|\field| \cdot \left( 2 \cdot \min\left\{1 - \sqrt{\rate} - \delta, \frac{\sqrt{\rate}}{20} \right\} \right)^7}. + \] + + \item \textit{Up to capacity:} ${\bound}^\star(\code, \parl) := \rate$, \textit{and there exist constants $c_1, c_2, c_3 \in \mathbb{N}$ such that for every $\eta > 0$ and $0 < \delta < 1 - \rate - \eta$:} + \[ + \err^\star(\code, \parl, \delta) := \frac{(\parl - 1)^{c_2} \cdot \delta^{c_2}}{\eta^{c_1} \cdot \rate^{c_1 + c_2} \cdot |\field|}. + \] + \end{enumerate} +\end{theorem} + +\subsubsection{Mutual correlated agreement preserves list decoding} + +\begin{lemma}\label{lemma: mutual_corrAgr_listdecoding} +\lean{CorrelatedAgreement.mutualCorrAgreement_list_decoding} +\uses{def:reed_solomon_code,def:interleaved_code,def:list_close_codewords,lemma:gen_mutual_corr_agreement} + Let $\code \subseteq \field^{\evaldomain}$ be a linear code with minimum distance $\delta_{\code}$, and let $\gen$ be a proximity generator for $\code$ with mutual correlated agreement with proximity bound ${\bound}^\star$ and error $\err^\star$. 
Then, for every $f_0, \ldots, f_{\parl-1} : \evaldomain \to \field$ and $\delta \in (0, \min\{\delta_{\code}, 1 - {\bound}^\star(\code, \parl)\})$: + \[ + \Pr_{\substack{\alpha \leftarrow \{0,1\}^{w^\star} \\ \boldsymbol{r} := \gen(\parl; \alpha)}} \left[ + \Lambda\left(\code, \sum_{j \in [0,(\parl-1)]} r_j \cdot f_j, \delta \right) \neq + \left\{ \sum_{j \in [0,(\parl-1)]} r_j \cdot u_j : \boldsymbol{u} \in \Lambda\left(\mathcal{C}^\ell, (f_0, \ldots, f_{\parl-1}), \delta \right) \right\} + \right] \leq \err^\star(\code, \parl, \delta). + \] +\end{lemma} + +\subsubsection{Folding univariate functions} + +\begin{definition}\label{def:extract} +\lean{Fold.extract_x} + Let $\mathsf{extract}:\evaldomain^{2^{k+1}}\rightarrow \evaldomain^{2^k}$ be a function. There exists $x \in \evaldomain$, such that $y = x^{2^{k+1}}\in\evaldomain^{2^{k+1}}$. Then $\mathsf{extract}$ returns $z = \sqrt{y} = x^{2^k}\in\evaldomain^{2^k}$ such that $y = z^2$. +\end{definition} + +\begin{definition}\label{def:foldf} +\lean{Fold.foldf} +\uses{def:extract} + Let $f : \evaldomain^{2^k} \to \field$ be a function, and $\alpha \in \field$. We define $\mathrm{Fold_f}(f, {\alpha}) : \evaldomain^{(2^{k+1})} \to \field$ as follows: + \[ + \forall x \in \evaldomain^{2^k}, y \in \evaldomain^{2^{k+1}}, \quad \mathrm{Fold_f}(f, \alpha)(y) := \frac{f(x) + f(-x)}{2} + \alpha \cdot \frac{f(x) - f(-x)}{2 \cdot x}. + \] + + In order to compute $\mathrm{Fold_f}(f, \alpha)(y)$ it suffices to query $f$ at $x$ and $-x$, by retrieving $x=\mathsf{extract}(y)$. +\end{definition} + +\begin{definition}\label{def:fold_k} +\lean{Fold.fold_k_core,Fold.fold_k} +\uses{def:foldf} + For $k \leq m$ and $\vec{\alpha} = (\alpha_0, \ldots, \alpha_{k-1}) \in \field^k$ we define $\mathrm{Fold}(f, \vec{\alpha}) : \evaldomain^{2^k} \to \field$ to equal $\mathrm{Fold}(f, \vec{\alpha}) := f_k$ where $f_k$ is defined recursively as follows: $f_0 := f$, and $f_i := \mathrm{Fold_f}(f_{i-1}, \alpha_i)$. 
+\end{definition} + +\begin{definition}\label{def:fold_k_set} +\lean{Fold.fold_k_set} +\uses{def:fold_k} + For a set $S \subseteq \field^{\evaldomain}$ we denote $\mathrm{Fold_{S}}(S, \vec{\alpha}) := \{\mathrm{Fold}(f, \vec{\alpha}) \mid f \in S\}$. +\end{definition} + +\begin{lemma}\label{lemma:fold_fg} +\lean{Fold.fold_f_g} +\uses{def:fold_k,def:smooth_rs_code} + Let $f : \evaldomain \to \field$ be a function, $\vec{\alpha} \in \field^k$ be folding randomness and let $g := \mathrm{Fold}(f, \vec{\alpha})$. If $f \in \rscode[\field, \evaldomain, m]$ and $k \leq m$, then $g \in \rscode[\field, \evaldomain^{2^k}, m - k]$, and further the multilinear extension of $g$ is given by $\hat{g}(X_k, \ldots, X_{m-1}) := \hat{f}(\vec{\alpha}, X_k, \ldots, X_{m-1})$ where $\hat{f}$ is the multilinear extension of $f$. +\end{lemma} + +\subsubsection{Block relative distance} + +\begin{definition}\label{def:block} + \lean{BlockRelDistance.block} + Let $\evaldomain \subseteq \field$ be a smooth evaluation domain and $k \in \mathbb{N}$ be a folding parameter. For $z \in \evaldomain^{2^k}$, define $\mathrm{Block}(\evaldomain, i, k, z) := \{ y \in \evaldomain^{2^i} : y^{2^{k-i}} = z \}$. +\end{definition} + +\begin{definition}\label{def:block_rel_distance} +\lean{BlockRelDistance.blockRelDistance} +\uses{def:block} + Let $\code := \rscode[\field, \evaldomain, m]$ be a smooth Reed Solomon code and let $f, g : \evaldomain^{2^i} \to \field$. We define the $(i,k)$-wise block relative distance as + \[ + \Delta_{r}(\code, i, k, f, g) = \frac{ \left| \left\{ z \in \evaldomain^{2^k} : \exists y \in \mathrm{Block}(\evaldomain, i, k, z), f(y) \neq g(y) \right\} \right| }{ |\evaldomain^{2^k}| } + \] +\end{definition} + +\begin{definition}\label{def:min_block_rel_distance} +\lean{BlockRelDistance.minBlockRelDistance} +\uses{def:block_rel_distance} + For $S \subseteq \field^{\evaldomain}$, we let $\Delta_{r}(\code, i, k, f, S) := \min_{g \in S} \Delta_{r}(\code, i, k, f, g)$. 
+\end{definition} + +Note that $\Delta_{r}(\code, 0, 0, f, g) = \Delta(f, g)$ for any $\code$. We define the block list decoding of a codeword. + +\begin{definition}\label{def:list_close_codewords_block} +\lean{BlockRelDistance.listBlockRelDistance} +\uses{def:block_rel_distance} + For a smooth Reed Solomon code $\code := \rscode[\field, \evaldomain, m]$, proximity parameter $\delta \in [0,1]$, and $f : \evaldomain^{2^i} \to \field$, we let + \[ + \Lambda_{r}(\code, i, k, f, \delta) := \{ u \in \code \mid \Delta_{r}(\code, i, k, f, u) \leq \delta \}, + \] + denote the list of codewords in $\code$ within relative block distance at most $\delta$ from $f$. +\end{definition} + +\begin{lemma}\label{lemma:block_rel_distance} +\lean{BlockRelDistance.blockRelDistance_le_hammingDistance} +\uses{def:block_rel_distance,def:distance_from_code,def:list_close_codewords_block,def:list_close_codewords} + For any $\code := \rscode[\field, \evaldomain, m]$, $k \in \mathbb{N}$, and $f, g : \evaldomain^{2^i} \to \field$, we have that $\Delta(f, g) \leq \Delta_{r}(\code, i, k, f, g)$. Consequently, $\Lambda_{r}(\code, i, k, f, \delta) \subseteq \Lambda(\code, f, \delta)$ for $\delta\in[0,1]$. +\end{lemma} + +\subsubsection{Folding preserves list decoding} + +\begin{theorem}\label{thm:folding_preserves_listdecoding} +\lean{Fold.folding_listdecoding_if_genMutualCorrAgreement} +\uses{lemma:folding_preserves_listdecoding_base, lemma:folding_preserves_listdecoding_bound, lemma:folding_preserves_listdecoding_base_ne_subset} + Let $\code = \rscode[\field, \evaldomain, m]$ be a smooth Reed Solomon code and $k \leq m$. For $0 \leq i \leq k$ let $\code^{(i)} := \rscode[\field, \evaldomain^{2^i}, m - i]$. Let $\gen(\parl; \alpha) = (1, \alpha, \ldots, \alpha^{\parl - 1})$ be a proximity generator with mutual correlated agreement for the codes $\code^{(0)}, \ldots, \code^{(k-1)}$ with proximity bound ${\bound}^\star$ and error $\err^\star$. 
Then for every $f : \evaldomain \to \field$ and $\delta \in \left(0, 1 - \max_{i \in [0,(k-1)]} \{ {\bound}^\star(\code^{(i)}, 2) \} \right)$, + \[ + \Pr_{\alpha \leftarrow \field^k} \left[ + \mathrm{Fold_S}(\Lambda_{r}(\code, 0, k, f, \delta), \alpha) + \neq \Lambda(\code^{(k)}, \mathrm{Fold}(f, \alpha), \delta) + \right] < \err^{(k)}(\code, \delta). + \] +\end{theorem} + +\begin{lemma}\label{lemma:folding_preserves_listdecoding_base} +\lean{Fold.folding_preserves_listdecoding_base} +\uses{def:list_close_codewords_block,def:fold_k,def:fold_k_set} + Let $\code := \rscode[\field, \evaldomain, m]$ be a Reed Solomon code, and $k \leq m$ be a parameter. Denote $\code' := \rscode[\field, \evaldomain^{2}, m - 1]$. Then for every $f : \evaldomain \to \field$ and $\delta \in (0, 1 - {\bound}^\star(\code', 2))$, + \[ + \Pr_{\alpha \leftarrow \field} \left[ + \mathrm{Fold_S}(\Lambda_{r}(\code, 0, k, f, \delta), \alpha) + \neq \Lambda_{r}(\code', 1, k, \mathrm{Fold}(f, \alpha), \delta) + \right] < \err^\star(\code', 2, \delta). + \] +\end{lemma} + +\begin{lemma}\label{lemma:folding_preserves_listdecoding_bound} +\lean{Fold.folding_preserves_listdecoding_bound} +\uses{def:list_close_codewords_block,def:fold_k} + For every $\alpha \in \field$, $\mathrm{Fold_S}(\Lambda_{r}(\code, 0, k, f, \delta), \alpha) \subseteq \Lambda_{r}(\code', 1, k, \mathrm{Fold}(f, \alpha), \delta)$. +\end{lemma} + +\begin{lemma}\label{lemma:folding_preserves_listdecoding_base_ne_subset} +\lean{Fold.folding_preserves_listdecoding_base_ne_subset} +\uses{def:list_close_codewords_block,def:fold_k,def:fold_k_set} + \[ + \Pr_{\alpha \leftarrow \field} \left[ + \Lambda_{r}(\code', 1, k, \mathrm{Fold}(f, \alpha), \delta) + \not\subseteq \mathrm{Fold_S}(\Lambda_{r}(\code, 0, k, f, \delta), \alpha) + \right] < \err^\star(\code', 2, \delta). 
+ \] +\end{lemma} + +\begin{lemma}\label{lemma:crs_equiv_rs_randpompt_agreement} +\lean{OutOfDomSmpl.crs_equiv_rs_randpompt_agreement} +\uses{def:smooth_rs_code,def:constrained_code,def:list_close_codewords} + Let $f : \evaldomain \rightarrow \field$ be a function, $m \in \mathbb{N}$ be a number of variables, $s \in \mathbb{N}$ be a repetition parameter, and let $\delta \in [0,1]$ be a distance parameter. For every $\vec{r_0}, \dots, \vec{r_{s-1}} \in \field^m$, the following are equivalent statements. +\begin{itemize} + \item There exist distinct $u, u' \in \Lambda(\rscode[\field, \evaldomain, m], f, \delta)$ such that, for every $i \in [0,s-1]$, $\hat{u}(\vec{r_i}) = \hat{u}'(\vec{r_i})$. + \item There exist $\sigma_0, \dots, \sigma_{s-1} \in \field$ such that $$\left| \Lambda(\crscode[\field, \evaldomain, m, ((Z \cdot \mathrm{eq}(\vec{r_0}, \cdot), \sigma_0), \dots, (Z \cdot \mathrm{eq}(\vec{r_{s-1}}, \cdot), \sigma_{s-1}))], f, \delta) \right| > 1.$$ +\end{itemize} +\end{lemma} + +\begin{lemma}\label{lemma:out_of_domain_sampling_crs_eq_rs} +\lean{OutOfDomSmpl.out_of_domain_sampling_crs_eq_rs} +\uses{def:smooth_rs_code,def:constrained_code,def:list_close_codewords,def:list_decodable} + Let $f : \evaldomain \rightarrow \field$ be a function, $m \in \mathbb{N}$ be a number of variables, $s \in \mathbb{N}$ be a repetition parameter, and $\delta \in [0,1]$ be a distance parameter. If $\rscode[\field, \evaldomain, m]$ is $(\delta, \ell)$-list decodable then + \[ + \Pr_{r_0, \dots, r_{s-1} \leftarrow \field} \left[ + \begin{array}{c} + \exists \sigma_0, \dots, \sigma_{s-1} \in \field \text{ s.t.} \\ + \left| \Lambda(\crscode[\field, \evaldomain, m, ((Z \cdot \mathrm{eq}(\mathrm{pow}(r_{i}, m), \cdot), \sigma_{i}))_{i \in [s]}], f, \delta) \right| > 1 + \end{array} + \right] + \] + + \[ + = \Pr_{r_0, \dots, r_{s-1} \leftarrow \field} \left[ + \begin{array}{c} + \exists \text{ distinct } u, u' \in \Lambda(\rscode[\field, \evaldomain, m], f, \delta) \\ + \text{ s.t. 
} \forall i \in [s],\ \hat{u}(\mathrm{pow}(r_i, m)) = \hat{u}'(\mathrm{pow}(r_i, m)) + \end{array} + \right] + \] + + \[ + \leq \frac{\ell^2}{2} \cdot \left( \frac{2^m}{|\field|} \right)^s. + \] +\end{lemma} + +\begin{theorem}\label{thm: whir_rbr_soundness} +\lean{WhirIOP.whir_rbr_soundness} +\uses{thm:folding_preserves_listdecoding,lemma:out_of_domain_sampling_crs_eq_rs,lemma:gen_mutual_corr_agreement,def:constrained_code} + Consider $(\field, M, (k_i, m_i, \evaldomain_i, t_i)_{0 \leq i \leq M}, \widehat{w}_0, \sigma_0, m, d^\star, d)$ with the following ingredients and conditions, + \begin{itemize} + \item a constrained Reed Solomon code $\crscode[\field, \evaldomain_0, m_0, \widehat{w}_0, \sigma_0]$; + \item an iteration count $M \in \mathbb{N}$; + \item folding parameters $k_0, \ldots, k_{M}$ such that $\sum_{i=0}^{M} k_i \leq m$; + \item evaluation domains $\evaldomain_0, \ldots, \evaldomain_{M} \subseteq \field$ where $\evaldomain_i$ is a smooth coset of $\field^*$ with order $|\evaldomain_i| \geq 2^{m_i}$; + \item repetition parameters $t_0, \ldots, t_M$ with $t_i \leq |\evaldomain_i|$; + \item define $m_0 := m$ and $m_i := m - \sum_{j < i} k_j$; + \item define $d^\star := 1 + \deg_{\mathbb{Z}}(\widehat{w}_0) + \max_{i \in [m_0]} \deg_{X_i}(\widehat{w}_0)$ and $d := \max\{d^\star, 3\}$. 
+ \end{itemize} + For every $f \notin \crscode[\field, \evaldomain_0, m_0, \widehat{w}_0, \sigma_0]$ and every $\delta_0, \dots, \delta_{M}$ and $(\parl_{i,s})_{0 \leq i \leq M}^{0 \leq s \leq k_i}$ where + \begin{itemize} + \item $\delta_0 \in (0, \Delta(f, \crscode[\field, \evaldomain_0, m_0, \widehat{w}_0, \sigma_0]))$; + \item the function $\gen(\parl; \alpha) = (1, \alpha, \dots, \alpha^{\parl-1})$ is a proximity generator with mutual correlated agreement for the codes $(\mathcal{C}_{\mathrm{RS}}^{(i,s)})_{0 \leq i \leq M}^{0 \leq s \leq k_i}$ where $\mathcal{C}_{\mathrm{RS}}^{(i,s)} := \rscode[\field, \evaldomain_i^{(2^s)}, m_i - s]$ with bound ${\bound}^\star$ and error $\err^\star$; + \item for every $0 \leq i \le M$, $\delta_i \in (0, 1 - {\bound}^\star(\mathcal{C}_{\mathrm{RS}}^{(i,s)}, 2))$; + \item for every $0 \leq i \le M$, $\mathcal{C}_{\mathrm{RS}}^{(i,s)}$ is $(\ell_{i,s}, \delta_i)$-list decodable. + \end{itemize} + Then there exists an IOPP for $\crscode[\field, \evaldomain_0, m_0, \widehat{w}_0, \sigma_0]$ with above parameters, with round-by-round soundness error + + \[ +((\varepsilon_{0,s}^{\mathrm{fold}})_{s \leq k_0},\ (\varepsilon_i^{\mathrm{out}},\ \varepsilon_i^{\mathrm{shift}})_{i \leq M},\ (\varepsilon_{i,s}^{\mathrm{fold}})_{i \in [M], s \leq k_i},\ \varepsilon^{\mathrm{fin}}), +\] + +{where:} + +\begin{itemize} + \item $\varepsilon_{0,s}^{\mathrm{fold}} \leq \dfrac{d^\star \cdot \ell_{0,s-1}}{|\mathbb{F}|} + \mathrm{err}^\star(\mathcal{C}_{\mathrm{RS}}^{(0,s)}, 2, \delta_0)$; + \item $\varepsilon_i^{\mathrm{out}} \leq \dfrac{2^{m_i} \cdot \ell_{i,0}^2}{2 \cdot |\mathbb{F}|}$; + \item $\varepsilon_i^{\mathrm{shift}} \leq (1 - \delta_{i-1})^{t_{i-1}} + \dfrac{\ell_{i,0} \cdot (t_{i-1} + 1)}{|\mathbb{F}|}$; + \item $\varepsilon_{i,s}^{\mathrm{fold}} \leq \dfrac{d \cdot \ell_{i,s-1}}{|\mathbb{F}|} + \mathrm{err}^\star(\mathcal{C}_{\mathrm{RS}}^{(i,s)}, 2, \delta_i)$; + \item $\varepsilon^{\mathrm{fin}} \leq (1 - \delta_{M-1})^{t_M - 1}$. 
+\end{itemize} +\end{theorem} + + diff --git a/blueprint/src/references.bib b/blueprint/src/references.bib index 5f4eae2fd..14a57a429 100644 --- a/blueprint/src/references.bib +++ b/blueprint/src/references.bib @@ -1,4 +1,4 @@ -@article{sumcheck-protocol, +@article{sumcheck, author = {Lund, Carsten and Fortnow, Lance and Karloff, Howard and Nisan, Noam}, title = {Algebraic methods for interactive proof systems}, year = {1992}, @@ -18,7 +18,7 @@ @article{sumcheck-protocol keywords = {interactive proof systems} } -@inproceedings{sumcheck-formal-verification, +@inproceedings{sumcheck-fv, title = {Formal Verification of the Sumcheck Protocol}, author = {Bosshard, Azucena Garvia and Bootle, Jonathan and Sprenger, Christoph}, booktitle = {2024 IEEE 37th Computer Security Foundations Symposium (CSF)}, @@ -27,7 +27,7 @@ @inproceedings{sumcheck-formal-verification organization = {IEEE Computer Society} } -@inproceedings{interactive-oracle-proofs, +@inproceedings{IOPs, title = {Interactive oracle proofs}, author = {Ben-Sasson, Eli and Chiesa, Alessandro and Spooner, Nicholas}, booktitle = {Theory of Cryptography: 14th International Conference, TCC 2016-B, Beijing, China, October 31-November 3, 2016, Proceedings, Part II 14}, @@ -50,4 +50,104 @@ @book{ChiesaYogev2024 title = {Building Cryptographic Proofs from Hash Functions}, url = {https://github.com/hash-based-snargs-book}, year = {2024} -} \ No newline at end of file +} + +@inproceedings{ARoK, + title={Algebraic reductions of knowledge}, + author={Kothapalli, Abhiram and Parno, Bryan}, + booktitle={Annual International Cryptology Conference}, + pages={669--701}, + year={2023}, + organization={Springer} +} + +@article{WARP, + title={Linear-Time Accumulation Schemes}, + author={B{\"u}nz, Benedikt and Chiesa, Alessandro and Fenzi, Giacomo and Wang, William}, + journal={Cryptology ePrint Archive}, + year={2025} +} + +@article{Arc, + title={Arc: Accumulation for Reed--Solomon Codes}, + author={B{\"u}nz, Benedikt and Mishra, 
Pratyush and Nguyen, Wilson and Wang, William}, + journal={Cryptology ePrint Archive}, + year={2024} +} + +@article{fics-facs, + title={FICS and FACS: Fast IOPPs and Accumulation via Code-Switching}, + author={Baweja, Anubhav and Mishra, Pratyush and Mopuri, Tushar and Shtepel, Matan}, + journal={Cryptology ePrint Archive}, + year={2025} +} + +@inproceedings{WHIR, + title={WHIR: Reed--Solomon proximity testing with super-fast verification}, + author={Arnon, Gal and Chiesa, Alessandro and Fenzi, Giacomo and Yogev, Eylon}, + booktitle={Annual International Conference on the Theory and Applications of Cryptographic Techniques}, + pages={214--243}, + year={2025}, + organization={Springer} +} + +@inproceedings{Marlin, + title={Marlin: Preprocessing zkSNARKs with universal and updatable SRS}, + author={Chiesa, Alessandro and Hu, Yuncong and Maller, Mary and Mishra, Pratyush and Vesely, Noah and Ward, Nicholas}, + booktitle={Advances in Cryptology--EUROCRYPT 2020: 39th Annual International Conference on the Theory and Applications of Cryptographic Techniques, Zagreb, Croatia, May 10--14, 2020, Proceedings, Part I 39}, + pages={738--768}, + year={2020}, + organization={Springer} +} + +@inproceedings{DARK, + title={Transparent SNARKs from DARK compilers}, + author={B{\"u}nz, Benedikt and Fisch, Ben and Szepieniec, Alan}, + booktitle={Advances in Cryptology--EUROCRYPT 2020: 39th Annual International Conference on the Theory and Applications of Cryptographic Techniques, Zagreb, Croatia, May 10--14, 2020, Proceedings, Part I 39}, + pages={677--706}, + year={2020}, + organization={Springer} +} + +@article{Wie88, + author = {Wiedemann, Doug}, + title = {An Iterated Quadratic Extension of GF(2)}, + journal = {The Fibonacci Quarterly}, + volume = {26}, + number = {4}, + pages = {290--295}, + year = {1988}, + publisher = {Taylor \& Francis}, + doi = {10.1080/00150517.1988.12429608}, + URL = {https://doi.org/10.1080/00150517.1988.12429608}, + eprint = 
{https://doi.org/10.1080/00150517.1988.12429608} +} + +@inproceedings{FP97, + author={Fan, J.L. and Paar, C.}, + booktitle={Proceedings of IEEE International Symposium on Information Theory}, + title={On efficient inversion in tower fields of characteristic two}, + year={1997}, + volume={}, + number={}, + pages={20-}, + keywords={Poles and towers;Polynomials;Vectors;Galois fields;Hardware;Equations}, + doi={10.1109/ISIT.1997.612935} +} + +@inproceedings{DP25, + author = {Diamond, Benjamin E. and Posen, Jim}, + title = {Succinct Arguments over Towers of Binary Fields}, + year = {2025}, + isbn = {978-3-031-91133-0}, + publisher = {Springer-Verlag}, + address = {Berlin, Heidelberg}, + url = {https://doi.org/10.1007/978-3-031-91134-7_4}, + doi = {10.1007/978-3-031-91134-7_4}, + abstract = {We introduce an efficient SNARK for towers of binary fields. Adapting Brakedown (CRYPTO ’23), we construct a multilinear polynomial commitment scheme suitable for polynomials over tiny fields, including that with just two elements. Our commitment scheme, unlike those of previous works, treats small-field polynomials with no embedding overhead. We further introduce binary-field adaptations of HyperPlonk (EUROCRYPT ’23)’s product and permutation checks and of Lasso (EUROCRYPT ’24)’s lookup. Our binary PLONKish variant captures standard hash functions—like Keccak-256 and Gr\o{}stl—extremely efficiently. 
With recourse to thorough performance benchmarks, we argue that our scheme can efficiently generate precisely those Keccak-256-proofs which critically underlie modern efforts to scale Ethereum.}, + booktitle = {Advances in Cryptology – EUROCRYPT 2025: 44th Annual International Conference on the Theory and Applications of Cryptographic Techniques, Madrid, Spain, May 4–8, 2025, Proceedings, Part IV}, + pages = {93–122}, + numpages = {30}, + keywords = {succinct arguments, binary fields, error-correcting codes}, + location = {Madrid, Spain} +} diff --git a/blueprint/src/web.pdf b/blueprint/src/web.pdf deleted file mode 100644 index 824e6609c..000000000 Binary files a/blueprint/src/web.pdf and /dev/null differ diff --git a/blueprint/web/chap-commitment_schemes.html b/blueprint/web/chap-commitment_schemes.html deleted file mode 100644 index bab64fb63..000000000 --- a/blueprint/web/chap-commitment_schemes.html +++ /dev/null @@ -1,191 +0,0 @@ - - - - - - - - -Commitment Schemes - - - - - - - - - - - - -
- -

Formally Verified Arguments of Knowledge in Lean

-
- - - - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/chap-oracle_reductions.html b/blueprint/web/chap-oracle_reductions.html deleted file mode 100644 index 9b43a34e1..000000000 --- a/blueprint/web/chap-oracle_reductions.html +++ /dev/null @@ -1,1089 +0,0 @@ - - - - - - - - -Oracle Reductions - - - - - - - - - - - - -
- -

Formally Verified Arguments of Knowledge in Lean

-
- -
- - -
-
- - -
-

2 Oracle Reductions

- -

2.1 Definitions

-

In this section, we give the basic definitions of a public-coin interactive oracle reduction (henceforth called an oracle reduction or IOR). In particular, we will define its building blocks, and various security properties.

-

2.1.1 Format

-

An (interactive) oracle reduction is an interactive protocol between two parties, a prover \(\mathcal{P}\) and a verifier \(\mathcal{V}\). It takes place in the following setting:

-
    -
  1. We work in an ambient dependent type theory (in our case, Lean).

    -
  2. -
  3. The protocol flow is fixed and defined by a given type signature, which describes in each round which party sends a message to the other, and the type of that message.

    -
  4. -
  5. The prover and verifier has access to some inputs (called the context) at the beginning of the protocol. These inputs are classified as follows:

    -
      -
    • Public inputs: available to both parties;

      -
    • -
    • Private inputs (or witness): available only to the prover;

      -
    • -
    • Oracle inputs: the underlying data is available to the prover, but it’s only exposed as an oracle to the verifier. An oracle interface for such inputs consists of a query type, a response type, and a function that takes in the underlying input, a query, and outputs a response.

      -
    • -
    • Shared oracle: the oracle is available to both parties via an interface; in most cases, it is either empty, a probabilistic sampling oracle, or a random oracle. See Section 5.3 for more information on oracle computations.

      -
    • -
    -
  6. -
  7. The messages sent from the prover may either: 1) be seen directly by the verifier, or 2) only available to a verifier through an oracle interface (which specifies the type for the query and response, and the oracle’s behavior given the underlying message).

    -
  8. -
  9. All messages from \(\mathcal{V}\) are chosen uniformly at random from the finite type corresponding to that round. This property is called public-coin in the literature.

    -
  10. -
-

We now go into more details on these objects.

-
-
- - Definition - - 1 - Type Signature of an Oracle Reduction -
- -
-
- - # - - - - - - - -
-
-
-

A protocol specification \(\rho : \mathsf{PSpec}\; n\) of an oracle reduction is parametrized by a fixed number of messages sent in total. For each step of interaction, it specifies a direction for the message (prover to verifier, or vice versa), and a type for the message. If a message from the prover to the verifier is further marked as oracle, then we also expect an oracle interface for that message. In Lean, we handle these oracle interfaces via the \(\mathsf{OracleInterface}\) type class.

- -
-
-
-
- - Definition - - 2 - Context -
- -
-
- - # - - - -
-
-
-

In an oracle reduction, its context (denoted \(\Gamma \)) consists of a list of public inputs, a list of witness inputs, a list of oracle inputs, and a shared oracle (possibly represented as a list of lazily sampled query-response pairs). These inputs have the expected visibility.

-

For simplicity, we imagine the context as append-only, as we add new messages from the protocol execution.

- -
-
-

We define some supporting definitions for a protocol specification.

- -
-
- - Remark - - 4 - Design Decision -
- -
-
- - # - - - -
-
-
-

We do not enforce a particular interaction flow in the definition of an interactive (oracle) reduction. This is done so that we can capture all protocols in the most generality. Also, we want to allow the prover to send multiple messages in a row, since each message may have a different oracle representation (for instance, in the Plonk protocol, the prover’s first message is a 3-tuple of polynomial commitments.)

- -
-
-
-
- - Definition - - 5 - Type Signature of a Prover -
- -
-
- - # - - - - - - - -
-
-
-

A prover \(\mathcal{P}\) in an oracle reduction, given a context, is a stateful oracle computation that at each step of the protocol, either takes in a new message from the verifier, or sends a new message to the verifier.

- -
-
-

Our modeling of oracle reductions only consider public-coin verifiers; that is, verifiers who only outputs uniformly random challenges drawn from the (finite) types, and uses no other randomness. Because of this fixed functionality, we can bake the verifier’s behavior in the interaction phase directly into the protocol execution semantics.

-

After the interaction phase, the verifier may then run some verification procedure to check the validity of the prover’s responses. In this procedure, the verifier gets access to the public part of the context, and oracle access to either the shared oracle, or the oracle inputs.

-
-
- - Definition - - 6 - Type Signature of a Verifier -
- -
-
- - # - - - - - - - -
-
-
-

A verifier \(\mathcal{V}\) in an oracle reduction is an oracle computation that may perform a series of checks (i.e. ‘Bool‘-valued, or ‘Option Unit‘) on the given context.

- -
-
-

An oracle reduction then consists of a type signature for the interaction, and a pair of prover and verifier for that type signature.

-

l

-
-
- - Definition - - 7 - Interactive Oracle Reduction -
- -
-
- - # - - - - - - - - - - - -
-
-
-

An interactive oracle reduction for a given context \(\Gamma \) is a combination a prover and a verifier of the types specified above.

- -
-
-

PL Formalization. We write our definitions in PL notation in Figure 2.1. The set of types \(\mathsf{Type}\) is the same as Lean’s dependent type theory (omitting universe levels); in particular, we care about basic dependent types (Pi and Sigma), finite natural numbers, finite fields, lists, vectors, and polynomials.

-
-
- \[ \begin{array}{rcl} \mathsf{Type} & ::=& \mathsf{Unit} \mid \mathsf{Bool} \mid \mathbb {N} \mid \mathsf{Fin}\; n \mid \mathbb {F}_q \mid \mathsf{List}\; (\alpha : \mathsf{Type}) \mid (i : \iota ) \to \alpha \; i \mid (i : \iota ) \times \alpha \; i \mid \dots \\[1em] \mathsf{Dir} & ::=& \mathsf{P2V.Pub} \mid \mathsf{P2V.Orac} \mid \mathsf{V2P} \\ \mathsf{OI}\; (\mathrm{M} : \mathsf{Type}) & ::=& \langle \mathrm{Q}, \mathrm{R}, \mathrm{M} \to \mathrm{Q} \to \mathrm{R} \rangle \\ \mathsf{PSpec}\; (n : \mathbb {N}) & ::=& \mathsf{Fin}\; n \to (d : \mathsf{Dir}) \times (M : \mathsf{Type}) \times (\mathsf{if}\; d = \mathsf{P2V.Orac} \; \mathsf{then} \; \mathsf{OI}(M) \; \mathsf{else} \; \mathsf{Unit}) \\ \mathsf{OSpec} \; (\iota : \mathsf{Type}) & ::=& (i : \iota ) \to \mathsf{dom}\; i \times \mathsf{range}\; i \\[1em] \varSigma & ::=& \emptyset \mid \varSigma \times \mathsf{Type}\\ \Omega & ::=& \emptyset \mid \Omega \times \langle \mathrm{M} : \mathsf{Type}, \mathsf{OI}(\mathrm{M}) \rangle \\ \Psi & ::=& \emptyset \mid \Psi \times \mathsf{Type}\\ \end{array} \] -
-
- \[ \begin{array}{rcl} \Gamma & ::=& (\Psi ; \Omega ; \varSigma ; \rho ; \mathcal{O})\\ \mathsf{OComp}^{\mathcal{O}}\; (\alpha : \mathsf{Type}) & ::=& \mid \; \mathsf{pure}\; (a : \alpha ) \\ & & \mid \; \mathsf{queryBind}\; (i : \iota )\; (q : \mathsf{dom}\; i)\; (k : \mathsf{range}\; i \to \mathsf{OComp}^{\mathcal{O}}\; \alpha ) \\ & & \mid \; \mathsf{fail} \\[1em] \tau _{\mathsf{P}}(\Gamma ) & ::=& (i : \mathsf{Fin}\; n) \to (h : (\rho \; i).\mathsf{fst} = \mathsf{P2V}) \to \\ & & \varSigma \to \Omega \to \Psi \to \rho _{[:i]} \to \mathsf{OComp}^{\mathcal{O}}\; \left( (\rho \; i).\mathsf{snd}\right) \\[1em]\tau _{\mathsf{V}}(\Gamma ) & ::=& \varSigma \to (\rho .\mathsf{Chals}) \to \mathsf{OComp}^{\mathcal{O} :: \mathsf{OI}(\Omega ) :: \mathsf{OI}(\rho .\mathsf{Msg.Orac})}\; \mathsf{Unit} \\[1em] \tau _{\mathsf{E}}(\Gamma ) & ::=& \varSigma \to \Omega \to \rho .\mathsf{Transcript} \to \mathcal{O}.\mathsf{QueryLog} \to \Psi \end{array} \] -
-
- Figure - 2.1 - Type definitions for interactive oracle reductions -
- - -
-

Using programming language notation, we can express an interactive oracle reduction as a typing judgment:

-
- \[ \Gamma := (\Psi ; \Theta ; \varSigma ; \rho ; \mathcal{O}) \vdash \mathcal{P} : \tau _{\mathsf{P}}(\Gamma ), \; \mathcal{V} : \tau _{\mathsf{V}}(\Gamma ) \] -
-

where:

-
    -
  • \(\Psi \) represents the witness (private) inputs

    -
  • -
  • \(\Theta \) represents the oracle inputs

    -
  • -
  • \(\varSigma \) represents the public inputs (i.e. statements)

    -
  • -
  • \(\mathcal{O} : \mathsf{OSpec}\; \iota \) represents the shared oracle

    -
  • -
  • \(\rho : \mathsf{PSpec}\; n\) represents the protocol type signature

    -
  • -
  • \(\mathcal{P}\) and \(\mathcal{V}\) are the prover and verifier, respectively, being of the given types \(\tau _{\mathsf{P}}(\Gamma )\) and \(\tau _{\mathsf{V}}(\Gamma )\).

    -
  • -
-

To exhibit valid elements for the prover and verifier types, we will use existing functions in the ambient programming language (e.g. Lean).

-

We now define what it means to execute an oracle reduction. This is essentially achieved by first executing the prover, interspersed with oracle queries to get the verifier’s challenges (these will be given uniform random probability semantics later on), and then executing the verifier’s checks. Any message exchanged in the protocol will be added to the context. We may also log information about the execution, such as the log of oracle queries for the shared oracles, for analysis purposes (i.e. feeding information into the extractor).

-
-
- - Definition - - 8 - Execution of an Oracle Reduction -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Remark - - 9 - More efficient representation of oracle reductions -
- -
-
- - # - - - -
-
-
-

The presentation of oracle reductions as protocols on an append-only context is useful for reasoning, but it does not lead to the most efficient implementation for the prover and verifier. In particular, the prover cannot keep intermediate state, and thus needs to recompute everything from scratch for each new message.

-

To fix this mismatch, we will also define a stateful variant of the prover, and define a notion of observational equivalence between the stateless and stateful reductions.

- -
-
-

2.1.2 Security properties

-

We can now define properties of interactive reductions. The two main properties we consider in this project are completeness and various notions of soundness. We will cover zero-knowledge at a later stage.

-

First, for completeness, this is essentially probabilistic Hoare-style conditions on the execution of the oracle reduction (with the honest prover and verifier). In other words, given a predicate on the initial context, and a predicate on the final context, we require that if the initial predicate holds, then the final predicate holds with high probability (except for some completeness error).

-
-
- - Definition - - 10 - Completeness -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-

Almost all oracle reductions we consider actually satisfy perfect completeness, which simplifies the proof obligation. In particular, this means we only need to show that no matter what challenges are chosen, the verifier will always accept given messages from the honest prover.

-

For soundness, we need to consider different notions. These notions differ in two main aspects:

-
    -
  • Whether we consider the plain soundness, or knowledge soundness. The latter relies on the notion of an extractor.

    -
  • -
  • Whether we consider plain, state-restoration, round-by-round, or rewinding notion of soundness.

    -
  • -
-

We note that state-restoration knowledge soundness is necessary for the security of the SNARK protocol obtained from the oracle reduction after composing with a commitment scheme and applying the Fiat-Shamir transform. It in turn is implied by either round-by-round knowledge soundness, or special soundness (via rewinding). At the moment, we only care about non-rewinding soundness, so mostly we will care about round-by-round knowledge soundness.

-
-
- - Definition - - 11 - Soundness -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-

A (straightline) extractor for knowledge soundness is a deterministic algorithm that takes in the output public context after executing the oracle reduction, the side information (i.e. log of oracle queries from the malicious prover) observed during execution, and outputs the witness for the input context.

-

Note that since we assume the context is append-only, and we append only the public (or oracle) messages obtained during protocol execution, it follows that the witness stays the same throughout the execution.

-
-
- - Definition - - 12 - Knowledge Soundness -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-

To define round-by-round (knowledge) soundness, we need to define the notion of a state function. This is a (possibly inefficient) function \(\mathsf{StateF}\) that, for every challenge sent by the verifier, takes in the transcript of the protocol so far and outputs whether the state is doomed or not. Roughly speaking, the requirement of round-by-round soundness is that, for any (possibly malicious) prover \(P\), if the state function outputs that the state is doomed on some partial transcript of the protocol, then the verifier will reject with high probability.

-
-
- - Definition - - 13 - State Function -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 14 - Round-by-Round Soundness -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 15 - Round-by-Round Knowledge Soundness -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-

By default, the properties we consider are perfect completeness and (straightline) round-by-round knowledge soundness. We can encapsulate these properties into the following typing judgement:

-
- \[ \Gamma := (\Psi ; \Theta ; \varSigma ; \rho ; \mathcal{O}) \vdash \{ \mathcal{R}_1\} \quad \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle \quad \{ \! \! \{ \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} \] -
-

2.2 A Program Logic for Oracle Reductions

-

In this section, we describe a program logic for reasoning about oracle reductions. In other words, we define a number of rules (or theorems) that govern how oracle reductions can be composed to form larger reductions, and how the resulting reduction inherits the security properties of the components.

-

The first group of rules changes relations and shared oracles.

-

2.2.1 Changing Relations and Oracles

-

Here we express the consequence rule. Namely, if we have an oracle reduction for \(\mathcal{R}_1 \implies \mathcal{R}_2\), along with \(\mathcal{R}_1' \implies \mathcal{R}_1\) and \(\mathcal{R}_2 \implies \mathcal{R}_2'\), then we obtain an oracle reduction for \(\mathcal{R}_1' \implies \mathcal{R}_2'\).

-
- \[ \frac{\begin{array}{c} \Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R}_2; \, \mathsf{St};\, \epsilon \} \! \! \} \\[1.5ex] \mathcal{R}_1’ \implies \mathcal{R}_1 \\[1.5ex] \mathcal{R}_2 \implies \mathcal{R}_2’ \end{array}}{\Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1'\} \; \langle \mathcal{P},\, \mathcal{V},\, \mathcal{E}\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R}_2'; \, \mathsf{St};\, \epsilon \} \! \! \} } \quad \text{(Conseq)} \] -
-
- \[ \frac{\Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} }{\Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R} \times \mathcal{R}_1\} \; \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R} \times \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} } \quad \text{(Frame)} \] -
-
- \[ \frac{\begin{array}{c} \Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle ^{\mathcal{O}_1} : \tau \; \{ \! \! \{ \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} \\[1.5ex] \mathcal{O}_1 \subset \mathcal{O}_2 \end{array}}{\Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle ^{\mathcal{O}_2} : \tau \; \{ \! \! \{ \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} } \quad \text{(Oracle-Lift)} \] -
-

TODO: figure out how the state function needs to change for these rules (they are basically the same, but not exactly)

-

2.2.2 Sequential Composition

-

The reason why we consider interactive (oracle) reductions at the core of our formalism is that we can compose these reductions to form larger reductions. Equivalently, we can take a complex interactive (oracle) proof (which differs only in that it reduces a relation to the trivial relation that always outputs true) and break it down into a series of smaller reductions. The advantage of this approach is that we can prove security properties (completeness and soundness) for each of the smaller reductions, and these properties will automatically transfer to the larger reductions.

-

This section is devoted to the composition of interactive (oracle) reductions, and proofs that the resulting reductions inherit the security properties of the two (or more) constituent reductions.

-

Sequential composition can be expressed as the folowing rule:

-
- \[ \frac{\begin{array}{c} \Psi ; \Theta ; \varSigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}_1, \mathcal{V}_1, \mathcal{E}_1\rangle ^{\mathcal{O}} : \tau _1 \; \{ \! \! \{ \mathcal{R}_2; \mathsf{St}_1; \epsilon _1\} \! \! \} \\[1.5ex] \Psi ; (\Theta :: \tau _1) ; \varSigma \vdash \{ \mathcal{R}_2\} \; \langle \mathcal{P}_2, \mathcal{V}_2, \mathcal{E}_2\rangle ^{\mathcal{O}} : \tau _2 \; \{ \! \! \{ \mathcal{R}_3; \mathsf{St}_2; \epsilon _2\} \! \! \} \end{array}}{\Psi ; (\Theta :: \tau _1 :: \tau _2) \ ; \varSigma \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}_1 \circ \mathcal{P}_2, \mathcal{V}_1 \circ \mathcal{V}_2, \mathcal{E}_1 \circ _{\mathcal{V}_2} \mathcal{E}_2\rangle ^{\mathcal{O}} : \tau _1 \oplus \tau _2 \; \{ \! \! \{ \mathcal{R}_3; \mathsf{St}_1 \oplus \mathsf{St}_2; \epsilon _1 \oplus \epsilon _2\} \! \! \} } \quad \text{(Seq-Comp)} \] -
-

2.2.3 Virtualization

-

Another tool we will repeatedly use is the ability to change the context of an oracle reduction. This is often needed when we want to adapt an oracle reduction in a simple context into one for a more complex context.

-

See the section on sum-check 3.2 for an example.

-
-
- - Definition - - 16 - Mapping into Virtual Context -
- -
-
- - # - - - -
-
-
-

In order to apply an oracle reduction on virtual data, we will need to provide a mapping from the current context to the virtual context. This includes:

-
    -
  • A mapping from the current public inputs to the virtual public inputs.

    -
  • -
  • A simulation of the oracle inputs for the virtual context using the public and oracle inputs for the current context.

    -
  • -
  • A mapping from the current private inputs to the virtual private inputs.

    -
  • -
  • A simulation of the shared oracle for the virtual context using the shared oracle for the current context.

    -
  • -
- -
-
-
-
- - Definition - - 17 - Virtual Oracle Reduction -
- -
-
- - # - - - -
-
-
-

Given a suitable mapping into a virtual context, we may define an oracle reduction via the following construction:

-
    -
  • The prover first applies the mappings to obtain the virtual context. The verifier does the same, but only for the non-private inputs.

    -
  • -
  • The prover and verifier then run the virtual oracle reduction on the virtual context.

    -
  • -
- -
-
-

We will show security properties for this virtualization process. One can see that completeness and soundness are inherited from the completeness and soundness of the virtual oracle reduction. However, (round-by-round) knowledge soundness is more tricky; this is because we must extract back to the witness of the original context from the virtual context.

-
- \[ \frac{\begin{array}{c} \Psi '; \Theta '; \Sigma ' \vdash \{ \mathcal{R}_1\} \; \langle \mathcal{P}, \mathcal{V}, \mathcal{E}\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R}_2; \mathsf{St}; \epsilon \} \! \! \} \\[1.5ex] f : (\Psi , \Theta , \Sigma ) \to (\Psi ', \Theta ', \Sigma ') \\[1.5ex] g : \Psi ' \to \Psi \\[1.5ex] f.\mathsf{fst} \circ g = \mathsf{id} \end{array}}{\Psi ; \Theta ; \Sigma \vdash \{ \mathcal{R}_1 \circ f\} \; \langle \mathcal{P} \circ f, \mathcal{V} \circ f, \mathcal{E} \circ (f, g)\rangle ^{\mathcal{O}} : \tau \; \{ \! \! \{ \mathcal{R}_2 \circ f; \mathsf{St} \circ f; \epsilon \} \! \! \} } \quad \text{(Virtual-Ctx)} \] -
-

2.2.4 Substitution

-

Finally, we need a transformation / inference rule that allows us to change the message type in a given round of an oracle reduction. In other words, we substitute a value in the round with another value, followed by a reduction establishing the relationship between the new and old values.

-

Examples include:

-
    -
  1. Substituting an oracle input by a public input:

    -
      -
    • Often by just revealing the underlying data. This has no change on the prover, and for the verifier, this means that any query to the oracle input can be locally computed.

      -
    • -
    • A variant of this is when the oracle input consists of a data along with a proof that the data satisfies some predicate. In this case, the verifier needs to additionally check that the predicate holds for the substituted data.

      -
    • -
    • Another common substitution is to replace a vector with its Merkle commitment, or a polynomial with its polynomial commitment.

      -
    • -
    -
  2. -
  3. Substituting an oracle input by another oracle input, followed by a reduction for each oracle query the verifier makes to the old oracle:

    -
      -
    • This is also a variant of the previous case, where we do not fully substitute with a public input, but do a “half-substitution” by substituting with another oracle input. This happens e.g. when using a polynomial commitment scheme that is itself based on a vector commitment scheme. One can cast protocols like Ligero / Brakedown / FRI / STIR in this two-step process.

      -
    • -
    -
  4. -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/chap-proof_systems.html b/blueprint/web/chap-proof_systems.html deleted file mode 100644 index 8f9195086..000000000 --- a/blueprint/web/chap-proof_systems.html +++ /dev/null @@ -1,760 +0,0 @@ - - - - - - - - -Proof Systems - - - - - - - - - - - - -
- -

Formally Verified Arguments of Knowledge in Lean

-
- -
- - -
-
- - -
-

3 Proof Systems

- -

3.1 Simple Oracle Reductions

-

We start by introducing a number of simple oracle reductions.

-

3.1.1 Polynomial Equality Testing

-

Context: two univariate polynomials \(P, Q \in \mathbb {F}[X]\) of degree at most \(d\), available as polynomial evaluation oracles

-

Input relation: \(P = Q\) as polynomials

-

Protocol type: a single message of type \(\mathbb {F}\) from the verifier to the prover.

-

Honest prover: does nothing

-

Honest verifier: checks that \(P(r) = Q(r)\)

-

Output relation: \(P(r) = Q(r)\)

-

Extractor: trivial since there is no witness

-

Completeness: trivial

-

Round-by-round state function: corresponds precisely to input and output relation

- -

Round-by-round error: \(d / \lvert \mathbb {F} \rvert \)

-

Round-by-round knowledge soundness: follows from Schwartz-Zippel

-

To summarize, we have the following judgment:

-
- \[ \begin{array}{c} \Psi := (); \; \Theta := (P, Q); \; \Sigma := (); \; \tau := (\mathsf{V2P}, \mathbb {F}) \vdash \\[1ex] \{ P = Q\} \quad \left( \begin{array}{l} \mathcal{P} := (), \\ \mathcal{V} := (P,Q,r) \mapsto [P(r) \stackrel{?}{=} Q(r)], \\ \mathcal{E} := () \end{array} \right)^{\emptyset } \quad \{ \! \! \{ P(r) = Q(r); \mathsf{St}_{P,Q}; \frac{d}{\lvert \mathbb {F} \rvert }\} \! \! \} \end{array} \] -
-

where \(\mathsf{St}(i) = \begin{cases} P \stackrel{?}{=} Q & \text{if } i = 0 \\ P(r) \stackrel{?}{=} Q(r) & \text{if } i = 1 \end{cases}\)

-

3.1.2 Batching Polynomial Evaluation Claims

-

Context: \(n\)-tuple of values \(v = (v_1, \ldots , v_n) \in \mathbb {F}^n\)

-

Protocol type: one message of type \(\mathbb {F}^k\) from the verifier to the prover, and another message of type \(\mathbb {F}\) from the prover to the verifier

-

Auxiliary function: a polynomial map \(E : \mathbb {F}^k \to \mathbb {F}^n\)

-

Honest prover: given \(r \gets \mathbb {F}^k\) from the verifier’s message, computes \(\langle E(r), v\rangle := E(r)_1 \cdot v_1 + \cdots + E(r)_n \cdot v_n\) and sends it to the verifier

-

Honest verifier: checks that the received value \(v'\) is equal to \(\langle E(r), v\rangle \)

-

Extractor: trivial since there is no witness

-

Security: depends on the degree & non-degeneracy of the polynomial map \(E\)

-

3.2 The Sum-Check Protocol

- -

This section documents our formalization of the sum-check protocol. We first describe the sum-check protocol as it is typically described in the literature, and then present a modular description that maximally relies on our general oracle reduction framework.

-

3.2.1 Standard Description

- -

Protocol Parameters

-

The sum-check protocol is parameterized by the following:

-
    -
  • \(R\): the underlying ring (for soundness, required to be finite and an integral domain)

    -
  • -
  • \(n \in \mathbb {N}\): the number of variables (and the number of rounds for the protocol)

    -
  • -
  • \(d \in \mathbb {N}\): the individual degree bound for the polynomial

    -
  • -
  • \(\mathcal{D}: \{ 0, 1, \ldots , m-1\} \hookrightarrow R\): the set of \(m\) evaluation points for each variable, represented as an injection. The image of \(\mathcal{D}\) as a finite subset of \(R\) is written as \(\text{Image}(\mathcal{D})\).

    -
  • -
  • \(\mathcal{O}\): the set of underlying oracles (e.g., random oracles) that may be needed for other reductions. However, the sum-check protocol itself does not use any oracles.

    -
  • -
-

Input and Output Statements

-

For the standard description of the sum-check protocol, we specify the complete input and output data:

-

Input Statement.

-

The claimed sum \(T \in R\).

-

Input Oracle Statement.

-

The polynomial \(P \in R[X_0, X_1, \ldots , X_{n-1}]_{\leq d}\) of \(n\) variables with bounded individual degrees \(d\).

-

Input Witness.

-

None (the unit type).

-

Input Relation.

-

The sum-check relation:

-
- \[ \sum _{x \in (\text{Image}(\mathcal{D}))^n} P(x) = T \] -
-

Output Statement.

-

The claimed evaluation \(e \in R\) and the challenge vector \((r_0, r_1, \ldots , r_{n-1}) \in R^n\).

-

Output Oracle Statement.

-

The same polynomial \(P \in R[X_0, X_1, \ldots , X_{n-1}]_{\leq d}\).

-

Output Witness.

-

None (the unit type).

-

Output Relation.

-

The evaluation relation:

-
- \[ P(r_0, r_1, \ldots , r_{n-1}) = e \] -
-

Protocol Description

-

The sum-check protocol proceeds in \(n\) rounds of interaction between the prover and verifier. The protocol reduces the claim that a multivariate polynomial \(P\) sums to a target value \(T\) over the domain \((\text{Image}(\mathcal{D}))^n\) to the claim that \(P\) evaluates to a specific value \(e\) at a random point \((r_0, r_1, \ldots , r_{n-1})\).

-

In each round, the prover sends a univariate polynomial of bounded degree, and the verifier responds with a random challenge. The verifier performs consistency checks by querying the polynomial oracle at specific evaluation points. After \(n\) rounds, the protocol terminates with an output statement asserting that \(P(r_0, r_1, \ldots , r_{n-1}) = e\), where the challenges \((r_0, r_1, \ldots , r_{n-1})\) are the random values chosen by the verifier during the protocol execution.

-

The protocol is described as an oracle reduction, where the polynomial \(P\) is accessed only through evaluation queries rather than being explicitly represented.

-

Security Properties

-

We prove the following security properties for the sum-check protocol:

-
-
- - Theorem - - 18 - Perfect Completeness -
- -
-
- - # - - - -
-
-
-

The sum-check protocol satisfies perfect completeness. That is, for any valid input statement and oracle statement satisfying the input relation, the protocol accepts with probability 1.

- -
-
-
-
- - Theorem - - 19 - Knowledge Soundness -
- -
-
- - # - - - -
-
-
-

The sum-check protocol satisfies knowledge soundness. The soundness error is bounded by \(n \cdot d / |R|\), where \(n\) is the number of rounds, \(d\) is the degree bound, and \(|R|\) is the size of the field.

- -
-
-
-
- - Theorem - - 20 - Round-by-Round Knowledge Soundness -
- -
-
- - # - - - -
-
-
-

The sum-check protocol satisfies round-by-round knowledge soundness with respect to an appropriate state function (to be specified). Each round maintains the security properties compositionally, allowing for modular security analysis.

- -
-
-

Implementation Notes

-

Our formalization includes several important implementation considerations:

-

Oracle Reduction Level.

-

This description of the sum-check protocol stays at the oracle reduction level, describing the protocol before being compiled with concrete cryptographic primitives such as polynomial commitment schemes for \(P\). The oracle model allows us to focus on the logical structure and security properties of the protocol without being concerned with the specifics of how polynomial evaluations are implemented or verified.

-

Abstract Protocol Description.

-

The protocol description above does not consider implementation details and optimizations that would be necessary in practice. For instance, we do not address computational efficiency, concrete polynomial representations, or specific algorithms for polynomial evaluation. This abstraction allows us to establish the fundamental security properties that any concrete implementation must preserve.

-

Degree Constraints.

-

To represent sum-check as a series of Interactive Oracle Reductions (IORs), we implicitly constrain the degree of the polynomials via using subtypes such as \(R[X]_{\leq d}\) and appropriate multivariate polynomial degree bounds. This is necessary because the oracle verifier only gets oracle access to evaluating the polynomials, but does not see the polynomials in the clear.

-

Polynomial Commitments.

-

When this protocol is compiled to an interactive proof (rather than an oracle reduction), the corresponding polynomial commitment schemes will enforce that the declared degree bounds hold, by letting the (non-oracle) verifier perform explicit degree checks.

-

Formalization Alignment.

-

TODO: Align the sum-check protocol formalization in Lean to use \(n\) variables and \(n\) rounds (as in this standard description) rather than \(n+1\) variables and \(n+1\) rounds. This should be achievable by refactoring the current implementation to better match the standard presentation.

-

Future Extensions

-

Several generalizations are considered for future work:

-
    -
  • Variable Degree Bounds: Generalize to \(d : \{ 0, 1, \ldots , n+1\} \to \mathbb {N}\) and \(\mathcal{D} : \{ 0, 1, \ldots , n+1\} \to (\{ 0, 1, \ldots , m-1\} \hookrightarrow R)\), allowing different degree bounds and summation domains for each variable.

    -
  • -
  • Restricted Challenge Domains: Generalize the challenges to come from suitable subsets of \(R\) (e.g., subtractive sets), rather than the entire domain. This modification is used in lattice-based protocols.

    -
  • -
  • Module-based Sum-check: Extend to sum-check over modules instead of just rings. This would require extending multivariate polynomial evaluation to modules, defining something like \(\text{evalModule} : (R^n \to M) \to R[X_0, \ldots , X_{n-1}] \to M\) where \(M\) is an \(R\)-module.

    -
  • -
-

The sum-check protocol, as described in the original paper and many expositions thereafter, is a protocol to reduce the claim that

-
- \[ \sum _{x \in \{ 0, 1\} ^n} P(x) = c, \] -
-

where \(P\) is an \(n\)-variate polynomial of certain individual degree bounds, and \(c\) is some field element, to the claim that

-
- \[ P(r) = v, \] -
-

for some claimed value \(v\) (derived from the protocol transcript), where \(r\) is a vector of random challenges from the verifier sent during the protocol.

-

In our language, the initial context of the sum-check protocol is the pair \((P, c)\), where \(P\) is an oracle input and \(c\) is public. The protocol proceeds in \(n\) rounds of interaction, where in each round \(i\) the prover sends a univariate polynomial \(s_i\) of bounded degree and the verifier sends a challenge \(r_i \gets \mathbb {F}\). The honest prover would compute

-
- \[ s_i(X) = \sum _{x \in \{ 0, 1\} ^{n - i - 1}} P(r_1, \ldots , r_{i - 1}, X, x), \] -
-

and the honest verifier would check that \(s_i(0) + s_i(1) = s_{i - 1}(r_{i - 1})\), with the convention that \(s_0(r_0) = c\).

-

3.2.2 Modular Description

- -

Round-by-Round Analysis

-

Our modular approach breaks down the sum-check protocol into individual rounds, each of which can be analyzed as a two-message Interactive Oracle Reduction. This decomposition allows us to prove security properties compositionally and provides a more granular understanding of the protocol’s structure.

-

Round-Specific Statements.

-

For the \(i\)-th round, where \(i \in \{ 0, 1, \ldots , n\} \), the statement contains:

-
    -
  • \(\text{target} \in R\): the target value for sum-check at this round

    -
  • -
  • \(\text{challenges} \in R^i\): the list of challenges sent from the verifier to the prover in previous rounds

    -
  • -
-

The oracle statement remains the same polynomial \(P \in R[X_0, X_1, \ldots , X_{n-1}]_{\leq d}\).

-

Round-Specific Relations.

-

The sum-check relation for the \(i\)-th round checks that:

-
- \[ \sum _{x \in (\text{Image}(\mathcal{D}))^{n-i}} P(\text{challenges}, x) = \text{target} \] -
-

Note that when \(i = n\), this becomes the output statement of the sum-check protocol, checking that \(P(\text{challenges}) = \text{target}\).

-

Individual Round Protocol

-

For \(i = 0, 1, \ldots , n-1\), the \(i\)-th round of the sum-check protocol consists of the following:

-

Step 1: Prover’s Message.

-

The prover sends a univariate polynomial \(p_i \in R[X]_{\leq d}\) of degree at most \(d\). If the prover is honest, then:

-
- \[ p_i(X) = \sum _{x \in (\text{Image}(\mathcal{D}))^{n-i-1}} P(\text{challenges}_0, \ldots , \text{challenges}_{i-1}, X, x) \] -
-

Here, \(P(\text{challenges}_0, \ldots , \text{challenges}_{i-1}, X, x)\) is the polynomial \(P\) evaluated at the concatenation of:

-
    -
  • the prior challenges \(\text{challenges}_0, \ldots , \text{challenges}_{i-1}\)

    -
  • -
  • the \(i\)-th variable as the new indeterminate \(X\)

    -
  • -
  • the remaining values \(x \in (\text{Image}(\mathcal{D}))^{n-i-1}\)

    -
  • -
-

In the oracle protocol, this polynomial \(p_i\) is turned into an oracle for which the verifier can query evaluations at arbitrary points.

-

Step 2: Verifier’s Challenge.

-

The verifier sends the \(i\)-th challenge \(r_i\) sampled uniformly at random from \(R\).

-

Step 3: Verifier’s Check.

-

The (oracle) verifier performs queries for the evaluations of \(p_i\) at all points in \(\text{Image}(\mathcal{D})\), and checks that:

-
- \[ \sum _{x \in \text{Image}(\mathcal{D})} p_i(x) = \text{target} \] -
-

If the check fails, the verifier outputs \(\texttt{failure}\). Otherwise, it outputs a statement for the next round as follows:

-
    -
  • \(\text{target}\) is updated to \(p_i(r_i)\)

    -
  • -
  • \(\text{challenges}\) is updated to the concatenation of the previous challenges and \(r_i\)

    -
  • -
-

Single Round Security Analysis

-
-
- - Definition - - 21 - Single Round Protocol -
- -
-
- - # - - - -
-
-
-

The \(i\)-th round of sum-check consists of:

-
    -
  1. Input: A statement containing target value and prior challenges, along with an oracle for the multivariate polynomial

    -
  2. -
  3. Prover’s message: A univariate polynomial \(p_i \in R[X]_{\leq d}\)

    -
  4. -
  5. Verifier’s challenge: A random element \(r_i \gets R\)

    -
  6. -
  7. Output: An updated statement with new target \(p_i(r_i)\) and extended challenges

    -
  8. -
- -
-
-
-
- - Theorem - - 22 - Single Round Completeness -
- -
-
- - # - - - -
-
-
-

Each individual round of the sum-check protocol is perfectly complete.

- -
-
-
-
- - Theorem - - 23 - Single Round Soundness -
- -
-
- - # - - - -
-
-
-

Each individual round of the sum-check protocol is sound with error probability at most \(d / |R|\), where \(d\) is the degree bound and \(|R|\) is the size of the field.

- -
-
-
-
- - Theorem - - 24 - Round-by-Round Knowledge Soundness -
- -
-
- - # - - - -
-
-
-

The sum-check protocol satisfies round-by-round knowledge soundness. Each individual round can be analyzed independently, and the soundness error in each round is bounded by \(d / |R|\).

- -
-
-

Virtual Protocol Decomposition

-

We now proceed to break down this protocol into individual messages, and then specify the predicates that should hold before and after each message is exchanged.

-

First, it is clear that we can consider each round in isolation. In fact, each round can be seen as an instantiation of the following simpler "virtual" protocol:

-
-
- - Definition - - 25 -
- -
-
- - # - - - -
-
-
- -
    -
  1. In this protocol, the context is a pair \((p, d)\), where \(p\) is now a univariate polynomial of bounded degree. The predicate / relation is that \(p(0) + p(1) = d\).

    -
  2. -
  3. The prover first sends a univariate polynomial \(s\) of the same bounded degree as \(p\). In the honest case, it would just send \(p\) itself.

    -
  4. -
  5. The verifier samples and sends a random challenge \(r \gets R\).

    -
  6. -
  7. The verifier checks that \(s(0) + s(1) = d\). The predicate on the resulting output context is that \(p(r) = s(r)\).

    -
  8. -
- -
-
-

The reason why this simpler protocol is related to a sum-check round is that we can emulate the simpler protocol using variables in the context at the time:

-
    -
  • The univariate polynomial \(p\) is instantiated as \(\sum _{x \in (\text{Image}(\mathcal{D}))^{n - i - 1}} P(r_0, \ldots , r_{i - 1}, X, x)\).

    -
  • -
  • The scalar \(d\) is instantiated as \(T\) if \(i = 0\), and as \(s_{i - 1}(r_{i - 1})\) otherwise.

    -
  • -
-

It is "clear" that the simpler protocol is perfectly complete. It is sound (and since there is no witness, also knowledge sound) since by the Schwartz-Zippel Lemma, the probability that \(p \ne s\) and yet \(p(r) = s(r)\) for a random challenge \(r\) is at most the degree of \(p\) over the size of the field.

-
-
- - Theorem - - 26 -
- -
-
- - # - - - - - - - -
-
-
-

The virtual sum-check round protocol is sound.

- -
-
-

Note that there is no witness, so knowledge soundness follows trivially from soundness.

-
-
- - Theorem - - 27 -
- -
-
- - # - - - - - - - -
-
-
-

The virtual sum-check round protocol is knowledge sound.

- -
-
-

Moreover, we can define the following state function for the simpler protocol

-
-
- - Definition - - 28 - State Function -
- -
-
- - # - - - - - - - -
-
-
-

The state function for the virtual sum-check round protocol is given by:

-
    -
  1. The initial state function is the same as the predicate on the initial context, namely that \(p(0) + p(1) = d\).

    -
  2. -
  3. The state function after the prover sends \(s\) is the predicate that \(p(0) + p(1) = d\) and \(s(0) + s(1) = d\). Essentially, we add in the verifier’s check.

    -
  4. -
  5. The state function for the output context (after the verifier sends \(r\)) is the predicate that \(s(0) + s(1) = d\) and \(p(r) = s(r)\).

    -
  6. -
- - -
-
-

Seen in this light, it should be clear that the simpler protocol satisfies round-by-round soundness.

-
-
- - Theorem - - 29 -
- -
-
- - # - - - - - - - -
-
-
-

The virtual sum-check round protocol is round-by-round sound.

- -
-
-

In fact, we can break down this simpler protocol even more: consider the two sub-protocols that each consists of a single message. Then the intermediate state function is the same as the predicate on the intermediate context, and is given in a "strongest post-condition" style where it incorporates the verifier’s check along with the initial predicate. We can also view the final state function as a form of "canonical" post-condition, that is implied by the previous predicate except with small probability.

-

3.3 The Spartan Protocol

-

3.4 The Ligero Polynomial Commitment Scheme

- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/chap-references.html b/blueprint/web/chap-references.html deleted file mode 100644 index f5a1f3ce6..000000000 --- a/blueprint/web/chap-references.html +++ /dev/null @@ -1,189 +0,0 @@ - - - - - - - - -References - - - - - - - - - - - - -
- -

Formally Verified Arguments of Knowledge in Lean

-
- - - - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/chap-supporting_theories.html b/blueprint/web/chap-supporting_theories.html deleted file mode 100644 index 8fe681cb7..000000000 --- a/blueprint/web/chap-supporting_theories.html +++ /dev/null @@ -1,1194 +0,0 @@ - - - - - - - - -Supporting Theories - - - - - - - - - - - - -
- -

Formally Verified Arguments of Knowledge in Lean

-
- -
- - -
-
- - -
-

5 Supporting Theories

- -

5.1 Polynomials

- -

This section contains facts about polynomials that are used in the rest of the library, and also definitions for computable representations of polynomials.

-
-
- - Definition - - 30 - Multilinear Extension -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Theorem - - 31 - Multilinear Extension is Unique -
- -
-
- - # - - - -
-
-
- - -
-
-

We note that the Schwartz-Zippel Lemma is already in Mathlib.

-
-
- - Theorem - - 32 - Schwartz-Zippel Lemma -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-

We also define the type of computable univariate & multilinear polynomials using arrays to represent their coefficients (or dually, their evaluations at given points).

-
-
- - Definition - - 33 - Computable Univariate Polynomials -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 34 - Computable Multilinear Polynomials -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-

5.2 Coding Theory

- -

This section contains definitions and theorems about coding theory as they are used in the rest of the library.

-
-
- - Definition - - 35 - Code Distance -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 36 - Distance from a Code -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 37 - Generator Matrix -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 38 - Parity Check Matrix -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 39 - Interleaved Code -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 40 - Reed-Solomon Code -
- -
-
- - # - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 41 - Proximity Measure -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-
-
- - Definition - - 42 - Proximity Gap -
- -
-
- - # - - - - - - - - - - - -
-
-
- - -
-
-

5.3 The VCVio Library

- -

This library provides a formal framework for reasoning about computations that make oracle queries. Many cryptographic primitives and interactive protocols use oracles to model (or simulate) external functionality such as random responses, coin flips, or more structured queries. The VCVio library "lifts" these ideas into a setting where both the abstract specification and concrete simulation of oracles may be studied, and their probabilistic behavior analyzed.

-

The main ingredients of the library are as follows:

-
-
- - Definition - - 43 - Specification of Oracles -
- - - ✓ -
-
- - # - - - - - - - -
-
-
-

An oracle specification describes a collection of available oracles, each with its own input and output types. Formally, it’s given by an indexed family where each oracle is specified by:

-
    -
  • A domain type (what inputs it accepts)

    -
  • -
  • A range type (what outputs it can produce)

    -
  • -
-

The indexing allows for potentially infinite collections of oracles, and the specification itself is agnostic to how the oracles actually behave - it just describes their interfaces.

- -
-
-

Some examples of oracle specifications (and their intended behavior) are as follows:

-
    -
  • emptySpec: Represents an empty set of oracles

    -
  • -
  • singletonSpec: Represents a single oracle available on a singleton index

    -
  • -
  • coinSpec: A coin flipping oracle that produces a random Boolean value

    -
  • -
  • unifSpec: A family of oracles that for every natural number \(n \in \mathbb {N}\) chooses uniformly from the set \(\{ 0, \ldots , n\} \).

    -
  • -
- -

We often require extra properties on the domains and ranges of oracles. For example, we may require that the domains and ranges come equipped with decidable equality or finiteness properties.

-
-
- - Definition - - 44 - Oracle Computation -
- -
-
- - # - - - - - - - - - - - -
-
-
-

An oracle computation represents a program that can make oracle queries. It can:

-
    -
  • Return a pure value without making any queries (via pure)

    -
  • -
  • Make an oracle query and continue with the response (via queryBind)

    -
  • -
  • Signal failure (via failure)

    -
  • -
-

The formal implementation uses a free monad on the inductive type of oracle queries wrapped in an option monad transformer (i.e. OptionT(FreeMonad(OracleQuery spec))).

- -
-
-
-
- - Definition - - 45 - Handling Oracle Queries -
- -
-
- - # - - - - - - - - - - - -
-
-
-

To actually run oracle computations, we need a way to handle (or implement) the oracle queries. An oracle implementation consists of a mapping from oracle queries to values in another monad. Depending on the monad, this may allow for various interpretations of the oracle queries.

- -
-
-
-
- - Definition - - 46 - Probabilistic Semantics of Oracle Computations -
- -
-
- - # - - - - - - - - - - - -
-
-
-

We can view oracle computations as probabilistic programs by considering what happens when oracles respond uniformly at random. This gives rise to a probability distribution over possible outputs (including the possibility of failure). The semantics maps each oracle query to a uniform distribution over its possible responses.

- -
-
-

Once we have mapped an oracle computation to a probability distribution, we can define various associated probabilities, such as the probability of failure, or the probability of the output satisfying a given predicate (assuming it does not fail).

-
-
- - Definition - - 47 - Simulating Oracle Queries with Other Oracles -
- -
-
- - # - - - -
-
-
-

We can simulate complex oracles using simpler ones by providing a translation mechanism. A simulation oracle specifies how to implement queries in one specification using computations in another specification, possibly maintaining additional state information during the simulation.

- -
-
-
-
- - Definition - - 48 - Logging & Caching Oracle Queries -
- -
-
- - # - - - - - - - - - - - -
-
-
-

Using the simulation framework, we can add logging and caching behaviors to oracle queries:

-
    -
  • Logging records all queries made during a computation

    -
  • -
  • Caching remembers query responses and reuses them for repeated queries

    -
  • -
-

These are implemented as special cases of simulation oracles.

- -
-
-
-
- - Definition - - 49 - Random Oracle -
- -
-
- - # - - - - - - - - - - - -
-
-
-

A random oracle is implemented as a caching oracle that uses lazy sampling:

-
    -
  • On first query: generates a uniform random response and caches it

    -
  • -
  • On repeated queries: returns the cached response

    -
  • -
- - -
-
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/dep_graph_document.html b/blueprint/web/dep_graph_document.html deleted file mode 100644 index 1e52aac80..000000000 --- a/blueprint/web/dep_graph_document.html +++ /dev/null @@ -1,1635 +0,0 @@ - - - - - - Dependency graph - - - - - - - - - - - - - - - - -
- Home -

Dependencies

-
-
-
-
- Legend -
-
-
-
-
-
- -
Boxes
definitions
- -
Ellipses
theorems and lemmas
- -
Blue border
the statement of this result is ready to be formalized; all prerequisites are done
- -
Orange border
the statement of this result is not ready to be formalized; the blueprint needs more work
- -
Blue background
the proof of this result is ready to be formalized; all prerequisites are done
- -
Green border
the statement of this result is formalized
- -
Green background
the proof of this result is formalized
- -
Dark green background
the proof of this result and all its ancestors are formalized
- -
Dark green border
this is in Mathlib
- -
-
-
-
- - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - - - - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
- - - -
-
- - - - -
-
-
-
-
- - - - - - - - - - diff --git a/blueprint/web/index.html b/blueprint/web/index.html deleted file mode 100644 index a30e96446..000000000 --- a/blueprint/web/index.html +++ /dev/null @@ -1,297 +0,0 @@ - - - - - - - - -Formally Verified Arguments of Knowledge in Lean - - - - - - - - - - -
- -
- -
- - - -
- - - - - - - - - - \ No newline at end of file diff --git a/blueprint/web/js/d3-graphviz.js b/blueprint/web/js/d3-graphviz.js deleted file mode 100644 index e73751937..000000000 --- a/blueprint/web/js/d3-graphviz.js +++ /dev/null @@ -1,2173 +0,0 @@ -(function (global, factory) { - typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('d3-selection'), require('d3-dispatch'), require('d3-transition'), require('d3-timer'), require('d3-interpolate'), require('d3-zoom'), require('@hpcc-js/wasm'), require('d3-format'), require('d3-path')) : - typeof define === 'function' && define.amd ? define(['exports', 'd3-selection', 'd3-dispatch', 'd3-transition', 'd3-timer', 'd3-interpolate', 'd3-zoom', '@hpcc-js/wasm', 'd3-format', 'd3-path'], factory) : - (global = global || self, factory(global['d3-graphviz'] = {}, global.d3, global.d3, global.d3, global.d3, global.d3, global.d3, global['@hpcc-js/wasm'], global.d3, global.d3)); -}(this, (function (exports, d3, d3Dispatch, d3Transition, d3Timer, d3Interpolate, d3Zoom, wasm, d3Format, d3Path) { 'use strict'; - - function extractElementData(element) { - - var datum = {}; - var tag = element.node().nodeName; - datum.tag = tag; - if (tag == '#text') { - datum.text = element.text(); - } else if (tag == '#comment') { - datum.comment = element.text(); - } - datum.attributes = {}; - var attributes = element.node().attributes; - if (attributes) { - for (var i = 0; i < attributes.length; i++) { - var attribute = attributes[i]; - var name = attribute.name; - var value = attribute.value; - datum.attributes[name] = value; - } - } - var transform = element.node().transform; - if (transform && transform.baseVal.numberOfItems != 0) { - var matrix = transform.baseVal.consolidate().matrix; - datum.translation = {x: matrix.e, y: matrix.f}; - datum.scale = matrix.a; - } - if (tag == 'ellipse') { - datum.center = { - x: datum.attributes.cx, - y: datum.attributes.cy, - }; - } - if (tag == 'polygon') { - var points = 
element.attr('points').split(' '); - var x = points.map(function(p) {return p.split(',')[0]}); - var y = points.map(function(p) {return p.split(',')[1]}); - var xmin = Math.min.apply(null, x); - var xmax = Math.max.apply(null, x); - var ymin = Math.min.apply(null, y); - var ymax = Math.max.apply(null, y); - var bbox = { - x: xmin, - y: ymin, - width: xmax - xmin, - height: ymax - ymin, - }; - datum.bbox = bbox; - datum.center = { - x: (xmin + xmax) / 2, - y: (ymin + ymax) / 2, - }; - } - if (tag == 'path') { - var d = element.attr('d'); - var points = d.split(/[A-Z ]/); - points.shift(); - var x = points.map(function(p) {return +p.split(',')[0]}); - var y = points.map(function(p) {return +p.split(',')[1]}); - var xmin = Math.min.apply(null, x); - var xmax = Math.max.apply(null, x); - var ymin = Math.min.apply(null, y); - var ymax = Math.max.apply(null, y); - var bbox = { - x: xmin, - y: ymin, - width: xmax - xmin, - height: ymax - ymin, - }; - datum.bbox = bbox; - datum.center = { - x: (xmin + xmax) / 2, - y: (ymin + ymax) / 2, - }; - datum.totalLength = element.node().getTotalLength(); - } - if (tag == 'text') { - datum.center = { - x: element.attr('x'), - y: element.attr('y'), - }; - } - if (tag == '#text') { - datum.text = element.text(); - } else if (tag == '#comment') { - datum.comment = element.text(); - } - return datum - } - - function extractAllElementsData(element) { - - var datum = extractElementData(element); - datum.children = []; - var children = d3.selectAll(element.node().childNodes); - children.each(function () { - var childData = extractAllElementsData(d3.select(this)); - childData.parent = datum; - datum.children.push(childData); - }); - return datum; - } - - function createElement(data) { - - if (data.tag == '#text') { - return document.createTextNode(""); - } else if (data.tag == '#comment') { - return document.createComment(data.comment); - } else { - return document.createElementNS('http://www.w3.org/2000/svg', data.tag); - } - } - - function 
createElementWithAttributes(data) { - - var elementNode = createElement(data); - var element = d3.select(elementNode); - var attributes = data.attributes; - for (var attributeName of Object.keys(attributes)) { - var attributeValue = attributes[attributeName]; - element.attr(attributeName, attributeValue); - } - return elementNode; - } - - function replaceElement(element, data) { - var parent = d3.select(element.node().parentNode); - var newElementNode = createElementWithAttributes(data); - var newElement = parent.insert(function () { - return newElementNode; - }, function () { - return element.node(); - }); - element.remove(); - return newElement; - } - - function insertElementData(element, datum) { - element.datum(datum); - element.data([datum], function (d) { - return d.key; - }); - } - - function insertAllElementsData(element, datum) { - insertElementData(element, datum); - var children = d3.selectAll(element.node().childNodes); - children.each(function (d, i) { - insertAllElementsData(d3.select(this), datum.children[i]); - }); - } - - function insertChildren(element, index) { - var children = element.selectAll(function () { - return element.node().childNodes; - }); - - children = children - .data(function (d) { - return d.children; - }, function (d) { - return d.tag + '-' + index; - }); - var childrenEnter = children - .enter() - .append(function(d) { - return createElement(d); - }); - - var childrenExit = children - .exit(); - childrenExit = childrenExit - .remove(); - children = childrenEnter - .merge(children); - var childTagIndexes = {}; - children.each(function(childData) { - var childTag = childData.tag; - if (childTagIndexes[childTag] == null) { - childTagIndexes[childTag] = 0; - } - var childIndex = childTagIndexes[childTag]++; - attributeElement.call(this, childData, childIndex); - }); - } - - function attributeElement(data, index=0) { - var element = d3.select(this); - var tag = data.tag; - var attributes = data.attributes; - var currentAttributes = 
element.node().attributes; - if (currentAttributes) { - for (var i = 0; i < currentAttributes.length; i++) { - var currentAttribute = currentAttributes[i]; - var name = currentAttribute.name; - if (name.split(':')[0] != 'xmlns' && currentAttribute.namespaceURI) { - var namespaceURIParts = currentAttribute.namespaceURI.split('/'); - var namespace = namespaceURIParts[namespaceURIParts.length - 1]; - name = namespace + ':' + name; - } - if (!(name in attributes)) { - attributes[name] = null; - } - } - } - for (var attributeName of Object.keys(attributes)) { - element - .attr(attributeName, attributes[attributeName]); - } - if (data.text) { - element - .text(data.text); - } - insertChildren(element, index); - } - - function shallowCopyObject(obj) { - return Object.assign({}, obj); - } - - function roundTo2Decimals(x) { - return Math.round(x * 100.0) / 100.0 - } - - function zoom(enable) { - - this._options.zoom = enable; - - if (this._options.zoom && !this._zoomBehavior) { - createZoomBehavior.call(this); - } - - return this; - } - - function createZoomBehavior() { - - function zoomed() { - var g = d3.select(svg.node().querySelector("g")); - g.attr('transform', d3.event.transform); - } - - var root = this._selection; - var svg = d3.select(root.node().querySelector("svg")); - if (svg.size() == 0) { - return this; - } - this._zoomSelection = svg; - var zoomBehavior = d3Zoom.zoom() - .scaleExtent(this._options.zoomScaleExtent) - .translateExtent(this._options.zoomTranslateExtent) - .interpolate(d3Interpolate.interpolate) - .on("zoom", zoomed); - this._zoomBehavior = zoomBehavior; - var g = d3.select(svg.node().querySelector("g")); - svg.call(zoomBehavior); - if (!this._active) { - translateZoomBehaviorTransform.call(this, g); - } - this._originalTransform = d3Zoom.zoomTransform(svg.node()); - - return this; - } - function getTranslatedZoomTransform(selection) { - - // Get the current zoom transform for the top level svg and - // translate it uniformly with the given 
selection, using the - // difference between the translation specified in the selection's - // data and it's saved previous translation. The selection is - // normally the top level g element of the graph. - var oldTranslation = this._translation; - var oldScale = this._scale; - var newTranslation = selection.datum().translation; - var newScale = selection.datum().scale; - var t = d3Zoom.zoomTransform(this._zoomSelection.node()); - if (oldTranslation) { - t = t.scale(1 / oldScale); - t = t.translate(-oldTranslation.x, -oldTranslation.y); - } - t = t.translate(newTranslation.x, newTranslation.y); - t = t.scale(newScale); - return t; - } - - function translateZoomBehaviorTransform(selection) { - - // Translate the current zoom transform for the top level svg - // uniformly with the given selection, using the difference - // between the translation specified in the selection's data and - // it's saved previous translation. The selection is normally the - // top level g element of the graph. - this._zoomBehavior.transform(this._zoomSelection, getTranslatedZoomTransform.call(this, selection)); - - // Save the selections's new translation and scale. - this._translation = selection.datum().translation; - this._scale = selection.datum().scale; - - // Set the original zoom transform to the translation and scale specified in - // the selection's data. - this._originalTransform = d3Zoom.zoomIdentity.translate(selection.datum().translation.x, selection.datum().translation.y).scale(selection.datum().scale); - } - - function resetZoom(transition) { - - // Reset the zoom transform to the original zoom transform. 
- var selection = this._zoomSelection; - if (transition) { - selection = selection - .transition(transition); - } - selection - .call(this._zoomBehavior.transform, this._originalTransform); - - return this; - } - - function zoomScaleExtent(extent) { - - this._options.zoomScaleExtent = extent; - - return this; - } - - function zoomTranslateExtent(extent) { - - this._options.zoomTranslateExtent = extent; - - return this; - } - - function zoomBehavior() { - return this._zoomBehavior || null; - } - - function zoomSelection() { - return this._zoomSelection || null; - } - - function pathTween(points, d1) { - return function() { - const pointInterpolators = points.map(function(p) { - return d3Interpolate.interpolate([p[0][0], p[0][1]], [p[1][0], p[1][1]]); - }); - return function(t) { - return t < 1 ? "M" + pointInterpolators.map(function(p) { return p(t); }).join("L") : d1; - }; - }; - } - - function pathTweenPoints(node, d1, precision, precisionIsRelative) { - const path0 = node; - const path1 = path0.cloneNode(); - const n0 = path0.getTotalLength(); - const n1 = (path1.setAttribute("d", d1), path1).getTotalLength(); - - // Uniform sampling of distance based on specified precision. - const distances = [0]; - let i = 0; - const dt = precisionIsRelative ? precision : precision / Math.max(n0, n1); - while ((i += dt) < 1) { - distances.push(i); - } - distances.push(1); - - // Compute point-interpolators at each distance. 
- const points = distances.map(function(t) { - const p0 = path0.getPointAtLength(t * n0); - const p1 = path1.getPointAtLength(t * n1); - return ([[p0.x, p0.y], [p1.x, p1.y]]); - }); - return points; - } - - function data() { - return this._data || null; - } - - function isEdgeElementParent(datum) { - return (datum.attributes.class == 'edge' || ( - datum.tag == 'a' && - datum.parent.tag == 'g' && - datum.parent.parent.attributes.class == 'edge' - )); - } - - function isEdgeElement(datum) { - return datum.parent && isEdgeElementParent(datum.parent); - } - - function getEdgeGroup(datum) { - if (datum.parent.attributes.class == 'edge') { - return datum.parent; - } else { // datum.parent.tag == 'g' && datum.parent.parent.tag == 'g' && datum.parent.parent.parent.attributes.class == 'edge' - return datum.parent.parent.parent; - } - } - - function getEdgeTitle(datum) { - return getEdgeGroup(datum).children.find(function (e) { - return e.tag == 'title'; - }); - } - - function render(callback) { - - if (this._busy) { - this._queue.push(this.render.bind(this, callback)); - return this; - } - this._dispatch.call('renderStart', this); - - if (this._transitionFactory) { - d3Timer.timeout(function () { // Decouple from time spent. 
See https://github.com/d3/d3-timer/issues/27 - this._transition = d3Transition.transition(this._transitionFactory()); - _render.call(this, callback); - }.bind(this), 0); - } else { - _render.call(this, callback); - } - return this; - } - - function _render(callback) { - - var transitionInstance = this._transition; - var fade = this._options.fade && transitionInstance != null; - var tweenPaths = this._options.tweenPaths; - var tweenShapes = this._options.tweenShapes; - var convertEqualSidedPolygons = this._options.convertEqualSidedPolygons; - var growEnteringEdges = this._options.growEnteringEdges && transitionInstance != null; - var attributer = this._attributer; - var graphvizInstance = this; - - function insertChildren(element) { - var children = element.selectAll(function () { - return element.node().childNodes; - }); - - children = children - .data(function (d) { - return d.children; - }, function (d) { - return d.key; - }); - var childrenEnter = children - .enter() - .append(function(d) { - var element = createElement(d); - if (d.tag == '#text' && fade) { - element.nodeValue = d.text; - } - return element; - }); - - if (fade || (growEnteringEdges && isEdgeElementParent(element.datum()))) { - var childElementsEnter = childrenEnter - .filter(function(d) { - return d.tag[0] == '#' ? null : this; - }) - .each(function (d) { - var childEnter = d3.select(this); - for (var attributeName of Object.keys(d.attributes)) { - var attributeValue = d.attributes[attributeName]; - childEnter - .attr(attributeName, attributeValue); - } - }); - childElementsEnter - .filter(function(d) { - return d.tag == 'svg' || d.tag == 'g' ? null : this; - }) - .style("opacity", 0.0); - } - var childrenExit = children - .exit(); - if (attributer) { - childrenExit.each(attributer); - } - if (transitionInstance) { - childrenExit = childrenExit - .transition(transitionInstance); - if (fade) { - childrenExit - .filter(function(d) { - return d.tag[0] == '#' ? 
null : this; - }) - .style("opacity", 0.0); - } - } - childrenExit = childrenExit - .remove(); - children = childrenEnter - .merge(children); - children.each(attributeElement); - } - - function attributeElement(data) { - var element = d3.select(this); - if (data.tag == "svg") { - var options = graphvizInstance._options; - if (options.width != null || options.height != null) { - var width = options.width; - var height = options.height; - if (width == null) { - width = data.attributes.width.replace('pt', '') * 4 / 3; - } else { - element - .attr("width", width); - data.attributes.width = width; - } - if (height == null) { - height = data.attributes.height.replace('pt', '') * 4 / 3; - } else { - element - .attr("height", height); - data.attributes.height = height; - } - if (!options.fit) { - element - .attr("viewBox", `0 0 ${width * 3 / 4 / options.scale} ${height * 3 / 4 / options.scale}`); - data.attributes.viewBox = `0 0 ${width * 3 / 4 / options.scale} ${height * 3 / 4 / options.scale}`; - } - } - if (options.scale != 1 && (options.fit || (options.width == null && options.height == null))) { - width = data.attributes.viewBox.split(' ')[2]; - height = data.attributes.viewBox.split(' ')[3]; - element - .attr("viewBox", `0 0 ${width / options.scale} ${height / options.scale}`); - data.attributes.viewBox = `0 0 ${width / options.scale} ${height / options.scale}`; - } - } - if (attributer) { - element.each(attributer); - } - var tag = data.tag; - var attributes = data.attributes; - var currentAttributes = element.node().attributes; - if (currentAttributes) { - for (var i = 0; i < currentAttributes.length; i++) { - var currentAttribute = currentAttributes[i]; - var name = currentAttribute.name; - if (name.split(':')[0] != 'xmlns' && currentAttribute.namespaceURI) { - var namespaceURIParts = currentAttribute.namespaceURI.split('/'); - var namespace = namespaceURIParts[namespaceURIParts.length - 1]; - name = namespace + ':' + name; - } - if (!(name in attributes)) { - 
attributes[name] = null; - } - } - } - var convertShape = false; - var convertPrevShape = false; - if (tweenShapes && transitionInstance) { - if ((this.nodeName == 'polygon' || this.nodeName == 'ellipse') && data.alternativeOld) { - convertPrevShape = true; - } - if ((tag == 'polygon' || tag == 'ellipse') && data.alternativeNew) { - convertShape = true; - } - if (this.nodeName == 'polygon' && tag == 'polygon' && data.alternativeOld) { - var prevData = extractElementData(element); - var prevPoints = prevData.attributes.points; - if (!convertEqualSidedPolygons) { - var nPrevPoints = prevPoints.split(' ').length; - var points = data.attributes.points; - var nPoints = points.split(' ').length; - if (nPoints == nPrevPoints) { - convertShape = false; - convertPrevShape = false; - } - } - } - if (convertPrevShape) { - var prevPathData = data.alternativeOld; - var pathElement = replaceElement(element, prevPathData); - pathElement.data([data], function () { - return data.key; - }); - element = pathElement; - } - if (convertShape) { - var newPathData = data.alternativeNew; - tag = 'path'; - attributes = newPathData.attributes; - } - } - var elementTransition = element; - if (transitionInstance) { - elementTransition = elementTransition - .transition(transitionInstance); - if (fade) { - elementTransition - .filter(function(d) { - return d.tag[0] == '#' ? null : this; - }) - .style("opacity", 1.0); - } - elementTransition - .filter(function(d) { - return d.tag[0] == '#' ? 
null : this; - }) - .on("end", function(d) { - d3.select(this) - .attr('style', (d && d.attributes && d.attributes.style) || null); - }); - } - var growThisPath = growEnteringEdges && tag == 'path' && data.offset; - if (growThisPath) { - var totalLength = data.totalLength; - element - .attr("stroke-dasharray", totalLength + " " + totalLength) - .attr("stroke-dashoffset", totalLength) - .attr('transform', 'translate(' + data.offset.x + ',' + data.offset.y + ')'); - attributes["stroke-dashoffset"] = 0; - attributes['transform'] = 'translate(0,0)'; - elementTransition - .attr("stroke-dashoffset", attributes["stroke-dashoffset"]) - .attr('transform', attributes['transform']) - .on("start", function() { - d3.select(this) - .style('opacity', null); - }) - .on("end", function() { - d3.select(this) - .attr('stroke-dashoffset', null) - .attr('stroke-dasharray', null) - .attr('transform', null); - }); - } - var moveThisPolygon = growEnteringEdges && tag == 'polygon' && isEdgeElement(data) && data.offset && data.parent.children[3].tag == 'path'; - if (moveThisPolygon) { - var edgePath = d3.select(element.node().parentNode.querySelector("path")); - var p0 = edgePath.node().getPointAtLength(0); - var p1 = edgePath.node().getPointAtLength(data.totalLength); - var p2 = edgePath.node().getPointAtLength(data.totalLength - 1); - var angle1 = Math.atan2(p1.y - p2.y, p1.x - p2.x) * 180 / Math.PI; - var x = p0.x - p1.x + data.offset.x; - var y = p0.y - p1.y + data.offset.y; - element - .attr('transform', 'translate(' + x + ',' + y + ')'); - elementTransition - .attrTween("transform", function () { - return function (t) { - var p = edgePath.node().getPointAtLength(data.totalLength * t); - var p2 = edgePath.node().getPointAtLength(data.totalLength * t + 1); - var angle = Math.atan2(p2.y - p.y, p2.x - p.x) * 180 / Math.PI - angle1; - x = p.x - p1.x + data.offset.x * (1 - t); - y = p.y - p1.y + data.offset.y * (1 - t); - return 'translate(' + x + ',' + y + ') rotate(' + angle + ' ' + p1.x 
+ ' ' + p1.y + ')'; - } - }) - .on("start", function() { - d3.select(this) - .style('opacity', null); - }) - .on("end", function() { - d3.select(this).attr('transform', null); - }); - } - var tweenThisPath = tweenPaths && transitionInstance && tag == 'path' && element.attr('d') != null; - for (var attributeName of Object.keys(attributes)) { - var attributeValue = attributes[attributeName]; - if (tweenThisPath && attributeName == 'd') { - var points = (data.alternativeOld || data).points; - if (points) { - elementTransition - .attrTween("d", pathTween(points, attributeValue)); - } - } else { - if (attributeName == 'transform' && data.translation) { - if (transitionInstance) { - var onEnd = elementTransition.on("end"); - elementTransition - .on("start", function () { - if (graphvizInstance._zoomBehavior) { - // Update the transform to transition to, just before the transition starts - // in order to catch changes between the transition scheduling to its start. - elementTransition - .tween("attr.transform", function() { - var node = this; - return function(t) { - node.setAttribute("transform", d3Interpolate.interpolateTransformSvg(d3Zoom.zoomTransform(graphvizInstance._zoomSelection.node()).toString(), getTranslatedZoomTransform.call(graphvizInstance, element).toString())(t)); - }; - }); - } - }) - .on("end", function () { - onEnd.call(this); - // Update the zoom transform to the new translated transform - if (graphvizInstance._zoomBehavior) { - translateZoomBehaviorTransform.call(graphvizInstance, element); - } - }); - } else { - if (graphvizInstance._zoomBehavior) { - // Update the transform attribute to set with the current pan translation - translateZoomBehaviorTransform.call(graphvizInstance, element); - attributeValue = getTranslatedZoomTransform.call(graphvizInstance, element).toString(); - } - } - } - elementTransition - .attr(attributeName, attributeValue); - } - } - if (convertShape) { - elementTransition - .on("end", function (d, i, nodes) { - pathElement = 
d3.select(this); - var newElement = replaceElement(pathElement, d); - newElement.data([d], function () { - return d.key; - }); - }); - } - if (data.text) { - elementTransition - .text(data.text); - } - insertChildren(element); - } - - var root = this._selection; - - if (transitionInstance != null) { - // Ensure original SVG shape elements are restored after transition before rendering new graph - var jobs = this._jobs; - if (graphvizInstance._active) { - jobs.push(null); - return this; - } else { - root - .transition(transitionInstance) - .transition() - .duration(0) - .on("end" , function () { - graphvizInstance._active = false; - if (jobs.length != 0) { - jobs.shift(); - graphvizInstance.render(); - } - }); - this._active = true; - } - } - - if (transitionInstance != null) { - root - .transition(transitionInstance) - .on("start" , function () { - graphvizInstance._dispatch.call('transitionStart', graphvizInstance); - }) - .on("end" , function () { - graphvizInstance._dispatch.call('transitionEnd', graphvizInstance); - }) - .transition() - .duration(0) - .on("start" , function () { - graphvizInstance._dispatch.call('restoreEnd', graphvizInstance); - graphvizInstance._dispatch.call('end', graphvizInstance); - if (callback) { - callback.call(graphvizInstance); - } - }); - } - - var data = this._data; - - var svg = root - .selectAll("svg") - .data([data], function (d) {return d.key}); - svg = svg - .enter() - .append("svg") - .merge(svg); - - attributeElement.call(svg.node(), data); - - - if (this._options.zoom && !this._zoomBehavior) { - createZoomBehavior.call(this); - } - - graphvizInstance._dispatch.call('renderEnd', graphvizInstance); - - if (transitionInstance == null) { - this._dispatch.call('end', this); - if (callback) { - callback.call(this); - } - } - - return this; - } - - function convertToPathData(originalData, guideData) { - if (originalData.tag == 'polygon') { - var newData = shallowCopyObject(originalData); - newData.tag = 'path'; - var 
originalAttributes = originalData.attributes; - var newAttributes = shallowCopyObject(originalAttributes); - var newPointsString = originalAttributes.points; - if (guideData.tag == 'polygon') { - var bbox = originalData.bbox; - bbox.cx = bbox.x + bbox.width / 2; - bbox.cy = bbox.y + bbox.height / 2; - var pointsString = originalAttributes.points; - var pointStrings = pointsString.split(' '); - var normPoints = pointStrings.map(function(p) {var xy = p.split(','); return [xy[0] - bbox.cx, xy[1] - bbox.cy]}); - var x0 = normPoints[normPoints.length - 1][0]; - var y0 = normPoints[normPoints.length - 1][1]; - for (var i = 0; i < normPoints.length; i++, x0 = x1, y0 = y1) { - var x1 = normPoints[i][0]; - var y1 = normPoints[i][1]; - var dx = x1 - x0; - var dy = y1 - y0; - if (dy == 0) { - continue; - } else { - var x2 = x0 - y0 * dx / dy; - } - if (0 <= x2 && x2 < Infinity && ((x0 <= x2 && x2 <= x1) || (x1 <= x2 && x2 <= x0))) { - break; - } - } - var newPointStrings = [[bbox.cx + x2, bbox.cy + 0].join(',')]; - newPointStrings = newPointStrings.concat(pointStrings.slice(i)); - newPointStrings = newPointStrings.concat(pointStrings.slice(0, i)); - newPointsString = newPointStrings.join(' '); - } - newAttributes['d'] = 'M' + newPointsString + 'z'; - delete newAttributes.points; - newData.attributes = newAttributes; - } else /* if (originalData.tag == 'ellipse') */ { - var newData = shallowCopyObject(originalData); - newData.tag = 'path'; - var originalAttributes = originalData.attributes; - var newAttributes = shallowCopyObject(originalAttributes); - var cx = originalAttributes.cx; - var cy = originalAttributes.cy; - var rx = originalAttributes.rx; - var ry = originalAttributes.ry; - if (guideData.tag == 'polygon') { - var bbox = guideData.bbox; - bbox.cx = bbox.x + bbox.width / 2; - bbox.cy = bbox.y + bbox.height / 2; - var p = guideData.attributes.points.split(' ')[0].split(','); - var sx = p[0]; - var sy = p[1]; - var dx = sx - bbox.cx; - var dy = sy - bbox.cy; - var l = 
Math.sqrt(Math.pow(dx, 2) + Math.pow(dy, 2)); - var cosA = dx / l; - var sinA = -dy / l; - } else { // if (guideData.tag == 'path') { - // FIXME: add support for getting start position from path - var cosA = 1; - var sinA = 0; - } - var x1 = rx * cosA; - var y1 = -ry * sinA; - var x2 = rx * (-cosA); - var y2 = -ry * (-sinA); - var dx = x2 - x1; - var dy = y2 - y1; - newAttributes['d'] = 'M ' + cx + ' ' + cy + ' m ' + x1 + ',' + y1 + ' a ' + rx + ',' + ry + ' 0 1,0 ' + dx + ',' + dy + ' a ' + rx + ',' + ry + ' 0 1,0 ' + -dx + ',' + -dy + 'z'; - delete newAttributes.cx; - delete newAttributes.cy; - delete newAttributes.rx; - delete newAttributes.ry; - newData.attributes = newAttributes; - } - return newData; - } - - function translatePointsAttribute(pointsString, x, y) { - var pointStrings = pointsString.split(' '); - var points = pointStrings.map(function(p) {return p.split(',')}); - var points = pointStrings.map(function(p) {return [roundTo2Decimals(+x + +p.split(',')[0]), roundTo2Decimals(+y + +p.split(',')[1])]}); - var pointStrings = points.map(function(p) {return p.join(',')}); - var pointsString = pointStrings.join(' '); - return pointsString; - } - - function translateDAttribute(d, x, y) { - var pointStrings = d.split(/[A-Z ]/); - pointStrings.shift(); - var commands = d.split(/[^[A-Z ]+/); - var points = pointStrings.map(function(p) {return p.split(',')}); - var points = pointStrings.map(function(p) {return [roundTo2Decimals(+x + +p.split(',')[0]), roundTo2Decimals(+y + +p.split(',')[1])]}); - var pointStrings = points.map(function(p) {return p.join(',')}); - d = commands.reduce(function(arr, v, i) { - return arr.concat(v, pointStrings[i]); - }, []).join(''); - return d; - } - - function initViz() { - - // force JIT compilation of Viz.js - try { - wasm.graphviz.layout("", "svg", "dot").then(() => { - wasm.graphvizSync().then((graphviz1) => { - this.layoutSync = graphviz1.layout.bind(graphviz1); - if (this._worker == null) { - this._dispatch.call("initEnd", 
this); - } - if (this._afterInit) { - this._afterInit(); - } - }); - }); - } catch(error) { - } - if (this._worker != null) { - var vizURL = this._vizURL; - var graphvizInstance = this; - this._workerPort.onmessage = function(event) { - var callback = graphvizInstance._workerCallbacks.shift(); - callback.call(graphvizInstance, event); - }; - if (!vizURL.match(/^https?:\/\/|^\/\//i)) { - // Local URL. Prepend with local domain to be usable in web worker - vizURL = (new window.URL(vizURL, document.location.href)).href; - } - postMessage.call(this, {dot: "", engine: 'dot', vizURL: vizURL}, function(event) { - switch (event.data.type) { - case "init": - graphvizInstance._dispatch.call("initEnd", this); - break; - } - }); - } - } - - function postMessage(message, callback) { - this._workerCallbacks.push(callback); - this._workerPort.postMessage(message); - } - - function layout(src, engine, vizOptions, callback) { - var worker = this._worker; - if (this._worker) { - postMessage.call(this, { - dot: src, - engine: engine, - options: vizOptions, - }, function (event) { - callback.call(this, event.data); - }); - } else { - try { - var svgDoc = this.layoutSync(src, "svg", engine, vizOptions); - callback.call(this, {type: 'done', svg: svgDoc}); - } - catch(error) { - callback.call(this, {type: 'error', error: error.message}); - } - } - } - - function dot(src, callback) { - - var graphvizInstance = this; - var worker = this._worker; - var engine = this._options.engine; - var images = this._images; - - this._dispatch.call("start", this); - this._busy = true; - this._dispatch.call("layoutStart", this); - var vizOptions = { - images: images, - }; - if (!this._worker && this.layoutSync == null) { - this._afterInit = this.dot.bind(this, src, callback); - return this; - } - this.layout(src, engine, vizOptions, function (data) { - switch (data.type) { - case "error": - if (graphvizInstance._onerror) { - graphvizInstance._onerror(data.error); - } else { - throw data.error.message - } 
- break; - case "done": - var svgDoc = data.svg; - layoutDone.call(this, svgDoc, callback); - break; - } - }); - - return this; - } - function layoutDone(svgDoc, callback) { - var keyMode = this._options.keyMode; - var tweenPaths = this._options.tweenPaths; - var tweenShapes = this._options.tweenShapes; - if (typeof this._options.tweenPrecision == 'string' && this._options.tweenPrecision.includes('%')) { - var tweenPrecision = +this._options.tweenPrecision.split('%')[0] / 100; - var tweenPrecisionIsRelative = this._options.tweenPrecision.includes('%'); - } else { - var tweenPrecision = this._options.tweenPrecision; - var tweenPrecisionIsRelative = false; - } - var growEnteringEdges = this._options.growEnteringEdges; - var dictionary = {}; - var prevDictionary = this._dictionary || {}; - var nodeDictionary = {}; - var prevNodeDictionary = this._nodeDictionary || {}; - - function setKey(datum, index) { - var tag = datum.tag; - if (keyMode == 'index') { - datum.key = index; - } else if (tag[0] != '#') { - if (keyMode == 'id') { - datum.key = datum.attributes.id; - } else if (keyMode == 'title') { - var title = datum.children.find(function (childData) { - return childData.tag == 'title'; - }); - if (title) { - if (title.children.length > 0) { - datum.key = title.children[0].text; - } else { - datum.key = ''; - } - } - } - } - if (datum.key == null) { - if (tweenShapes) { - if (tag == 'ellipse' || tag == 'polygon') { - tag = 'path'; - } - } - datum.key = tag + '-' + index; - } - } - - function setId(datum, parentData) { - var id = (parentData ? parentData.id + '.' 
: '') + datum.key; - datum.id = id; - } - - function addToDictionary(datum) { - dictionary[datum.id] = datum; - } - - function calculateAlternativeShapeData(datum, prevDatum) { - if (tweenShapes && datum.id in prevDictionary) { - if ((prevDatum.tag == 'polygon' || prevDatum.tag == 'ellipse' || prevDatum.tag == 'path') && (prevDatum.tag != datum.tag || datum.tag == 'polygon')) { - if (prevDatum.tag != 'path') { - datum.alternativeOld = convertToPathData(prevDatum, datum); - } - if (datum.tag != 'path') { - datum.alternativeNew = convertToPathData(datum, prevDatum); - } - } - } - } - - function calculatePathTweenPoints(datum, prevDatum) { - if (tweenPaths && prevDatum && (prevDatum.tag == 'path' || (datum.alternativeOld && datum.alternativeOld.tag == 'path'))) { - var attribute_d = (datum.alternativeNew || datum).attributes.d; - if (datum.alternativeOld) { - var oldNode = createElementWithAttributes(datum.alternativeOld); - } else { - var oldNode = createElementWithAttributes(prevDatum); - } - (datum.alternativeOld || (datum.alternativeOld = {})).points = pathTweenPoints(oldNode, attribute_d, tweenPrecision, tweenPrecisionIsRelative); - } - } - - function postProcessDataPass1Local(datum, index=0, parentData) { - setKey(datum, index); - setId(datum, parentData); - var id = datum.id; - var prevDatum = prevDictionary[id]; - addToDictionary(datum); - calculateAlternativeShapeData(datum, prevDatum); - calculatePathTweenPoints(datum, prevDatum); - var childTagIndexes = {}; - datum.children.forEach(function (childData) { - var childTag = childData.tag; - if (childTag == 'ellipse' || childTag == 'polygon') { - childTag = 'path'; - } - if (childTagIndexes[childTag] == null) { - childTagIndexes[childTag] = 0; - } - var childIndex = childTagIndexes[childTag]++; - postProcessDataPass1Local(childData, childIndex, datum); - }); - } - - function addToNodeDictionary(datum) { - var tag = datum.tag; - if (growEnteringEdges && datum.parent) { - if (datum.parent.attributes.class == 
'node') { - if (tag == 'title') { - if (datum.children.length > 0) { - var child = datum.children[0]; - var nodeId = child.text; - } else { - var nodeId = ''; - } - nodeDictionary[nodeId] = datum.parent; - } - } - } - } - - function extractGrowingEdgesData(datum) { - var id = datum.id; - var tag = datum.tag; - var prevDatum = prevDictionary[id]; - if (growEnteringEdges && !prevDatum && datum.parent) { - if (isEdgeElement(datum)) { - if (tag == 'path' || tag == 'polygon') { - if (tag == 'polygon') { - var path = datum.parent.children.find(function (e) { - return e.tag == 'path'; - }); - if (path) { - datum.totalLength = path.totalLength; - } - } - var title = getEdgeTitle(datum); - var child = title.children[0]; - var nodeIds = child.text.split('->'); - if (nodeIds.length != 2) { - nodeIds = child.text.split('--'); - } - var startNodeId = nodeIds[0]; - var startNode = nodeDictionary[startNodeId]; - var prevStartNode = prevNodeDictionary[startNodeId]; - if (prevStartNode) { - var i = startNode.children.findIndex(function (element, index) { - return element.tag == 'g'; - }); - if (i >= 0) { - var j = startNode.children[i].children.findIndex(function (element, index) { - return element.tag == 'a'; - }); - startNode = startNode.children[i].children[j]; - } - var i = prevStartNode.children.findIndex(function (element, index) { - return element.tag == 'g'; - }); - if (i >= 0) { - var j = prevStartNode.children[i].children.findIndex(function (element, index) { - return element.tag == 'a'; - }); - prevStartNode = prevStartNode.children[i].children[j]; - } - var startShapes = startNode.children; - for (var i = 0; i < startShapes.length; i++) { - if (startShapes[i].tag == 'polygon' || startShapes[i].tag == 'ellipse' || startShapes[i].tag == 'path' || startShapes[i].tag == 'text') { - var startShape = startShapes[i]; - break; - } - } - var prevStartShapes = prevStartNode.children; - for (var i = 0; i < prevStartShapes.length; i++) { - if (prevStartShapes[i].tag == 'polygon' || 
prevStartShapes[i].tag == 'ellipse' || prevStartShapes[i].tag == 'path' || prevStartShapes[i].tag == 'text') { - var prevStartShape = prevStartShapes[i]; - break; - } - } - if (prevStartShape && startShape) { - datum.offset = { - x: prevStartShape.center.x - startShape.center.x, - y: prevStartShape.center.y - startShape.center.y, - }; - } else { - datum.offset = {x: 0, y: 0}; - } - } - } - } - } - } - - function postProcessDataPass2Global(datum) { - addToNodeDictionary(datum); - extractGrowingEdgesData(datum); - datum.children.forEach(function (childData) { - postProcessDataPass2Global(childData); - }); - } - - this._dispatch.call("layoutEnd", this); - - var newDoc = d3.select(document.createDocumentFragment()) - .append('div'); - - var parser = new window.DOMParser(); - var doc = parser.parseFromString(svgDoc, "image/svg+xml"); - - newDoc - .append(function() { - return doc.documentElement; - }); - - var newSvg = newDoc - .select('svg'); - - var data = extractAllElementsData(newSvg); - this._dispatch.call('dataExtractEnd', this); - postProcessDataPass1Local(data); - this._dispatch.call('dataProcessPass1End', this); - postProcessDataPass2Global(data); - this._dispatch.call('dataProcessPass2End', this); - this._data = data; - this._dictionary = dictionary; - this._nodeDictionary = nodeDictionary; - - this._extractData = function (element, childIndex, parentData) { - var data = extractAllElementsData(element); - postProcessDataPass1Local(data, childIndex, parentData); - postProcessDataPass2Global(data); - return data; - }; - this._busy = false; - this._dispatch.call('dataProcessEnd', this); - if (callback) { - callback.call(this); - } - if (this._queue.length > 0) { - var job = this._queue.shift(); - job.call(this); - } - } - - function renderDot(src, callback) { - - var graphvizInstance = this; - - this - .dot(src, render); - - function render() { - graphvizInstance - .render(callback); - } - - return this; - } - - function transition(name) { - - if (name instanceof 
Function) { - this._transitionFactory = name; - } else { - this._transition = d3Transition.transition(name); - } - - return this; - } - function active(name) { - - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - if (svg.size() != 0) { - return d3Transition.active(svg.node(), name); - } else { - return null; - } - } - - function options(options) { - - if (typeof options == 'undefined') { - return Object.assign({}, this._options); - } else { - for (var option of Object.keys(options)) { - this._options[option] = options[option]; - } - return this; - } - } - - function width(width) { - - this._options.width = width; - - return this; - } - - function height(height) { - - this._options.height = height; - - return this; - } - - function scale(scale) { - - this._options.scale = scale; - - return this; - } - - function fit(fit) { - - this._options.fit = fit; - - return this; - } - - function attributer(callback) { - - this._attributer = callback; - - return this; - } - - function engine(engine) { - - this._options.engine = engine; - - return this; - } - - function images(path, width, height) { - - this._images.push({path:path, width: width, height:height}); - - return this; - } - - function keyMode(keyMode) { - - if (!this._keyModes.has(keyMode)) { - throw Error('Illegal keyMode: ' + keyMode); - } - if (keyMode != this._options.keyMode && this._data != null) { - throw Error('Too late to change keyMode'); - } - this._options.keyMode = keyMode; - - return this; - } - - function fade(enable) { - - this._options.fade = enable; - - return this; - } - - function tweenPaths(enable) { - - this._options.tweenPaths = enable; - - return this; - } - - function tweenShapes(enable) { - - this._options.tweenShapes = enable; - if (enable) { - this._options.tweenPaths = true; - } - - return this; - } - - function convertEqualSidedPolygons(enable) { - - this._options.convertEqualSidedPolygons = enable; - - return this; - } - - function 
tweenPrecision(precision) { - - this._options.tweenPrecision = precision; - - return this; - } - - function growEnteringEdges(enable) { - - this._options.growEnteringEdges = enable; - - return this; - } - - function on(typenames, callback) { - - this._dispatch.on(typenames, callback); - - return this; - } - - function onerror(callback) { - - this._onerror = callback; - - return this; - } - - function logEvents(enable) { - - var t0 = Date.now(); - var times = {}; - var eventTypes = this._eventTypes; - var maxEventTypeLength = Math.max(...(eventTypes.map(eventType => eventType.length))); - for (let i = 0; i < eventTypes.length; i++) { - let eventType = eventTypes[i]; - times[eventType] = []; - var graphvizInstance = this; - var expectedDelay; - var expectedDuration; - this - .on(eventType + '.log', enable ? function () { - var t = Date.now(); - var seqNo = times[eventType].length; - times[eventType].push(t); - var string = ''; - string += 'Event '; - string += d3Format.format(' >2')(i) + ' '; - string += eventType + ' '.repeat(maxEventTypeLength - eventType.length); - string += d3Format.format(' >5')(t - t0) + ' '; - if (eventType != 'initEnd') { - string += d3Format.format(' >5')(t - times['start'][seqNo]); - } - if (eventType == 'dataProcessEnd') { - string += ' prepare ' + d3Format.format(' >5')((t - times['layoutEnd'][seqNo])); - } - if (eventType == 'renderEnd' && graphvizInstance._transition) { - string += ' transition start margin ' + d3Format.format(' >5')(graphvizInstance._transition.delay() - (t - times['renderStart'][seqNo])); - expectedDelay = graphvizInstance._transition.delay(); - expectedDuration = graphvizInstance._transition.duration(); - } - if (eventType == 'transitionStart') { - var actualDelay = (t - times['renderStart'][seqNo]); - string += ' transition delay ' + d3Format.format(' >5')(t - times['renderStart'][seqNo]); - string += ' expected ' + d3Format.format(' >5')(expectedDelay); - string += ' diff ' + d3Format.format(' >5')(actualDelay - 
expectedDelay); - } - if (eventType == 'transitionEnd') { - var actualDuration = t - times['transitionStart'][seqNo]; - string += ' transition duration ' + d3Format.format(' >5')(actualDuration); - string += ' expected ' + d3Format.format(' >5')(expectedDuration); - string += ' diff ' + d3Format.format(' >5')(actualDuration - expectedDuration); - } - console.log(string); - t0 = t; - } : null); - } - return this; - } - - function destroy() { - - delete this._selection.node().__graphviz__; - if (this._worker) { - this._workerPortClose(); - } - return this; - } - - function rotate(x, y, cosA, sinA) { - // (x + j * y) * (cosA + j * sinA) = x * cosA - y * sinA + j * (x * sinA + y * cosA) - y = -y; - sinA = -sinA; - [x, y] = [x * cosA - y * sinA, x * sinA + y * cosA]; - y = -y; - return [x, y]; - } - - function drawEdge(x1, y1, x2, y2, attributes, options={}) { - attributes = Object.assign({}, attributes); - if (attributes.style && attributes.style.includes('invis')) { - var newEdge = d3.select(null); - } else { - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var newEdge0 = createEdge.call(this, attributes); - var edgeData = extractAllElementsData(newEdge0); - var newEdge = graph0.append('g') - .data([edgeData]); - attributeElement.call(newEdge.node(), edgeData); - _updateEdge.call(this, newEdge, x1, y1, x2, y2, attributes, options); - } - this._drawnEdge = { - g: newEdge, - x1: x1, - y1: y1, - x2: x2, - y2: y2, - attributes: attributes, - }; - - return this; - } - - function updateDrawnEdge(x1, y1, x2, y2, attributes={}, options={}) { - if (!this._drawnEdge) { - throw Error('No edge has been drawn'); - } - var edge = this._drawnEdge.g; - attributes = Object.assign(this._drawnEdge.attributes, attributes); - this._drawnEdge.x1 = x1; - this._drawnEdge.y1 = y1; - this._drawnEdge.x2 = x2; - this._drawnEdge.y2 = y2; - if (edge.empty() && !(attributes.style && 
attributes.style.includes('invis'))) { - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var edge = graph0.append('g'); - this._drawnEdge.g = edge; - } - if (!edge.empty()) { - _updateEdge.call(this, edge, x1, y1, x2, y2, attributes, options); - } - - return this; - } - - function _updateEdge(edge, x1, y1, x2, y2, attributes, options) { - - var newEdge = createEdge.call(this, attributes); - var edgeData = extractAllElementsData(newEdge); - edge.data([edgeData]); - attributeElement.call(edge.node(), edgeData); - _moveEdge(edge, x1, y1, x2, y2, attributes, options); - } - - function _moveEdge(edge, x1, y1, x2, y2, attributes, options) { - - var shortening = options.shortening || 0; - var arrowHeadLength = 10; - var arrowHeadWidth = 7; - var margin = 0.1; - - var arrowHeadPoints = [ - [0, -arrowHeadWidth / 2], - [arrowHeadLength, 0], - [0, arrowHeadWidth / 2], - [0, -arrowHeadWidth / 2], - ]; - - var dx = x2 - x1; - var dy = y2 - y1; - var length = Math.sqrt(dx * dx + dy * dy); - if (length == 0) { - var cosA = 1; - var sinA = 0; - } else { - var cosA = dx / length; - var sinA = dy / length; - } - x2 = x1 + (length - shortening - arrowHeadLength - margin) * cosA; - y2 = y1 + (length - shortening - arrowHeadLength - margin) * sinA; - - if (attributes.URL || attributes.tooltip) { - var a = edge.selectWithoutDataPropagation("g").selectWithoutDataPropagation("a"); - var line = a.selectWithoutDataPropagation("path"); - var arrowHead = a.selectWithoutDataPropagation("polygon"); - } else { - var line = edge.selectWithoutDataPropagation("path"); - var arrowHead = edge.selectWithoutDataPropagation("polygon"); - } - - var path1 = d3Path.path(); - path1.moveTo(x1, y1); - path1.lineTo(x2, y2); - - line - .attr("d", path1); - - x2 = x1 + (length - shortening - arrowHeadLength) * cosA; - y2 = y1 + (length - shortening - arrowHeadLength) * sinA; - for (var i = 0; i < arrowHeadPoints.length; 
i++) { - var point = arrowHeadPoints[i]; - arrowHeadPoints[i] = rotate(point[0], point[1], cosA, sinA); - } - for (var i = 0; i < arrowHeadPoints.length; i++) { - var point = arrowHeadPoints[i]; - arrowHeadPoints[i] = [x2 + point[0], y2 + point[1]]; - } - var allPoints = []; - for (var i = 0; i < arrowHeadPoints.length; i++) { - var point = arrowHeadPoints[i]; - allPoints.push(point.join(',')); - } - var pointsAttr = allPoints.join(' '); - - arrowHead - .attr("points", pointsAttr); - - return this; - } - - function moveDrawnEdgeEndPoint(x2, y2, options={}) { - - if (!this._drawnEdge) { - throw Error('No edge has been drawn'); - } - var edge = this._drawnEdge.g; - var x1 = this._drawnEdge.x1; - var y1 = this._drawnEdge.y1; - var attributes = this._drawnEdge.attributes; - - this._drawnEdge.x2 = x2; - this._drawnEdge.y2 = y2; - _moveEdge(edge, x1, y1, x2, y2, attributes, options); - - return this - } - - function removeDrawnEdge() { - - if (!this._drawnEdge) { - return this; - } - - var edge = this._drawnEdge.g; - - edge.remove(); - - this._drawnEdge = null; - - return this - } - - function insertDrawnEdge(name) { - - if (!this._drawnEdge) { - throw Error('No edge has been drawn'); - } - - var edge = this._drawnEdge.g; - if (edge.empty()) { - return this; - } - var attributes = this._drawnEdge.attributes; - - var title = edge.selectWithoutDataPropagation("title"); - title - .text(name); - - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var graph0Datum = graph0.datum(); - var edgeData = this._extractData(edge, graph0Datum.children.length, graph0.datum()); - graph0Datum.children.push(edgeData); - - insertAllElementsData(edge, edgeData); - - this._drawnEdge = null; - - return this - - } - - function drawnEdgeSelection() { - - if (this._drawnEdge) { - return this._drawnEdge.g; - } else { - return d3.select(null); - } - - } - - - function createEdge(attributes) { - var 
attributesString = ''; - for (var name of Object.keys(attributes)) { - if (attributes[name] != null) { - attributesString += ' "' + name + '"="' + attributes[name] + '"'; - } - } - var dotSrc = 'digraph {a -> b [' + attributesString + ']}'; - var svgDoc = this.layoutSync(dotSrc, 'svg', 'dot'); - var parser = new window.DOMParser(); - var doc = parser.parseFromString(svgDoc, "image/svg+xml"); - var newDoc = d3.select(document.createDocumentFragment()) - .append(function() { - return doc.documentElement; - }); - var edge = newDoc.select('.edge'); - - return edge; - } - - function drawNode(x, y, nodeId, attributes={}, options={}) { - attributes = Object.assign({}, attributes); - if (attributes.style && attributes.style.includes('invis')) { - var newNode = d3.select(null); - } else { - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var newNode0 = createNode.call(this, nodeId, attributes); - var nodeData = extractAllElementsData(newNode0); - var newNode = graph0.append('g') - .data([nodeData]); - attributeElement.call(newNode.node(), nodeData); - _updateNode.call(this, newNode, x, y, nodeId, attributes, options); - } - this._drawnNode = { - g: newNode, - nodeId: nodeId, - x: x, - y: y, - attributes: attributes, - }; - - return this; - } - - function updateDrawnNode(x, y, nodeId, attributes={}, options={}) { - if (!this._drawnNode) { - throw Error('No node has been drawn'); - } - - var node = this._drawnNode.g; - if (nodeId == null) { - nodeId = this._drawnNode.nodeId; - } - attributes = Object.assign(this._drawnNode.attributes, attributes); - this._drawnNode.nodeId = nodeId; - this._drawnNode.x = x; - this._drawnNode.y = y; - if (node.empty() && !(attributes.style && attributes.style.includes('invis'))) { - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var node = graph0.append('g'); - 
this._drawnNode.g = node; - } - if (!node.empty()) { - _updateNode.call(this, node, x, y, nodeId, attributes, options); - } - - return this; - } - - function _updateNode(node, x, y, nodeId, attributes, options) { - - var newNode = createNode.call(this, nodeId, attributes); - var nodeData = extractAllElementsData(newNode); - node.data([nodeData]); - attributeElement.call(node.node(), nodeData); - _moveNode(node, x, y, attributes); - - return this; - } - - function _moveNode(node, x, y, attributes, options) { - if (attributes.URL || attributes.tooltip) { - var subParent = node.selectWithoutDataPropagation("g").selectWithoutDataPropagation("a"); - } else { - var subParent = node; - } - var svgElements = subParent.selectAll('ellipse,polygon,path,polyline'); - var text = node.selectWithoutDataPropagation("text"); - - if (svgElements.size() != 0) { - var bbox = svgElements.node().getBBox(); - bbox.cx = bbox.x + bbox.width / 2; - bbox.cy = bbox.y + bbox.height / 2; - } else if (text.size() != 0) { - bbox = { - x: +text.attr('x'), - y: +text.attr('y'), - width: 0, - height: 0, - cx: +text.attr('x'), - cy: +text.attr('y'), - }; - } - svgElements.each(function(data, index) { - var svgElement = d3.select(this); - if (svgElement.attr("cx")) { - svgElement - .attr("cx", roundTo2Decimals(x)) - .attr("cy", roundTo2Decimals(y)); - } else if (svgElement.attr("points")) { - var pointsString = svgElement.attr('points').trim(); - svgElement - .attr("points", translatePointsAttribute(pointsString, x - bbox.cx, y - bbox.cy)); - } else { - var d = svgElement.attr('d'); - svgElement - .attr("d", translateDAttribute(d, x - bbox.cx, y - bbox.cy)); - } - }); - - if (text.size() != 0) { - text - .attr("x", roundTo2Decimals(+text.attr("x") + x - bbox.cx)) - .attr("y", roundTo2Decimals(+text.attr("y") + y - bbox.cy)); - } - return this; - } - - function moveDrawnNode(x, y, options={}) { - - if (!this._drawnNode) { - throw Error('No node has been drawn'); - } - var node = this._drawnNode.g; - 
var attributes = this._drawnNode.attributes; - - this._drawnNode.x = x; - this._drawnNode.y = y; - - if (!node.empty()) { - _moveNode(node, x, y, attributes); - } - - return this - } - - function removeDrawnNode() { - - if (!this._drawnNode) { - return this; - } - - var node = this._drawnNode.g; - - if (!node.empty()) { - node.remove(); - } - - this._drawnNode = null; - - return this - } - - function insertDrawnNode(nodeId) { - - if (!this._drawnNode) { - throw Error('No node has been drawn'); - } - - if (nodeId == null) { - nodeId = this._drawnNode.nodeId; - } - var node = this._drawnNode.g; - if (node.empty()) { - return this; - } - var attributes = this._drawnNode.attributes; - - var title = node.selectWithoutDataPropagation("title"); - title - .text(nodeId); - if (attributes.URL || attributes.tooltip) { - var ga = node.selectWithoutDataPropagation("g"); - var a = ga.selectWithoutDataPropagation("a"); - var svgElement = a.selectWithoutDataPropagation('ellipse,polygon,path,polyline'); - var text = a.selectWithoutDataPropagation('text'); - } else { - var svgElement = node.selectWithoutDataPropagation('ellipse,polygon,path,polyline'); - var text = node.selectWithoutDataPropagation('text'); - } - text - .text(attributes.label || nodeId); - - var root = this._selection; - var svg = root.selectWithoutDataPropagation("svg"); - var graph0 = svg.selectWithoutDataPropagation("g"); - var graph0Datum = graph0.datum(); - var nodeData = this._extractData(node, graph0Datum.children.length, graph0.datum()); - graph0Datum.children.push(nodeData); - - insertAllElementsData(node, nodeData); - - this._drawnNode = null; - - return this - - } - - function drawnNodeSelection() { - - if (this._drawnNode) { - return this._drawnNode.g; - } else { - return d3.select(null); - } - - } - - function createNode(nodeId, attributes) { - var attributesString = ''; - for (var name of Object.keys(attributes)) { - if (attributes[name] != null) { - attributesString += ' "' + name + '"="' + 
attributes[name] + '"'; - } - } - var dotSrc = 'graph {"' + nodeId + '" [' + attributesString + ']}'; - var svgDoc = this.layoutSync(dotSrc, 'svg', 'dot'); - var parser = new window.DOMParser(); - var doc = parser.parseFromString(svgDoc, "image/svg+xml"); - var newDoc = d3.select(document.createDocumentFragment()) - .append(function() { - return doc.documentElement; - }); - var node = newDoc.select('.node'); - - return node; - } - - /* This file is excluded from coverage because the intrumented code - * translates "self" which gives a reference error. - */ - - /* istanbul ignore next */ - - function workerCodeBody(port) { - - self.document = {}; // Workaround for "ReferenceError: document is not defined" in hpccWasm - - port.addEventListener('message', function(event) { - let hpccWasm = self["@hpcc-js/wasm"]; - if (hpccWasm == undefined && event.data.vizURL) { - importScripts(event.data.vizURL); - hpccWasm = self["@hpcc-js/wasm"]; - hpccWasm.wasmFolder(event.data.vizURL.match(/.*\//)[0]); - // This is an alternative workaround where wasmFolder() is not needed - // document = {currentScript: {src: event.data.vizURL}}; - } - hpccWasm.graphviz.layout(event.data.dot, "svg", event.data.engine, event.data.options).then((svg) => { - if (svg) { - port.postMessage({ - type: "done", - svg: svg, - }); - } else if (event.data.vizURL) { - port.postMessage({ - type: "init", - }); - } else { - port.postMessage({ - type: "skip", - }); - } - }).catch(error => { - port.postMessage({ - type: "error", - error: error.message, - }); - }); - }); - } - - /* istanbul ignore next */ - - function workerCode() { - - const port = self; - workerCodeBody(port); - } - - /* istanbul ignore next */ - - function sharedWorkerCode() { - self.onconnect = function(e) { - const port = e.ports[0]; - workerCodeBody(port); - port.start(); - }; - } - - function Graphviz(selection, options) { - this._options = { - useWorker: true, - useSharedWorker: false, - engine: 'dot', - keyMode: 'title', - fade: true, - 
tweenPaths: true, - tweenShapes: true, - convertEqualSidedPolygons: true, - tweenPrecision: 1, - growEnteringEdges: true, - zoom: true, - zoomScaleExtent: [0.1, 10], - zoomTranslateExtent: [[-Infinity, -Infinity], [+Infinity, +Infinity]], - width: null, - height: null, - scale: 1, - fit: false, - }; - if (options instanceof Object) { - for (var option of Object.keys(options)) { - this._options[option] = options[option]; - } - } else if (typeof options == 'boolean') { - this._options.useWorker = options; - } - var useWorker = this._options.useWorker; - var useSharedWorker = this._options.useSharedWorker; - if (typeof Worker == 'undefined') { - useWorker = false; - } - if (typeof SharedWorker == 'undefined') { - useSharedWorker = false; - } - if (useWorker || useSharedWorker) { - var scripts = d3.selectAll('script'); - var vizScript = scripts.filter(function() { - return d3.select(this).attr('type') == 'javascript/worker' || (d3.select(this).attr('src') && d3.select(this).attr('src').match(/.*\/@hpcc-js\/wasm/)); - }); - if (vizScript.size() == 0) { - console.warn('No script tag of type "javascript/worker" was found and "useWorker" is true. Not using web worker.'); - useWorker = false; - useSharedWorker = false; - } else { - this._vizURL = vizScript.attr('src'); - if (!this._vizURL) { - console.warn('No "src" attribute of was found on the "javascript/worker" script tag and "useWorker" is true. 
Not using web worker.'); - useWorker = false; - useSharedWorker = false; - } - } - } - if (useSharedWorker) { - const url = 'data:application/javascript;base64,' + btoa(workerCodeBody.toString() + '(' + sharedWorkerCode.toString() + ')()'); - this._worker = this._worker = new SharedWorker(url); - this._workerPort = this._worker.port; - this._workerPortClose = this._worker.port.close.bind(this._workerPort); - this._worker.port.start(); - this._workerCallbacks = []; - } - else if (useWorker) { - var blob = new Blob([workerCodeBody.toString() + '(' + workerCode.toString() + ')()']); - var blobURL = window.URL.createObjectURL(blob); - this._worker = new Worker(blobURL); - this._workerPort = this._worker; - this._workerPortClose = this._worker.terminate.bind(this._worker); - this._workerCallbacks = []; - } - this._selection = selection; - this._active = false; - this._busy = false; - this._jobs = []; - this._queue = []; - this._keyModes = new Set([ - 'title', - 'id', - 'tag-index', - 'index' - ]); - this._images = []; - this._translation = undefined; - this._scale = undefined; - this._eventTypes = [ - 'initEnd', - 'start', - 'layoutStart', - 'layoutEnd', - 'dataExtractEnd', - 'dataProcessPass1End', - 'dataProcessPass2End', - 'dataProcessEnd', - 'renderStart', - 'renderEnd', - 'transitionStart', - 'transitionEnd', - 'restoreEnd', - 'end' - ]; - this._dispatch = d3Dispatch.dispatch(...this._eventTypes); - initViz.call(this); - selection.node().__graphviz__ = this; - } - - function graphviz(selector, options) { - var g = d3.select(selector).graphviz(options); - return g; - } - - Graphviz.prototype = graphviz.prototype = { - constructor: Graphviz, - engine: engine, - addImage: images, - keyMode: keyMode, - fade: fade, - tweenPaths: tweenPaths, - tweenShapes: tweenShapes, - convertEqualSidedPolygons: convertEqualSidedPolygons, - tweenPrecision: tweenPrecision, - growEnteringEdges: growEnteringEdges, - zoom: zoom, - resetZoom: resetZoom, - zoomBehavior: zoomBehavior, - 
zoomSelection: zoomSelection, - zoomScaleExtent: zoomScaleExtent, - zoomTranslateExtent: zoomTranslateExtent, - render: render, - layout: layout, - dot: dot, - data: data, - renderDot: renderDot, - transition: transition, - active: active, - options: options, - width: width, - height: height, - scale: scale, - fit: fit, - attributer: attributer, - on: on, - onerror: onerror, - logEvents: logEvents, - destroy: destroy, - drawEdge: drawEdge, - updateDrawnEdge: updateDrawnEdge, - moveDrawnEdgeEndPoint, - insertDrawnEdge, - removeDrawnEdge, removeDrawnEdge, - drawnEdgeSelection, drawnEdgeSelection, - drawNode: drawNode, - updateDrawnNode: updateDrawnNode, - moveDrawnNode: moveDrawnNode, - insertDrawnNode, - removeDrawnNode, removeDrawnNode, - drawnNodeSelection, drawnNodeSelection, - }; - - function selection_graphviz(options) { - - var g = this.node().__graphviz__; - if (g) { - g.options(options); - // Ensure a possible new initEnd event handler is attached before calling it - d3Timer.timeout(function () { - g._dispatch.call("initEnd", this); - }.bind(this), 0); - } else { - g = new Graphviz(this, options); - } - return g; - } - - function selection_selectWithoutDataPropagation(name) { - - return d3.select(this.size() > 0 ? 
this.node().querySelector(name) : null); - } - - d3.selection.prototype.graphviz = selection_graphviz; - d3.selection.prototype.selectWithoutDataPropagation = selection_selectWithoutDataPropagation; - - exports.graphviz = graphviz; - - Object.defineProperty(exports, '__esModule', { value: true }); - -}))); -//# sourceMappingURL=d3-graphviz.js.map diff --git a/blueprint/web/js/d3.min.js b/blueprint/web/js/d3.min.js deleted file mode 100644 index 057c25890..000000000 --- a/blueprint/web/js/d3.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://d3js.org v5.15.0 Copyright 2019 Mike Bostock -!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t=t||self).d3=t.d3||{})}(this,function(t){"use strict";function n(t,n){return tn?1:t>=n?0:NaN}function e(t){var e;return 1===t.length&&(e=t,t=function(t,r){return n(e(t),r)}),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)>0?i=o:r=o+1}return r}}}var r=e(n),i=r.right,o=r.left;function a(t,n){return[t,n]}function u(t){return null===t?NaN:+t}function c(t,n){var e,r,i=t.length,o=0,a=-1,c=0,f=0;if(null==n)for(;++a1)return f/(o-1)}function f(t,n){var e=c(t,n);return e?Math.sqrt(e):e}function s(t,n){var e,r,i,o=t.length,a=-1;if(null==n){for(;++a=e)for(r=i=e;++ae&&(r=e),i=e)for(r=i=e;++ae&&(r=e),i0)return[t];if((r=n0)for(t=Math.ceil(t/a),n=Math.floor(n/a),o=new Array(i=Math.ceil(n-t+1));++u=0?(o>=y?10:o>=_?5:o>=b?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=y?10:o>=_?5:o>=b?2:1)}function w(t,n,e){var r=Math.abs(n-t)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=y?i*=10:o>=_?i*=5:o>=b&&(i*=2),n=1)return+e(t[r-1],r-1,t);var r,i=(r-1)*n,o=Math.floor(i),a=+e(t[o],o,t);return a+(+e(t[o+1],o+1,t)-a)*(i-o)}}function T(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++or&&(r=e)}else 
for(;++o=e)for(r=e;++or&&(r=e);return r}function A(t){for(var n,e,r,i=t.length,o=-1,a=0;++o=0;)for(n=(r=t[i]).length;--n>=0;)e[--a]=r[n];return e}function S(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++oe&&(r=e)}else for(;++o=e)for(r=e;++oe&&(r=e);return r}function k(t){if(!(i=t.length))return[];for(var n=-1,e=S(t,E),r=new Array(e);++n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}})}function V(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),$.hasOwnProperty(n)?{space:$[n],local:t}:t}function Z(t){var n=W(t);return(n.local?function(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}:function(t){return function(){var n=this.ownerDocument,e=this.namespaceURI;return e===G&&n.documentElement.namespaceURI===G?n.createElement(t):n.createElementNS(e,t)}})(n)}function Q(){}function K(t){return null==t?Q:function(){return this.querySelector(t)}}function J(){return[]}function tt(t){return null==t?J:function(){return this.querySelectorAll(t)}}function nt(t){return function(){return this.matches(t)}}function et(t){return new Array(t.length)}function rt(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}rt.prototype={constructor:rt,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var it="$";function ot(t,n,e,r,i,o){for(var a,u=0,c=n.length,f=o.length;un?1:t>=n?0:NaN}function ct(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function ft(t,n){return t.style.getPropertyValue(n)||ct(t).getComputedStyle(t,null).getPropertyValue(n)}function st(t){return 
t.trim().split(/^|\s+/)}function lt(t){return t.classList||new ht(t)}function ht(t){this._node=t,this._names=st(t.getAttribute("class")||"")}function dt(t,n){for(var e=lt(t),r=-1,i=n.length;++r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var Mt={};(t.event=null,"undefined"!=typeof document)&&("onmouseenter"in document.documentElement||(Mt={mouseenter:"mouseover",mouseleave:"mouseout"}));function Nt(t,n,e){return t=Tt(t,n,e),function(n){var e=n.relatedTarget;e&&(e===this||8&e.compareDocumentPosition(this))||t.call(this,n)}}function Tt(n,e,r){return function(i){var o=t.event;t.event=i;try{n.call(this,this.__data__,e,r)}finally{t.event=o}}}function At(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=m&&(m=b+1);!(_=g[m])&&++m=0;)(r=i[o])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=ut);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?function(t){return function(){this.style.removeProperty(t)}}:"function"==typeof n?function(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}:function(t,n,e){return function(){this.style.setProperty(t,n,e)}})(t,n,null==e?"":e)):ft(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?function(t){return function(){delete this[t]}}:"function"==typeof n?function(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}:function(t,n){return function(){this[t]=n}})(t,n)):this.node()[t]},classed:function(t,n){var e=st(t+"");if(arguments.length<2){for(var 
r=lt(this.node()),i=-1,o=e.length;++i=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}})}(t+""),a=o.length;if(!(arguments.length<2)){for(u=n?St:At,null==e&&(e=!1),r=0;r>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?new bn(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?new bn(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=on.exec(t))?new bn(n[1],n[2],n[3],1):(n=an.exec(t))?new bn(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=un.exec(t))?gn(n[1],n[2],n[3],n[4]):(n=cn.exec(t))?gn(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=fn.exec(t))?Mn(n[1],n[2]/100,n[3]/100,1):(n=sn.exec(t))?Mn(n[1],n[2]/100,n[3]/100,n[4]):ln.hasOwnProperty(t)?vn(ln[t]):"transparent"===t?new bn(NaN,NaN,NaN,0):null}function vn(t){return new bn(t>>16&255,t>>8&255,255&t,1)}function gn(t,n,e,r){return r<=0&&(t=n=e=NaN),new bn(t,n,e,r)}function yn(t){return t instanceof Jt||(t=pn(t)),t?new bn((t=t.rgb()).r,t.g,t.b,t.opacity):new bn}function _n(t,n,e,r){return 1===arguments.length?yn(t):new bn(t,n,e,null==r?1:r)}function bn(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function mn(){return"#"+wn(this.r)+wn(this.g)+wn(this.b)}function xn(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function wn(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Mn(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new An(t,n,e,r)}function Nn(t){if(t instanceof An)return new An(t.h,t.s,t.l,t.opacity);if(t instanceof Jt||(t=pn(t)),!t)return new An;if(t instanceof An)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),a=NaN,u=o-i,c=(o+i)/2;return u?(a=n===o?(e-r)/u+6*(e0&&c<1?0:a,new An(a,u,c,t.opacity)}function Tn(t,n,e,r){return 1===arguments.length?Nn(t):new 
An(t,n,e,null==r?1:r)}function An(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function Sn(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}Qt(Jt,pn,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:hn,formatHex:hn,formatHsl:function(){return Nn(this).formatHsl()},formatRgb:dn,toString:dn}),Qt(bn,_n,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new bn(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new bn(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:mn,formatHex:mn,formatRgb:xn,toString:xn})),Qt(An,Tn,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new An(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new An(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new bn(Sn(t>=240?t-240:t+120,i,r),Sn(t,i,r),Sn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var kn=Math.PI/180,En=180/Math.PI,Cn=.96422,Pn=1,zn=.82521,Rn=4/29,Dn=6/29,qn=3*Dn*Dn,Ln=Dn*Dn*Dn;function Un(t){if(t instanceof Bn)return new Bn(t.l,t.a,t.b,t.opacity);if(t instanceof Xn)return Gn(t);t instanceof bn||(t=yn(t));var n,e,r=Hn(t.r),i=Hn(t.g),o=Hn(t.b),a=Fn((.2225045*r+.7168786*i+.0606169*o)/Pn);return 
r===i&&i===o?n=e=a:(n=Fn((.4360747*r+.3850649*i+.1430804*o)/Cn),e=Fn((.0139322*r+.0971045*i+.7141733*o)/zn)),new Bn(116*a-16,500*(n-a),200*(a-e),t.opacity)}function On(t,n,e,r){return 1===arguments.length?Un(t):new Bn(t,n,e,null==r?1:r)}function Bn(t,n,e,r){this.l=+t,this.a=+n,this.b=+e,this.opacity=+r}function Fn(t){return t>Ln?Math.pow(t,1/3):t/qn+Rn}function Yn(t){return t>Dn?t*t*t:qn*(t-Rn)}function In(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Hn(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function jn(t){if(t instanceof Xn)return new Xn(t.h,t.c,t.l,t.opacity);if(t instanceof Bn||(t=Un(t)),0===t.a&&0===t.b)return new Xn(NaN,0=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],a=r>0?t[r-1]:2*i-o,u=r180||e<-180?e-360*Math.round(e/360):e):ue(isNaN(t)?n:t)}function se(t){return 1==(t=+t)?le:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):ue(isNaN(n)?e:n)}}function le(t,n){var e=n-t;return e?ce(t,e):ue(isNaN(t)?n:t)}Qt(re,ee,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new re(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new re(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*kn,n=+this.l,e=isNaN(this.s)?0:this.s*n*(1-n),r=Math.cos(t),i=Math.sin(t);return new bn(255*(n+e*($n*r+Wn*i)),255*(n+e*(Zn*r+Qn*i)),255*(n+e*(Kn*r)),this.opacity)}}));var he=function t(n){var e=se(n);function r(t,n){var r=e((t=_n(t)).r,(n=_n(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),a=le(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=a(n),t+""}}return r.gamma=t,r}(1);function de(t){return function(n){var e,r,i=n.length,o=new Array(i),a=new Array(i),u=new Array(i);for(e=0;eo&&(i=n.slice(o,i),u[a]?u[a]+=i:u[++a]=i),(e=e[0])===(r=r[0])?u[a]?u[a]+=r:u[++a]=r:(u[++a]=null,c.push({i:a,x:me(e,r)})),o=Me.lastIndex;return 
o180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:me(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,a.rotate,u,c),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:me(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,a.skewX,u,c),function(t,n,e,r,o,a){if(t!==e||n!==r){var u=o.push(i(o)+"scale(",null,",",null,")");a.push({i:u-4,x:me(t,e)},{i:u-2,x:me(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,a.scaleX,a.scaleY,u,c),o=a=null,function(t){for(var n,e=-1,r=c.length;++e=0&&n._call.call(null,t),n=n._next;--tr}function pr(){or=(ir=ur.now())+ar,tr=nr=0;try{dr()}finally{tr=0,function(){var t,n,e=Ke,r=1/0;for(;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:Ke=n);Je=t,gr(r)}(),or=0}}function vr(){var t=ur.now(),n=t-ir;n>rr&&(ar-=n,ir=t)}function gr(t){tr||(nr&&(nr=clearTimeout(nr)),t-or>24?(t<1/0&&(nr=setTimeout(pr,t-ur.now()-ar)),er&&(er=clearInterval(er))):(er||(ir=ur.now(),er=setInterval(vr,rr)),tr=1,cr(pr)))}function yr(t,n,e){var r=new lr;return n=null==n?0:+n,r.restart(function(e){r.stop(),t(e+n)},n,e),r}lr.prototype=hr.prototype={constructor:lr,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?fr():+e)+(null==n?0:+n),this._next||Je===this||(Je?Je._next=this:Ke=this,Je=this),this._call=t,this._time=e,gr()},stop:function(){this._call&&(this._call=null,this._time=1/0,gr())}};var _r=I("start","end","cancel","interrupt"),br=[],mr=0,xr=1,wr=2,Mr=3,Nr=4,Tr=5,Ar=6;function Sr(t,n,e,r,i,o){var a=t.__transition;if(a){if(e in a)return}else t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(c){var f,s,l,h;if(e.state!==xr)return u();for(f in i)if((h=i[f]).name===e.name){if(h.state===Mr)return yr(o);h.state===Nr?(h.state=Ar,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[f]):+fmr)throw new Error("too late; already scheduled");return e}function Er(t,n){var 
e=Cr(t,n);if(e.state>Mr)throw new Error("too late; already running");return e}function Cr(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}function Pr(t,n){var e,r,i,o=t.__transition,a=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>wr&&e.state=0&&(t=t.slice(0,n)),!t||"start"===t})}(n)?kr:Er;return function(){var a=o(this,t),u=a.on;u!==r&&(i=(r=u).copy()).on(n,e),a.on=i}}(e,t,n))},attr:function(t,n){var e=W(t),r="transform"===e?Le:Rr;return this.attrTween(t,"function"==typeof n?(e.local?function(t,n,e){var r,i,o;return function(){var a,u,c=e(this);if(null!=c)return(a=this.getAttributeNS(t.space,t.local))===(u=c+"")?null:a===r&&u===i?o:(i=u,o=n(r=a,c));this.removeAttributeNS(t.space,t.local)}}:function(t,n,e){var r,i,o;return function(){var a,u,c=e(this);if(null!=c)return(a=this.getAttribute(t))===(u=c+"")?null:a===r&&u===i?o:(i=u,o=n(r=a,c));this.removeAttribute(t)}})(e,r,zr(this,"attr."+t,n)):null==n?(e.local?function(t){return function(){this.removeAttributeNS(t.space,t.local)}}:function(t){return function(){this.removeAttribute(t)}})(e):(e.local?function(t,n,e){var r,i,o=e+"";return function(){var a=this.getAttributeNS(t.space,t.local);return a===o?null:a===r?i:i=n(r=a,e)}}:function(t,n,e){var r,i,o=e+"";return function(){var a=this.getAttribute(t);return a===o?null:a===r?i:i=n(r=a,e)}})(e,r,n))},attrTween:function(t,n){var e="attr."+t;if(arguments.length<2)return(e=this.tween(e))&&e._value;if(null==n)return this.tween(e,null);if("function"!=typeof n)throw new Error;var r=W(t);return this.tween(e,(r.local?function(t,n){var e,r;function i(){var i=n.apply(this,arguments);return i!==r&&(e=(r=i)&&function(t,n){return function(e){this.setAttributeNS(t.space,t.local,n.call(this,e))}}(t,i)),e}return i._value=n,i}:function(t,n){var e,r;function i(){var i=n.apply(this,arguments);return i!==r&&(e=(r=i)&&function(t,n){return function(e){this.setAttribute(t,n.call(this,e))}}(t,i)),e}return 
i._value=n,i})(r,n))},style:function(t,n,e){var r="transform"==(t+="")?qe:Rr;return null==n?this.styleTween(t,function(t,n){var e,r,i;return function(){var o=ft(this,t),a=(this.style.removeProperty(t),ft(this,t));return o===a?null:o===e&&a===r?i:i=n(e=o,r=a)}}(t,r)).on("end.style."+t,qr(t)):"function"==typeof n?this.styleTween(t,function(t,n,e){var r,i,o;return function(){var a=ft(this,t),u=e(this),c=u+"";return null==u&&(this.style.removeProperty(t),c=u=ft(this,t)),a===c?null:a===r&&c===i?o:(i=c,o=n(r=a,u))}}(t,r,zr(this,"style."+t,n))).each(function(t,n){var e,r,i,o,a="style."+n,u="end."+a;return function(){var c=Er(this,t),f=c.on,s=null==c.value[a]?o||(o=qr(n)):void 0;f===e&&i===s||(r=(e=f).copy()).on(u,i=s),c.on=r}}(this._id,t)):this.styleTween(t,function(t,n,e){var r,i,o=e+"";return function(){var a=ft(this,t);return a===o?null:a===r?i:i=n(r=a,e)}}(t,r,n),e).on("end.style."+t,null)},styleTween:function(t,n,e){var r="style."+(t+="");if(arguments.length<2)return(r=this.tween(r))&&r._value;if(null==n)return this.tween(r,null);if("function"!=typeof n)throw new Error;return this.tween(r,function(t,n,e){var r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&function(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}(t,o,e)),r}return o._value=n,o}(t,n,null==e?"":e))},text:function(t){return this.tween("text","function"==typeof t?function(t){return function(){var n=t(this);this.textContent=null==n?"":n}}(zr(this,"text",t)):function(t){return function(){this.textContent=t}}(null==t?"":t+""))},textTween:function(t){var n="text";if(arguments.length<1)return(n=this.tween(n))&&n._value;if(null==t)return this.tween(n,null);if("function"!=typeof t)throw new Error;return this.tween(n,function(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&function(t){return function(n){this.textContent=t.call(this,n)}}(r)),n}return r._value=t,r}(t))},remove:function(){return this.on("end.remove",function(t){return 
function(){var n=this.parentNode;for(var e in this.__transition)if(+e!==t)return;n&&n.removeChild(this)}}(this._id))},tween:function(t,n){var e=this._id;if(t+="",arguments.length<2){for(var r,i=Cr(this.node(),e).tween,o=0,a=i.length;o0&&(r=o-P),M<0?d=p-z:M>0&&(u=c-z),x=Mi,B.attr("cursor",Pi.selection),I());break;default:return}xi()},!0).on("keyup.brush",function(){switch(t.event.keyCode){case 16:R&&(g=y=R=!1,I());break;case 18:x===Ti&&(w<0?f=h:w>0&&(r=o),M<0?d=p:M>0&&(u=c),x=Ni,I());break;case 32:x===Mi&&(t.event.altKey?(w&&(f=h-P*w,r=o+P*w),M&&(d=p-z*M,u=c+z*M),x=Ti):(w<0?f=h:w>0&&(r=o),M<0?d=p:M>0&&(u=c),x=Ni),B.attr("cursor",Pi[m]),I());break;default:return}xi()},!0),Ht(t.event.view)}mi(),Pr(b),s.call(b),U.start()}function Y(){var t=D(b);!R||g||y||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?y=!0:g=!0),L=t,v=!0,xi(),I()}function I(){var t;switch(P=L[0]-q[0],z=L[1]-q[1],x){case Mi:case wi:w&&(P=Math.max(S-r,Math.min(E-f,P)),o=r+P,h=f+P),M&&(z=Math.max(k-u,Math.min(C-d,z)),c=u+z,p=d+z);break;case Ni:w<0?(P=Math.max(S-r,Math.min(E-r,P)),o=r+P,h=f):w>0&&(P=Math.max(S-f,Math.min(E-f,P)),o=r,h=f+P),M<0?(z=Math.max(k-u,Math.min(C-u,z)),c=u+z,p=d):M>0&&(z=Math.max(k-d,Math.min(C-d,z)),c=u,p=d+z);break;case Ti:w&&(o=Math.max(S,Math.min(E,r-P*w)),h=Math.max(S,Math.min(E,f+P*w))),M&&(c=Math.max(k,Math.min(C,u-z*M)),p=Math.max(k,Math.min(C,d+z*M)))}h1e-6)if(Math.abs(s*u-c*f)>1e-6&&i){var h=e-o,d=r-a,p=u*u+c*c,v=h*h+d*d,g=Math.sqrt(p),y=Math.sqrt(l),_=i*Math.tan((Qi-Math.acos((p+l-v)/(2*g*y)))/2),b=_/y,m=_/g;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*f)+","+(n+b*s)),this._+="A"+i+","+i+",0,0,"+ +(s*h>f*d)+","+(this._x1=t+m*u)+","+(this._y1=n+m*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n,o=!!o;var a=(e=+e)*Math.cos(r),u=e*Math.sin(r),c=t+a,f=n+u,s=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: 
"+e);null===this._x1?this._+="M"+c+","+f:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-f)>1e-6)&&(this._+="L"+c+","+f),e&&(l<0&&(l=l%Ki+Ki),l>Ji?this._+="A"+e+","+e+",0,1,"+s+","+(t-a)+","+(n-u)+"A"+e+","+e+",0,1,"+s+","+(this._x1=c)+","+(this._y1=f):l>1e-6&&(this._+="A"+e+","+e+",0,"+ +(l>=Qi)+","+s+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function uo(){}function co(t,n){var e=new uo;if(t instanceof uo)t.each(function(t,n){e.set(n,t)});else if(Array.isArray(t)){var r,i=-1,o=t.length;if(null==n)for(;++ir!=d>r&&e<(h-f)*(r-s)/(d-s)+f&&(i=-i)}return i}function wo(t,n,e){var r,i,o,a;return function(t,n,e){return(n[0]-t[0])*(e[1]-t[1])==(e[0]-t[0])*(n[1]-t[1])}(t,n,e)&&(i=t[r=+(t[0]===n[0])],o=e[r],a=n[r],i<=o&&o<=a||a<=o&&o<=i)}function Mo(){}var No=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]];function To(){var t=1,n=1,e=M,r=u;function i(t){var n=e(t);if(Array.isArray(n))n=n.slice().sort(_o);else{var r=s(t),i=r[0],a=r[1];n=w(i,a,n),n=g(Math.floor(i/n)*n,Math.floor(a/n)*n,n)}return n.map(function(n){return o(t,n)})}function o(e,i){var o=[],u=[];return function(e,r,i){var o,u,c,f,s,l,h=new Array,d=new Array;o=u=-1,f=e[0]>=r,No[f<<1].forEach(p);for(;++o=r,No[c|f<<1].forEach(p);No[f<<0].forEach(p);for(;++u=r,s=e[u*t]>=r,No[f<<1|s<<2].forEach(p);++o=r,l=s,s=e[u*t+o+1]>=r,No[c|f<<1|s<<2|l<<3].forEach(p);No[f|s<<3].forEach(p)}o=-1,s=e[u*t]>=r,No[s<<2].forEach(p);for(;++o=r,No[s<<2|l<<3].forEach(p);function p(t){var n,e,r=[t[0][0]+o,t[0][1]+u],c=[t[1][0]+o,t[1][1]+u],f=a(r),s=a(c);(n=d[f])?(e=h[s])?(delete d[n.end],delete 
h[e.start],n===e?(n.ring.push(c),i(n.ring)):h[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete d[n.end],n.ring.push(c),d[n.end=s]=n):(n=h[s])?(e=d[f])?(delete h[n.start],delete d[e.end],n===e?(n.ring.push(c),i(n.ring)):h[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete h[n.start],n.ring.unshift(r),h[n.start=f]=n):h[f]=d[s]={start:f,end:s,ring:[r,c]}}No[s<<3].forEach(p)}(e,i,function(t){r(t,e,i),function(t){for(var n=0,e=t.length,r=t[e-1][1]*t[0][0]-t[e-1][0]*t[0][1];++n0?o.push([t]):u.push(t)}),u.forEach(function(t){for(var n,e=0,r=o.length;e0&&a0&&u0&&o>0))throw new Error("invalid size");return t=r,n=o,i},i.thresholds=function(t){return arguments.length?(e="function"==typeof t?t:Array.isArray(t)?bo(yo.call(t)):bo(t),i):e},i.smooth=function(t){return arguments.length?(r=t?u:Mo,i):r===u},i}function Ao(t,n,e){for(var r=t.width,i=t.height,o=1+(e<<1),a=0;a=e&&(u>=o&&(c-=t.data[u-o+a*r]),n.data[u-e+a*r]=c/Math.min(u+1,r-1+o-u,o))}function So(t,n,e){for(var r=t.width,i=t.height,o=1+(e<<1),a=0;a=e&&(u>=o&&(c-=t.data[a+(u-o)*r]),n.data[a+(u-e)*r]=c/Math.min(u+1,i-1+o-u,o))}function ko(t){return t[0]}function Eo(t){return t[1]}function Co(){return 1}var Po={},zo={},Ro=34,Do=10,qo=13;function Lo(t){return new Function("d","return {"+t.map(function(t,n){return JSON.stringify(t)+": d["+n+'] || ""'}).join(",")+"}")}function Uo(t){var n=Object.create(null),e=[];return t.forEach(function(t){for(var r in t)r in n||e.push(n[r]=r)}),e}function Oo(t,n){var e=t+"",r=e.length;return r9999?"+"+Oo(t,6):Oo(t,4)}(t.getUTCFullYear())+"-"+Oo(t.getUTCMonth()+1,2)+"-"+Oo(t.getUTCDate(),2)+(i?"T"+Oo(n,2)+":"+Oo(e,2)+":"+Oo(r,2)+"."+Oo(i,3)+"Z":r?"T"+Oo(n,2)+":"+Oo(e,2)+":"+Oo(r,2)+"Z":e||n?"T"+Oo(n,2)+":"+Oo(e,2)+"Z":"")}function Fo(t){var n=new RegExp('["'+t+"\n\r]"),e=t.charCodeAt(0);function r(t,n){var r,i=[],o=t.length,a=0,u=0,c=o<=0,f=!1;function s(){if(c)return zo;if(f)return f=!1,Po;var 
n,r,i=a;if(t.charCodeAt(i)===Ro){for(;a++=o?c=!0:(r=t.charCodeAt(a++))===Do?f=!0:r===qo&&(f=!0,t.charCodeAt(a)===Do&&++a),t.slice(i+1,n-1).replace(/""/g,'"')}for(;a=(o=(v+y)/2))?v=o:y=o,(s=e>=(a=(g+_)/2))?g=a:_=a,i=d,!(d=d[l=s<<1|f]))return i[l]=p,t;if(u=+t._x.call(null,d.data),c=+t._y.call(null,d.data),n===u&&e===c)return p.next=d,i?i[l]=p:t._root=p,t;do{i=i?i[l]=new Array(4):t._root=new Array(4),(f=n>=(o=(v+y)/2))?v=o:y=o,(s=e>=(a=(g+_)/2))?g=a:_=a}while((l=s<<1|f)==(h=(c>=a)<<1|u>=o));return i[h]=d,i[l]=p,t}function ba(t,n,e,r,i){this.node=t,this.x0=n,this.y0=e,this.x1=r,this.y1=i}function ma(t){return t[0]}function xa(t){return t[1]}function wa(t,n,e){var r=new Ma(null==n?ma:n,null==e?xa:e,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Ma(t,n,e,r,i,o){this._x=t,this._y=n,this._x0=e,this._y0=r,this._x1=i,this._y1=o,this._root=void 0}function Na(t){for(var n={data:t.data},e=n;t=t.next;)e=e.next={data:t.data};return n}var Ta=wa.prototype=Ma.prototype;function Aa(t){return t.x+t.vx}function Sa(t){return t.y+t.vy}function ka(t){return t.index}function Ea(t,n){var e=t.get(n);if(!e)throw new Error("missing: "+n);return e}function Ca(t){return t.x}function Pa(t){return t.y}Ta.copy=function(){var t,n,e=new Ma(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return e;if(!r.length)return e._root=Na(r),e;for(t=[{source:r,target:e._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(n=r.source[i])&&(n.length?t.push({source:n,target:r.target[i]=new Array(4)}):r.target[i]=Na(n));return e},Ta.add=function(t){var n=+this._x.call(null,t),e=+this._y.call(null,t);return _a(this.cover(n,e),n,e,t)},Ta.addAll=function(t){var n,e,r,i,o=t.length,a=new Array(o),u=new Array(o),c=1/0,f=1/0,s=-1/0,l=-1/0;for(e=0;es&&(s=r),il&&(l=i));if(c>s||f>l)return this;for(this.cover(c,f).cover(s,l),e=0;et||t>=i||r>n||n>=o;)switch(u=(nh||(o=c.y0)>d||(a=c.x1)=y)<<1|t>=g)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-f],p[p.length-1-f]=c)}else{var 
_=t-+this._x.call(null,v.data),b=n-+this._y.call(null,v.data),m=_*_+b*b;if(m=(u=(p+g)/2))?p=u:g=u,(s=a>=(c=(v+y)/2))?v=c:y=c,n=d,!(d=d[l=s<<1|f]))return this;if(!d.length)break;(n[l+1&3]||n[l+2&3]||n[l+3&3])&&(e=n,h=l)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):n?(i?n[l]=i:delete n[l],(d=n[0]||n[1]||n[2]||n[3])&&d===(n[3]||n[2]||n[1]||n[0])&&!d.length&&(e?e[h]=d:this._root=d),this):(this._root=i,this)},Ta.removeAll=function(t){for(var n=0,e=t.length;n1?r[0]+r.slice(2):r,+t.slice(e+1)]}function qa(t){return(t=Da(Math.abs(t)))?t[1]:NaN}var La,Ua=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Oa(t){if(!(n=Ua.exec(t)))throw new Error("invalid format: "+t);var n;return new Ba({fill:n[1],align:n[2],sign:n[3],symbol:n[4],zero:n[5],width:n[6],comma:n[7],precision:n[8]&&n[8].slice(1),trim:n[9],type:n[10]})}function Ba(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function Fa(t,n){var e=Da(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Oa.prototype=Ba.prototype,Ba.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Ya={"%":function(t,n){return(100*t).toFixed(n)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,n){return 
t.toExponential(n)},f:function(t,n){return t.toFixed(n)},g:function(t,n){return t.toPrecision(n)},o:function(t){return Math.round(t).toString(8)},p:function(t,n){return Fa(100*t,n)},r:Fa,s:function(t,n){var e=Da(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(La=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,a=r.length;return o===a?r:o>a?r+new Array(o-a+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+Da(t,Math.max(0,n+o-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}};function Ia(t){return t}var Ha,ja=Array.prototype.map,Va=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function Xa(t){var n,e,r=void 0===t.grouping||void 0===t.thousands?Ia:(n=ja.call(t.grouping,Number),e=t.thousands+"",function(t,r){for(var i=t.length,o=[],a=0,u=n[0],c=0;i>0&&u>0&&(c+u+1>r&&(u=Math.max(1,r-c)),o.push(t.substring(i-=u,i+u)),!((c+=u+1)>r));)u=n[a=(a+1)%n.length];return o.reverse().join(e)}),i=void 0===t.currency?"":t.currency[0]+"",o=void 0===t.currency?"":t.currency[1]+"",a=void 0===t.decimal?".":t.decimal+"",u=void 0===t.numerals?Ia:function(t){return function(n){return n.replace(/[0-9]/g,function(n){return t[+n]})}}(ja.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",f=void 0===t.minus?"-":t.minus+"",s=void 0===t.nan?"NaN":t.nan+"";function l(t){var n=(t=Oa(t)).fill,e=t.align,l=t.sign,h=t.symbol,d=t.zero,p=t.width,v=t.comma,g=t.precision,y=t.trim,_=t.type;"n"===_?(v=!0,_="g"):Ya[_]||(void 0===g&&(g=12),y=!0,_="g"),(d||"0"===n&&"="===e)&&(d=!0,n="0",e="=");var b="$"===h?i:"#"===h&&/[boxX]/.test(_)?"0"+_.toLowerCase():"",m="$"===h?o:/[%p]/.test(_)?c:"",x=Ya[_],w=/[defgprs%]/.test(_);function M(t){var i,o,c,h=b,M=m;if("c"===_)M=x(t)+M,t="";else{var N=(t=+t)<0;if(t=isNaN(t)?s:x(Math.abs(t),g),y&&(t=function(t){t:for(var n,e=t.length,r=1,i=-1;r0&&(i=0)}return 
i>0?t.slice(0,i)+t.slice(n+1):t}(t)),N&&0==+t&&(N=!1),h=(N?"("===l?l:f:"-"===l||"("===l?"":l)+h,M=("s"===_?Va[8+La/3]:"")+M+(N&&"("===l?")":""),w)for(i=-1,o=t.length;++i(c=t.charCodeAt(i))||c>57){M=(46===c?a+t.slice(i+1):t.slice(i))+M,t=t.slice(0,i);break}}v&&!d&&(t=r(t,1/0));var T=h.length+t.length+M.length,A=T>1)+h+t+M+A.slice(T);break;default:t=A+h+t+M}return u(t)}return g=void 0===g?6:/[gprs]/.test(_)?Math.max(1,Math.min(21,g)):Math.max(0,Math.min(20,g)),M.toString=function(){return t+""},M}return{format:l,formatPrefix:function(t,n){var e=l(((t=Oa(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(qa(n)/3))),i=Math.pow(10,-r),o=Va[8+r/3];return function(t){return e(i*t)+o}}}}function Ga(n){return Ha=Xa(n),t.format=Ha.format,t.formatPrefix=Ha.formatPrefix,Ha}function $a(t){return Math.max(0,-qa(Math.abs(t)))}function Wa(t,n){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(qa(n)/3)))-qa(Math.abs(t)))}function Za(t,n){return t=Math.abs(t),n=Math.abs(n)-t,Math.max(0,qa(n)-qa(t))+1}function Qa(){return new Ka}function Ka(){this.reset()}Ga({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"}),Ka.prototype={constructor:Ka,reset:function(){this.s=this.t=0},add:function(t){tu(Ja,t,this.t),tu(this,Ja.s,this.s),this.s?this.t+=Ja.t:this.s=Ja.t},valueOf:function(){return this.s}};var Ja=new Ka;function tu(t,n,e){var r=t.s=n+e,i=r-n,o=r-i;t.t=n-o+(e-i)}var nu=1e-6,eu=1e-12,ru=Math.PI,iu=ru/2,ou=ru/4,au=2*ru,uu=180/ru,cu=ru/180,fu=Math.abs,su=Math.atan,lu=Math.atan2,hu=Math.cos,du=Math.ceil,pu=Math.exp,vu=Math.log,gu=Math.pow,yu=Math.sin,_u=Math.sign||function(t){return t>0?1:t<0?-1:0},bu=Math.sqrt,mu=Math.tan;function xu(t){return t>1?0:t<-1?ru:Math.acos(t)}function wu(t){return t>1?iu:t<-1?-iu:Math.asin(t)}function Mu(t){return(t=yu(t/2))*t}function Nu(){}function Tu(t,n){t&&Su.hasOwnProperty(t.type)&&Su[t.type](t,n)}var Au={Feature:function(t,n){Tu(t.geometry,n)},FeatureCollection:function(t,n){for(var 
e=t.features,r=-1,i=e.length;++r=0?1:-1,i=r*e,o=hu(n=(n*=cu)/2+ou),a=yu(n),u=qu*a,c=Du*o+u*hu(i),f=u*r*yu(i);Lu.add(lu(f,c)),Ru=t,Du=o,qu=a}function Hu(t){return[lu(t[1],t[0]),wu(t[2])]}function ju(t){var n=t[0],e=t[1],r=hu(e);return[r*hu(n),r*yu(n),yu(e)]}function Vu(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]}function Xu(t,n){return[t[1]*n[2]-t[2]*n[1],t[2]*n[0]-t[0]*n[2],t[0]*n[1]-t[1]*n[0]]}function Gu(t,n){t[0]+=n[0],t[1]+=n[1],t[2]+=n[2]}function $u(t,n){return[t[0]*n,t[1]*n,t[2]*n]}function Wu(t){var n=bu(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=n,t[1]/=n,t[2]/=n}var Zu,Qu,Ku,Ju,tc,nc,ec,rc,ic,oc,ac,uc,cc,fc,sc,lc,hc,dc,pc,vc,gc,yc,_c,bc,mc,xc,wc=Qa(),Mc={point:Nc,lineStart:Ac,lineEnd:Sc,polygonStart:function(){Mc.point=kc,Mc.lineStart=Ec,Mc.lineEnd=Cc,wc.reset(),Ou.polygonStart()},polygonEnd:function(){Ou.polygonEnd(),Mc.point=Nc,Mc.lineStart=Ac,Mc.lineEnd=Sc,Lu<0?(Zu=-(Ku=180),Qu=-(Ju=90)):wc>nu?Ju=90:wc<-nu&&(Qu=-90),oc[0]=Zu,oc[1]=Ku},sphere:function(){Zu=-(Ku=180),Qu=-(Ju=90)}};function Nc(t,n){ic.push(oc=[Zu=t,Ku=t]),nJu&&(Ju=n)}function Tc(t,n){var e=ju([t*cu,n*cu]);if(rc){var r=Xu(rc,e),i=Xu([r[1],-r[0],0],r);Wu(i),i=Hu(i);var o,a=t-tc,u=a>0?1:-1,c=i[0]*uu*u,f=fu(a)>180;f^(u*tcJu&&(Ju=o):f^(u*tc<(c=(c+360)%360-180)&&cJu&&(Ju=n)),f?tPc(Zu,Ku)&&(Ku=t):Pc(t,Ku)>Pc(Zu,Ku)&&(Zu=t):Ku>=Zu?(tKu&&(Ku=t)):t>tc?Pc(Zu,t)>Pc(Zu,Ku)&&(Ku=t):Pc(t,Ku)>Pc(Zu,Ku)&&(Zu=t)}else ic.push(oc=[Zu=t,Ku=t]);nJu&&(Ju=n),rc=e,tc=t}function Ac(){Mc.point=Tc}function Sc(){oc[0]=Zu,oc[1]=Ku,Mc.point=Nc,rc=null}function kc(t,n){if(rc){var e=t-tc;wc.add(fu(e)>180?e+(e>0?360:-360):e)}else nc=t,ec=n;Ou.point(t,n),Tc(t,n)}function Ec(){Ou.lineStart()}function Cc(){kc(nc,ec),Ou.lineEnd(),fu(wc)>nu&&(Zu=-(Ku=180)),oc[0]=Zu,oc[1]=Ku,rc=null}function Pc(t,n){return(n-=t)<0?n+360:n}function zc(t,n){return t[0]-n[0]}function Rc(t,n){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nru?t+Math.round(-t/au)*au:t,n]}function $c(t,n,e){return(t%=au)?n||e?Xc(Zc(t),Qc(n,e)):Zc(t):n||e?Qc(n,e):Gc}function Wc(t){return 
function(n,e){return[(n+=t)>ru?n-au:n<-ru?n+au:n,e]}}function Zc(t){var n=Wc(t);return n.invert=Wc(-t),n}function Qc(t,n){var e=hu(t),r=yu(t),i=hu(n),o=yu(n);function a(t,n){var a=hu(n),u=hu(t)*a,c=yu(t)*a,f=yu(n),s=f*e+u*r;return[lu(c*i-s*o,u*e-f*r),wu(s*i+c*o)]}return a.invert=function(t,n){var a=hu(n),u=hu(t)*a,c=yu(t)*a,f=yu(n),s=f*i-c*o;return[lu(c*i+f*o,u*e+s*r),wu(s*e-u*r)]},a}function Kc(t){function n(n){return(n=t(n[0]*cu,n[1]*cu))[0]*=uu,n[1]*=uu,n}return t=$c(t[0]*cu,t[1]*cu,t.length>2?t[2]*cu:0),n.invert=function(n){return(n=t.invert(n[0]*cu,n[1]*cu))[0]*=uu,n[1]*=uu,n},n}function Jc(t,n,e,r,i,o){if(e){var a=hu(n),u=yu(n),c=r*e;null==i?(i=n+r*au,o=n-c/2):(i=tf(a,i),o=tf(a,o),(r>0?io)&&(i+=r*au));for(var f,s=i;r>0?s>o:s1&&n.push(n.pop().concat(n.shift()))},result:function(){var e=n;return n=[],t=null,e}}}function ef(t,n){return fu(t[0]-n[0])=0;--o)i.point((s=f[o])[0],s[1]);else r(h.x,h.p.x,-1,i);h=h.p}f=(h=h.o).z,d=!d}while(!h.v);i.lineEnd()}}}function af(t){if(n=t.length){for(var n,e,r=0,i=t[0];++r=0?1:-1,T=N*M,A=T>ru,S=v*x;if(uf.add(lu(S*N*yu(T),g*w+S*hu(T))),a+=A?M+N*au:M,A^d>=e^b>=e){var k=Xu(ju(h),ju(_));Wu(k);var E=Xu(o,k);Wu(E);var C=(A^M>=0?-1:1)*wu(E[2]);(r>C||r===C&&(k[0]||k[1]))&&(u+=A^M>=0?1:-1)}}return(a<-nu||a0){for(l||(i.polygonStart(),l=!0),i.lineStart(),t=0;t1&&2&c&&h.push(h.pop().concat(h.shift())),a.push(h.filter(lf))}return h}}function lf(t){return t.length>1}function hf(t,n){return((t=t.x)[0]<0?t[1]-iu-nu:iu-t[1])-((n=n.x)[0]<0?n[1]-iu-nu:iu-n[1])}var df=sf(function(){return!0},function(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,a){var u=o>0?ru:-ru,c=fu(o-e);fu(c-ru)0?iu:-iu),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(u,r),t.point(o,r),n=0):i!==u&&c>=ru&&(fu(e-i)nu?su((yu(n)*(o=hu(r))*yu(e)-yu(r)*(i=hu(n))*yu(t))/(i*o*a)):(n+r)/2}(e,r,o,a),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(u,r),n=0),t.point(e=o,r=a),i=u},lineEnd:function(){t.lineEnd(),e=r=NaN},clean:function(){return 
2-n}}},function(t,n,e,r){var i;if(null==t)i=e*iu,r.point(-ru,i),r.point(0,i),r.point(ru,i),r.point(ru,0),r.point(ru,-i),r.point(0,-i),r.point(-ru,-i),r.point(-ru,0),r.point(-ru,i);else if(fu(t[0]-n[0])>nu){var o=t[0]0,i=fu(n)>nu;function o(t,e){return hu(t)*hu(e)>n}function a(t,e,r){var i=[1,0,0],o=Xu(ju(t),ju(e)),a=Vu(o,o),u=o[0],c=a-u*u;if(!c)return!r&&t;var f=n*a/c,s=-n*u/c,l=Xu(i,o),h=$u(i,f);Gu(h,$u(o,s));var d=l,p=Vu(h,d),v=Vu(d,d),g=p*p-v*(Vu(h,h)-1);if(!(g<0)){var y=bu(g),_=$u(d,(-p-y)/v);if(Gu(_,h),_=Hu(_),!r)return _;var b,m=t[0],x=e[0],w=t[1],M=e[1];x0^_[1]<(fu(_[0]-m)ru^(m<=_[0]&&_[0]<=x)){var A=$u(d,(-p+y)/v);return Gu(A,h),[_,Hu(A)]}}}function u(n,e){var i=r?t:ru-t,o=0;return n<-i?o|=1:n>i&&(o|=2),e<-i?o|=4:e>i&&(o|=8),o}return sf(o,function(t){var n,e,c,f,s;return{lineStart:function(){f=c=!1,s=1},point:function(l,h){var d,p=[l,h],v=o(l,h),g=r?v?0:u(l,h):v?u(l+(l<0?ru:-ru),h):0;if(!n&&(f=c=v)&&t.lineStart(),v!==c&&(!(d=a(n,p))||ef(n,d)||ef(p,d))&&(p[0]+=nu,p[1]+=nu,v=o(p[0],p[1])),v!==c)s=0,v?(t.lineStart(),d=a(p,n),t.point(d[0],d[1])):(d=a(n,p),t.point(d[0],d[1]),t.lineEnd()),n=d;else if(i&&n&&r^v){var y;g&e||!(y=a(p,n,!0))||(s=0,r?(t.lineStart(),t.point(y[0][0],y[0][1]),t.point(y[1][0],y[1][1]),t.lineEnd()):(t.point(y[1][0],y[1][1]),t.lineEnd(),t.lineStart(),t.point(y[0][0],y[0][1])))}!v||n&&ef(n,p)||t.point(p[0],p[1]),n=p,c=v,e=g},lineEnd:function(){c&&t.lineEnd(),n=null},clean:function(){return s|(f&&c)<<1}}},function(n,r,i,o){Jc(o,t,e,i,n,r)},r?[0,-t]:[-ru,t-ru])}var vf=1e9,gf=-vf;function yf(t,n,e,r){function i(i,o){return t<=i&&i<=e&&n<=o&&o<=r}function o(i,o,u,f){var s=0,l=0;if(null==i||(s=a(i,u))!==(l=a(o,u))||c(i,o)<0^u>0)do{f.point(0===s||3===s?t:e,s>1?r:n)}while((s=(s+u+4)%4)!==l);else f.point(o[0],o[1])}function a(r,i){return fu(r[0]-t)0?0:3:fu(r[0]-e)0?2:1:fu(r[1]-n)0?1:0:i>0?3:2}function u(t,n){return c(t.x,n.x)}function c(t,n){var e=a(t,1),r=a(n,1);return e!==r?e-r:0===e?n[1]-t[1]:1===e?t[0]-n[0]:2===e?t[1]-n[1]:n[0]-t[0]}return 
function(a){var c,f,s,l,h,d,p,v,g,y,_,b=a,m=nf(),x={point:w,lineStart:function(){x.point=M,f&&f.push(s=[]);y=!0,g=!1,p=v=NaN},lineEnd:function(){c&&(M(l,h),d&&g&&m.rejoin(),c.push(m.result()));x.point=w,g&&b.lineEnd()},polygonStart:function(){b=m,c=[],f=[],_=!0},polygonEnd:function(){var n=function(){for(var n=0,e=0,i=f.length;er&&(h-o)*(r-a)>(d-a)*(t-o)&&++n:d<=r&&(h-o)*(r-a)<(d-a)*(t-o)&&--n;return n}(),e=_&&n,i=(c=A(c)).length;(e||i)&&(a.polygonStart(),e&&(a.lineStart(),o(null,null,1,a),a.lineEnd()),i&&of(c,u,n,o,a),a.polygonEnd());b=a,c=f=s=null}};function w(t,n){i(t,n)&&b.point(t,n)}function M(o,a){var u=i(o,a);if(f&&s.push([o,a]),y)l=o,h=a,d=u,y=!1,u&&(b.lineStart(),b.point(o,a));else if(u&&g)b.point(o,a);else{var c=[p=Math.max(gf,Math.min(vf,p)),v=Math.max(gf,Math.min(vf,v))],m=[o=Math.max(gf,Math.min(vf,o)),a=Math.max(gf,Math.min(vf,a))];!function(t,n,e,r,i,o){var a,u=t[0],c=t[1],f=0,s=1,l=n[0]-u,h=n[1]-c;if(a=e-u,l||!(a>0)){if(a/=l,l<0){if(a0){if(a>s)return;a>f&&(f=a)}if(a=i-u,l||!(a<0)){if(a/=l,l<0){if(a>s)return;a>f&&(f=a)}else if(l>0){if(a0)){if(a/=h,h<0){if(a0){if(a>s)return;a>f&&(f=a)}if(a=o-c,h||!(a<0)){if(a/=h,h<0){if(a>s)return;a>f&&(f=a)}else if(h>0){if(a0&&(t[0]=u+f*l,t[1]=c+f*h),s<1&&(n[0]=u+s*l,n[1]=c+s*h),!0}}}}}(c,m,t,n,e,r)?u&&(b.lineStart(),b.point(o,a),_=!1):(g||(b.lineStart(),b.point(c[0],c[1])),b.point(m[0],m[1]),u||b.lineEnd(),_=!1)}p=o,v=a,g=u}return x}}var _f,bf,mf,xf=Qa(),wf={sphere:Nu,point:Nu,lineStart:function(){wf.point=Nf,wf.lineEnd=Mf},lineEnd:Nu,polygonStart:Nu,polygonEnd:Nu};function Mf(){wf.point=wf.lineEnd=Nu}function Nf(t,n){_f=t*=cu,bf=yu(n*=cu),mf=hu(n),wf.point=Tf}function Tf(t,n){t*=cu;var e=yu(n*=cu),r=hu(n),i=fu(t-_f),o=hu(i),a=r*yu(i),u=mf*e-bf*r*o,c=bf*e+mf*r*o;xf.add(lu(bu(a*a+u*u),c)),_f=t,bf=e,mf=r}function Af(t){return xf.reset(),Cu(t,wf),+xf}var Sf=[null,null],kf={type:"LineString",coordinates:Sf};function Ef(t,n){return Sf[0]=t,Sf[1]=n,Af(kf)}var Cf={Feature:function(t,n){return 
zf(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++r0&&(i=Ef(t[o],t[o-1]))>0&&e<=i&&r<=i&&(e+r-i)*(1-Math.pow((e-r)/i,2))nu}).map(c)).concat(g(du(o/d)*d,i,d).filter(function(t){return fu(t%v)>nu}).map(f))}return _.lines=function(){return b().map(function(t){return{type:"LineString",coordinates:t}})},_.outline=function(){return{type:"Polygon",coordinates:[s(r).concat(l(a).slice(1),s(e).reverse().slice(1),l(u).reverse().slice(1))]}},_.extent=function(t){return arguments.length?_.extentMajor(t).extentMinor(t):_.extentMinor()},_.extentMajor=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],u=+t[0][1],a=+t[1][1],r>e&&(t=r,r=e,e=t),u>a&&(t=u,u=a,a=t),_.precision(y)):[[r,u],[e,a]]},_.extentMinor=function(e){return arguments.length?(n=+e[0][0],t=+e[1][0],o=+e[0][1],i=+e[1][1],n>t&&(e=n,n=t,t=e),o>i&&(e=o,o=i,i=e),_.precision(y)):[[n,o],[t,i]]},_.step=function(t){return arguments.length?_.stepMajor(t).stepMinor(t):_.stepMinor()},_.stepMajor=function(t){return arguments.length?(p=+t[0],v=+t[1],_):[p,v]},_.stepMinor=function(t){return arguments.length?(h=+t[0],d=+t[1],_):[h,d]},_.precision=function(h){return arguments.length?(y=+h,c=Of(o,i,90),f=Bf(n,t,y),s=Of(u,a,90),l=Bf(r,e,y),_):y},_.extentMajor([[-180,-90+nu],[180,90-nu]]).extentMinor([[-180,-80-nu],[180,80+nu]])}function Yf(t){return t}var If,Hf,jf,Vf,Xf=Qa(),Gf=Qa(),$f={point:Nu,lineStart:Nu,lineEnd:Nu,polygonStart:function(){$f.lineStart=Wf,$f.lineEnd=Kf},polygonEnd:function(){$f.lineStart=$f.lineEnd=$f.point=Nu,Xf.add(fu(Gf)),Gf.reset()},result:function(){var t=Xf/2;return Xf.reset(),t}};function Wf(){$f.point=Zf}function Zf(t,n){$f.point=Qf,If=jf=t,Hf=Vf=n}function Qf(t,n){Gf.add(Vf*t-jf*n),jf=t,Vf=n}function Kf(){Qf(If,Hf)}var Jf=1/0,ts=Jf,ns=-Jf,es=ns,rs={point:function(t,n){tns&&(ns=t);nes&&(es=n)},lineStart:Nu,lineEnd:Nu,polygonStart:Nu,polygonEnd:Nu,result:function(){var t=[[Jf,ts],[ns,es]];return ns=es=-(ts=Jf=1/0),t}};var 
is,os,as,us,cs=0,fs=0,ss=0,ls=0,hs=0,ds=0,ps=0,vs=0,gs=0,ys={point:_s,lineStart:bs,lineEnd:ws,polygonStart:function(){ys.lineStart=Ms,ys.lineEnd=Ns},polygonEnd:function(){ys.point=_s,ys.lineStart=bs,ys.lineEnd=ws},result:function(){var t=gs?[ps/gs,vs/gs]:ds?[ls/ds,hs/ds]:ss?[cs/ss,fs/ss]:[NaN,NaN];return cs=fs=ss=ls=hs=ds=ps=vs=gs=0,t}};function _s(t,n){cs+=t,fs+=n,++ss}function bs(){ys.point=ms}function ms(t,n){ys.point=xs,_s(as=t,us=n)}function xs(t,n){var e=t-as,r=n-us,i=bu(e*e+r*r);ls+=i*(as+t)/2,hs+=i*(us+n)/2,ds+=i,_s(as=t,us=n)}function ws(){ys.point=_s}function Ms(){ys.point=Ts}function Ns(){As(is,os)}function Ts(t,n){ys.point=As,_s(is=as=t,os=us=n)}function As(t,n){var e=t-as,r=n-us,i=bu(e*e+r*r);ls+=i*(as+t)/2,hs+=i*(us+n)/2,ds+=i,ps+=(i=us*t-as*n)*(as+t),vs+=i*(us+n),gs+=3*i,_s(as=t,us=n)}function Ss(t){this._context=t}Ss.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._context.moveTo(t,n),this._point=1;break;case 1:this._context.lineTo(t,n);break;default:this._context.moveTo(t+this._radius,n),this._context.arc(t,n,this._radius,0,au)}},result:Nu};var ks,Es,Cs,Ps,zs,Rs=Qa(),Ds={point:Nu,lineStart:function(){Ds.point=qs},lineEnd:function(){ks&&Ls(Es,Cs),Ds.point=Nu},polygonStart:function(){ks=!0},polygonEnd:function(){ks=null},result:function(){var t=+Rs;return Rs.reset(),t}};function qs(t,n){Ds.point=Ls,Es=Ps=t,Cs=zs=n}function Ls(t,n){Ps-=t,zs-=n,Rs.add(bu(Ps*Ps+zs*zs)),Ps=t,zs=n}function Us(){this._string=[]}function Os(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Bs(t){return function(n){var e=new Fs;for(var r in t)e[r]=t[r];return e.stream=n,e}}function Fs(){}function Ys(t,n,e){var r=t.clipExtent&&t.clipExtent();return 
t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),Cu(e,t.stream(rs)),n(rs.result()),null!=r&&t.clipExtent(r),t}function Is(t,n,e){return Ys(t,function(e){var r=n[1][0]-n[0][0],i=n[1][1]-n[0][1],o=Math.min(r/(e[1][0]-e[0][0]),i/(e[1][1]-e[0][1])),a=+n[0][0]+(r-o*(e[1][0]+e[0][0]))/2,u=+n[0][1]+(i-o*(e[1][1]+e[0][1]))/2;t.scale(150*o).translate([a,u])},e)}function Hs(t,n,e){return Is(t,[[0,0],n],e)}function js(t,n,e){return Ys(t,function(e){var r=+n,i=r/(e[1][0]-e[0][0]),o=(r-i*(e[1][0]+e[0][0]))/2,a=-i*e[0][1];t.scale(150*i).translate([o,a])},e)}function Vs(t,n,e){return Ys(t,function(e){var r=+n,i=r/(e[1][1]-e[0][1]),o=-i*e[0][0],a=(r-i*(e[1][1]+e[0][1]))/2;t.scale(150*i).translate([o,a])},e)}Us.prototype={_radius:4.5,_circle:Os(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._string.push("M",t,",",n),this._point=1;break;case 1:this._string.push("L",t,",",n);break;default:null==this._circle&&(this._circle=Os(this._radius)),this._string.push("M",t,",",n,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}},Fs.prototype={constructor:Fs,point:function(t,n){this.stream.point(t,n)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Xs=16,Gs=hu(30*cu);function $s(t,n){return+n?function(t,n){function e(r,i,o,a,u,c,f,s,l,h,d,p,v,g){var y=f-r,_=s-i,b=y*y+_*_;if(b>4*n&&v--){var 
m=a+h,x=u+d,w=c+p,M=bu(m*m+x*x+w*w),N=wu(w/=M),T=fu(fu(w)-1)n||fu((y*E+_*C)/b-.5)>.3||a*h+u*d+c*p2?t[2]%360*cu:0,S()):[g*uu,y*uu,_*uu]},T.angle=function(t){return arguments.length?(b=t%360*cu,S()):b*uu},T.precision=function(t){return arguments.length?(a=$s(u,N=t*t),k()):bu(N)},T.fitExtent=function(t,n){return Is(T,t,n)},T.fitSize=function(t,n){return Hs(T,t,n)},T.fitWidth=function(t,n){return js(T,t,n)},T.fitHeight=function(t,n){return Vs(T,t,n)},function(){return n=t.apply(this,arguments),T.invert=n.invert&&A,S()}}function Js(t){var n=0,e=ru/3,r=Ks(t),i=r(n,e);return i.parallels=function(t){return arguments.length?r(n=t[0]*cu,e=t[1]*cu):[n*uu,e*uu]},i}function tl(t,n){var e=yu(t),r=(e+yu(n))/2;if(fu(r)0?n<-iu+nu&&(n=-iu+nu):n>iu-nu&&(n=iu-nu);var e=i/gu(fl(n),r);return[e*yu(r*t),i-e*hu(r*t)]}return o.invert=function(t,n){var e=i-n,o=_u(r)*bu(t*t+e*e);return[lu(t,fu(e))/r*_u(e),2*su(gu(i/o,1/r))-iu]},o}function ll(t,n){return[t,n]}function hl(t,n){var e=hu(t),r=t===n?yu(t):(e-hu(n))/(n-t),i=e/r+t;if(fu(r)=0;)n+=e[r].value;else n=1;t.value=n}function El(t,n){var e,r,i,o,a,u=new Rl(t),c=+t.value&&(u.value=t.value),f=[u];for(null==n&&(n=Cl);e=f.pop();)if(c&&(e.value=+e.data.value),(i=n(e.data))&&(a=i.length))for(e.children=new Array(a),o=a-1;o>=0;--o)f.push(r=e.children[o]=new Rl(i[o])),r.parent=e,r.depth=e.depth+1;return u.eachBefore(zl)}function Cl(t){return t.children}function Pl(t){t.data=t.data.data}function zl(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function Rl(t){this.data=t,this.depth=this.height=0,this.parent=null}_l.invert=function(t,n){for(var e,r=n,i=r*r,o=i*i*i,a=0;a<12&&(o=(i=(r-=e=(r*(dl+pl*i+o*(vl+gl*i))-n)/(dl+3*pl*i+o*(7*vl+9*gl*i)))*r)*i*i,!(fu(e)nu&&--i>0);return[t/(.8707+(o=r*r)*(o*(o*o*o*(.003971-.001529*o)-.013791)-.131979)),r]},wl.invert=il(wu),Ml.invert=il(function(t){return 2*su(t)}),Nl.invert=function(t,n){return[-n,2*su(pu(t))-iu]},Rl.prototype=El.prototype={constructor:Rl,count:function(){return 
this.eachAfter(kl)},each:function(t){var n,e,r,i,o=this,a=[o];do{for(n=a.reverse(),a=[];o=n.pop();)if(t(o),e=o.children)for(r=0,i=e.length;r=0;--e)i.push(n[e]);return this},sum:function(t){return this.eachAfter(function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e})},sort:function(t){return this.eachBefore(function(n){n.children&&n.children.sort(t)})},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){var t=[];return this.each(function(n){t.push(n)}),t},leaves:function(){var t=[];return this.eachBefore(function(n){n.children||t.push(n)}),t},links:function(){var t=this,n=[];return t.each(function(e){e!==t&&n.push({source:e.parent,target:e})}),n},copy:function(){return El(this).eachBefore(Pl)}};var Dl=Array.prototype.slice;function ql(t){for(var n,e,r=0,i=(t=function(t){for(var n,e,r=t.length;r;)e=Math.random()*r--|0,n=t[r],t[r]=t[e],t[e]=n;return t}(Dl.call(t))).length,o=[];r0&&e*e>r*r+i*i}function Bl(t,n){for(var e=0;e(a*=a)?(r=(f+a-i)/(2*f),o=Math.sqrt(Math.max(0,a/f-r*r)),e.x=t.x-r*u-o*c,e.y=t.y-r*c+o*u):(r=(f+i-a)/(2*f),o=Math.sqrt(Math.max(0,i/f-r*r)),e.x=n.x+r*u-o*c,e.y=n.y+r*c+o*u)):(e.x=n.x+e.r,e.y=n.y)}function jl(t,n){var e=t.r+n.r-1e-6,r=n.x-t.x,i=n.y-t.y;return e>0&&e*e>r*r+i*i}function Vl(t){var n=t._,e=t.next._,r=n.r+e.r,i=(n.x*e.r+e.x*n.r)/r,o=(n.y*e.r+e.y*n.r)/r;return i*i+o*o}function Xl(t){this._=t,this.next=null,this.previous=null}function Gl(t){if(!(i=t.length))return 0;var n,e,r,i,o,a,u,c,f,s,l;if((n=t[0]).x=0,n.y=0,!(i>1))return n.r;if(e=t[1],n.x=-e.r,e.x=n.r,e.y=0,!(i>2))return n.r+e.r;Hl(e,n,r=t[2]),n=new Xl(n),e=new Xl(e),r=new 
Xl(r),n.next=r.previous=e,e.next=n.previous=r,r.next=e.previous=n;t:for(u=3;uh&&(h=u),g=s*s*v,(d=Math.max(h/g,g/l))>p){s-=u;break}p=d}y.push(a={value:s,dice:c1?n:1)},e}(gh);var bh=function t(n){function e(t,e,r,i,o){if((a=t._squarify)&&a.ratio===n)for(var a,u,c,f,s,l=-1,h=a.length,d=t.value;++l1?n:1)},e}(gh);function mh(t,n,e){return(n[0]-t[0])*(e[1]-t[1])-(n[1]-t[1])*(e[0]-t[0])}function xh(t,n){return t[0]-n[0]||t[1]-n[1]}function wh(t){for(var n=t.length,e=[0,1],r=2,i=2;i1&&mh(t[e[r-2]],t[e[r-1]],t[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function Mh(){return Math.random()}var Nh=function t(n){function e(t,e){return t=null==t?0:+t,e=null==e?1:+e,1===arguments.length?(e=t,t=0):e-=t,function(){return n()*e+t}}return e.source=t,e}(Mh),Th=function t(n){function e(t,e){var r,i;return t=null==t?0:+t,e=null==e?1:+e,function(){var o;if(null!=r)o=r,r=null;else do{r=2*n()-1,o=2*n()-1,i=r*r+o*o}while(!i||i>1);return t+e*o*Math.sqrt(-2*Math.log(i)/i)}}return e.source=t,e}(Mh),Ah=function t(n){function e(){var t=Th.source(n).apply(this,arguments);return function(){return Math.exp(t())}}return e.source=t,e}(Mh),Sh=function t(n){function e(t){return function(){for(var e=0,r=0;rr&&(n=e,e=r,r=n),function(t){return Math.max(e,Math.min(r,t))}}function Hh(t,n,e){var r=t[0],i=t[1],o=n[0],a=n[1];return i2?jh:Hh,i=o=null,l}function l(n){return isNaN(n=+n)?e:(i||(i=r(a.map(t),u,c)))(t(f(n)))}return l.invert=function(e){return f(n((o||(o=r(u,a.map(t),me)))(e)))},l.domain=function(t){return arguments.length?(a=Rh.call(t,Oh),f===Fh||(f=Ih(a)),s()):a.slice()},l.range=function(t){return arguments.length?(u=Dh.call(t),s()):u.slice()},l.rangeRound=function(t){return u=Dh.call(t),c=Ae,s()},l.clamp=function(t){return arguments.length?(f=t?Ih(a):Fh,l):f!==Fh},l.interpolate=function(t){return arguments.length?(c=t,s()):c},l.unknown=function(t){return arguments.length?(e=t,l):e},function(e,r){return t=e,n=r,s()}}function Gh(t,n){return Xh()(t,n)}function $h(n,e,r,i){var 
o,a=w(n,e,r);switch((i=Oa(null==i?",f":i)).type){case"s":var u=Math.max(Math.abs(n),Math.abs(e));return null!=i.precision||isNaN(o=Wa(a,u))||(i.precision=o),t.formatPrefix(i,u);case"":case"e":case"g":case"p":case"r":null!=i.precision||isNaN(o=Za(a,Math.max(Math.abs(n),Math.abs(e))))||(i.precision=o-("e"===i.type));break;case"f":case"%":null!=i.precision||isNaN(o=$a(a))||(i.precision=o-2*("%"===i.type))}return t.format(i)}function Wh(t){var n=t.domain;return t.ticks=function(t){var e=n();return m(e[0],e[e.length-1],null==t?10:t)},t.tickFormat=function(t,e){var r=n();return $h(r[0],r[r.length-1],null==t?10:t,e)},t.nice=function(e){null==e&&(e=10);var r,i=n(),o=0,a=i.length-1,u=i[o],c=i[a];return c0?r=x(u=Math.floor(u/r)*r,c=Math.ceil(c/r)*r,e):r<0&&(r=x(u=Math.ceil(u*r)/r,c=Math.floor(c*r)/r,e)),r>0?(i[o]=Math.floor(u/r)*r,i[a]=Math.ceil(c/r)*r,n(i)):r<0&&(i[o]=Math.ceil(u*r)/r,i[a]=Math.floor(c*r)/r,n(i)),t},t}function Zh(t,n){var e,r=0,i=(t=t.slice()).length-1,o=t[r],a=t[i];return a0){for(;hc)break;v.push(l)}}else for(;h=1;--s)if(!((l=f*s)c)break;v.push(l)}}else v=m(h,d,Math.min(d-h,p)).map(r);return n?v.reverse():v},i.tickFormat=function(n,o){if(null==o&&(o=10===a?".0e":","),"function"!=typeof o&&(o=t.format(o)),n===1/0)return o;null==n&&(n=10);var u=Math.max(1,a*n/i.ticks().length);return function(t){var n=t/r(Math.round(e(t)));return n*a0))return u;do{u.push(a=new Date(+e)),n(e,o),t(e)}while(a=n)for(;t(n),!e(n);)n.setTime(n-1)},function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;n(t,-1),!e(t););else for(;--r>=0;)for(;n(t,1),!e(t););})},e&&(i.count=function(n,r){return hd.setTime(+n),dd.setTime(+r),t(hd),t(dd),Math.floor(e(hd,dd))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(n){return r(n)%t==0}:function(n){return i.count(0,n)%t==0}):i:null}),i}var vd=pd(function(){},function(t,n){t.setTime(+t+n)},function(t,n){return n-t});vd.every=function(t){return 
t=Math.floor(t),isFinite(t)&&t>0?t>1?pd(function(n){n.setTime(Math.floor(n/t)*t)},function(n,e){n.setTime(+n+e*t)},function(n,e){return(e-n)/t}):vd:null};var gd=vd.range,yd=6e4,_d=6048e5,bd=pd(function(t){t.setTime(t-t.getMilliseconds())},function(t,n){t.setTime(+t+1e3*n)},function(t,n){return(n-t)/1e3},function(t){return t.getUTCSeconds()}),md=bd.range,xd=pd(function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())},function(t,n){t.setTime(+t+n*yd)},function(t,n){return(n-t)/yd},function(t){return t.getMinutes()}),wd=xd.range,Md=pd(function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-t.getMinutes()*yd)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getHours()}),Nd=Md.range,Td=pd(function(t){t.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*yd)/864e5},function(t){return t.getDate()-1}),Ad=Td.range;function Sd(t){return pd(function(n){n.setDate(n.getDate()-(n.getDay()+7-t)%7),n.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+7*n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*yd)/_d})}var kd=Sd(0),Ed=Sd(1),Cd=Sd(2),Pd=Sd(3),zd=Sd(4),Rd=Sd(5),Dd=Sd(6),qd=kd.range,Ld=Ed.range,Ud=Cd.range,Od=Pd.range,Bd=zd.range,Fd=Rd.range,Yd=Dd.range,Id=pd(function(t){t.setDate(1),t.setHours(0,0,0,0)},function(t,n){t.setMonth(t.getMonth()+n)},function(t,n){return n.getMonth()-t.getMonth()+12*(n.getFullYear()-t.getFullYear())},function(t){return t.getMonth()}),Hd=Id.range,jd=pd(function(t){t.setMonth(0,1),t.setHours(0,0,0,0)},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t,n){return n.getFullYear()-t.getFullYear()},function(t){return t.getFullYear()});jd.every=function(t){return isFinite(t=Math.floor(t))&&t>0?pd(function(n){n.setFullYear(Math.floor(n.getFullYear()/t)*t),n.setMonth(0,1),n.setHours(0,0,0,0)},function(n,e){n.setFullYear(n.getFullYear()+e*t)}):null};var 
Vd=jd.range,Xd=pd(function(t){t.setUTCSeconds(0,0)},function(t,n){t.setTime(+t+n*yd)},function(t,n){return(n-t)/yd},function(t){return t.getUTCMinutes()}),Gd=Xd.range,$d=pd(function(t){t.setUTCMinutes(0,0,0)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getUTCHours()}),Wd=$d.range,Zd=pd(function(t){t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+n)},function(t,n){return(n-t)/864e5},function(t){return t.getUTCDate()-1}),Qd=Zd.range;function Kd(t){return pd(function(n){n.setUTCDate(n.getUTCDate()-(n.getUTCDay()+7-t)%7),n.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+7*n)},function(t,n){return(n-t)/_d})}var Jd=Kd(0),tp=Kd(1),np=Kd(2),ep=Kd(3),rp=Kd(4),ip=Kd(5),op=Kd(6),ap=Jd.range,up=tp.range,cp=np.range,fp=ep.range,sp=rp.range,lp=ip.range,hp=op.range,dp=pd(function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCMonth(t.getUTCMonth()+n)},function(t,n){return n.getUTCMonth()-t.getUTCMonth()+12*(n.getUTCFullYear()-t.getUTCFullYear())},function(t){return t.getUTCMonth()}),pp=dp.range,vp=pd(function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCFullYear(t.getUTCFullYear()+n)},function(t,n){return n.getUTCFullYear()-t.getUTCFullYear()},function(t){return t.getUTCFullYear()});vp.every=function(t){return isFinite(t=Math.floor(t))&&t>0?pd(function(n){n.setUTCFullYear(Math.floor(n.getUTCFullYear()/t)*t),n.setUTCMonth(0,1),n.setUTCHours(0,0,0,0)},function(n,e){n.setUTCFullYear(n.getUTCFullYear()+e*t)}):null};var gp=vp.range;function yp(t){if(0<=t.y&&t.y<100){var n=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return n.setFullYear(t.y),n}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function _p(t){if(0<=t.y&&t.y<100){var n=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return n.setUTCFullYear(t.y),n}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function bp(t,n,e){return{y:t,m:n,d:e,H:0,M:0,S:0,L:0}}function mp(t){var 
n=t.dateTime,e=t.date,r=t.time,i=t.periods,o=t.days,a=t.shortDays,u=t.months,c=t.shortMonths,f=kp(i),s=Ep(i),l=kp(o),h=Ep(o),d=kp(a),p=Ep(a),v=kp(u),g=Ep(u),y=kp(c),_=Ep(c),b={a:function(t){return a[t.getDay()]},A:function(t){return o[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return u[t.getMonth()]},c:null,d:Zp,e:Zp,f:nv,H:Qp,I:Kp,j:Jp,L:tv,m:ev,M:rv,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:Pv,s:zv,S:iv,u:ov,U:av,V:uv,w:cv,W:fv,x:null,X:null,y:sv,Y:lv,Z:hv,"%":Cv},m={a:function(t){return a[t.getUTCDay()]},A:function(t){return o[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return u[t.getUTCMonth()]},c:null,d:dv,e:dv,f:_v,H:pv,I:vv,j:gv,L:yv,m:bv,M:mv,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:Pv,s:zv,S:xv,u:wv,U:Mv,V:Nv,w:Tv,W:Av,x:null,X:null,y:Sv,Y:kv,Z:Ev,"%":Cv},x={a:function(t,n,e){var r=d.exec(n.slice(e));return r?(t.w=p[r[0].toLowerCase()],e+r[0].length):-1},A:function(t,n,e){var r=l.exec(n.slice(e));return r?(t.w=h[r[0].toLowerCase()],e+r[0].length):-1},b:function(t,n,e){var r=y.exec(n.slice(e));return r?(t.m=_[r[0].toLowerCase()],e+r[0].length):-1},B:function(t,n,e){var r=v.exec(n.slice(e));return r?(t.m=g[r[0].toLowerCase()],e+r[0].length):-1},c:function(t,e,r){return N(t,n,e,r)},d:Fp,e:Fp,f:Xp,H:Ip,I:Ip,j:Yp,L:Vp,m:Bp,M:Hp,p:function(t,n,e){var r=f.exec(n.slice(e));return r?(t.p=s[r[0].toLowerCase()],e+r[0].length):-1},q:Op,Q:$p,s:Wp,S:jp,u:Pp,U:zp,V:Rp,w:Cp,W:Dp,x:function(t,n,r){return N(t,e,n,r)},X:function(t,n,e){return N(t,r,n,e)},y:Lp,Y:qp,Z:Up,"%":Gp};function w(t,n){return function(e){var r,i,o,a=[],u=-1,c=0,f=t.length;for(e instanceof Date||(e=new Date(+e));++u53)return null;"w"in o||(o.w=1),"Z"in 
o?(i=(r=_p(bp(o.y,0,1))).getUTCDay(),r=i>4||0===i?tp.ceil(r):tp(r),r=Zd.offset(r,7*(o.V-1)),o.y=r.getUTCFullYear(),o.m=r.getUTCMonth(),o.d=r.getUTCDate()+(o.w+6)%7):(i=(r=yp(bp(o.y,0,1))).getDay(),r=i>4||0===i?Ed.ceil(r):Ed(r),r=Td.offset(r,7*(o.V-1)),o.y=r.getFullYear(),o.m=r.getMonth(),o.d=r.getDate()+(o.w+6)%7)}else("W"in o||"U"in o)&&("w"in o||(o.w="u"in o?o.u%7:"W"in o?1:0),i="Z"in o?_p(bp(o.y,0,1)).getUTCDay():yp(bp(o.y,0,1)).getDay(),o.m=0,o.d="W"in o?(o.w+6)%7+7*o.W-(i+5)%7:o.w+7*o.U-(i+6)%7);return"Z"in o?(o.H+=o.Z/100|0,o.M+=o.Z%100,_p(o)):yp(o)}}function N(t,n,e,r){for(var i,o,a=0,u=n.length,c=e.length;a=c)return-1;if(37===(i=n.charCodeAt(a++))){if(i=n.charAt(a++),!(o=x[i in wp?n.charAt(a++):i])||(r=o(t,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}return b.x=w(e,b),b.X=w(r,b),b.c=w(n,b),m.x=w(e,m),m.X=w(r,m),m.c=w(n,m),{format:function(t){var n=w(t+="",b);return n.toString=function(){return t},n},parse:function(t){var n=M(t+="",!1);return n.toString=function(){return t},n},utcFormat:function(t){var n=w(t+="",m);return n.toString=function(){return t},n},utcParse:function(t){var n=M(t+="",!0);return n.toString=function(){return t},n}}}var xp,wp={"-":"",_:" ",0:"0"},Mp=/^\s*\d+/,Np=/^%/,Tp=/[\\^$*+?|[\]().{}]/g;function Ap(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o68?1900:2e3),e+r[0].length):-1}function Up(t,n,e){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(n.slice(e,e+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),e+r[0].length):-1}function Op(t,n,e){var r=Mp.exec(n.slice(e,e+1));return r?(t.q=3*r[0]-3,e+r[0].length):-1}function Bp(t,n,e){var r=Mp.exec(n.slice(e,e+2));return r?(t.m=r[0]-1,e+r[0].length):-1}function Fp(t,n,e){var r=Mp.exec(n.slice(e,e+2));return r?(t.d=+r[0],e+r[0].length):-1}function Yp(t,n,e){var r=Mp.exec(n.slice(e,e+3));return r?(t.m=0,t.d=+r[0],e+r[0].length):-1}function Ip(t,n,e){var r=Mp.exec(n.slice(e,e+2));return r?(t.H=+r[0],e+r[0].length):-1}function Hp(t,n,e){var r=Mp.exec(n.slice(e,e+2));return 
r?(t.M=+r[0],e+r[0].length):-1}function jp(t,n,e){var r=Mp.exec(n.slice(e,e+2));return r?(t.S=+r[0],e+r[0].length):-1}function Vp(t,n,e){var r=Mp.exec(n.slice(e,e+3));return r?(t.L=+r[0],e+r[0].length):-1}function Xp(t,n,e){var r=Mp.exec(n.slice(e,e+6));return r?(t.L=Math.floor(r[0]/1e3),e+r[0].length):-1}function Gp(t,n,e){var r=Np.exec(n.slice(e,e+1));return r?e+r[0].length:-1}function $p(t,n,e){var r=Mp.exec(n.slice(e));return r?(t.Q=+r[0],e+r[0].length):-1}function Wp(t,n,e){var r=Mp.exec(n.slice(e));return r?(t.s=+r[0],e+r[0].length):-1}function Zp(t,n){return Ap(t.getDate(),n,2)}function Qp(t,n){return Ap(t.getHours(),n,2)}function Kp(t,n){return Ap(t.getHours()%12||12,n,2)}function Jp(t,n){return Ap(1+Td.count(jd(t),t),n,3)}function tv(t,n){return Ap(t.getMilliseconds(),n,3)}function nv(t,n){return tv(t,n)+"000"}function ev(t,n){return Ap(t.getMonth()+1,n,2)}function rv(t,n){return Ap(t.getMinutes(),n,2)}function iv(t,n){return Ap(t.getSeconds(),n,2)}function ov(t){var n=t.getDay();return 0===n?7:n}function av(t,n){return Ap(kd.count(jd(t)-1,t),n,2)}function uv(t,n){var e=t.getDay();return t=e>=4||0===e?zd(t):zd.ceil(t),Ap(zd.count(jd(t),t)+(4===jd(t).getDay()),n,2)}function cv(t){return t.getDay()}function fv(t,n){return Ap(Ed.count(jd(t)-1,t),n,2)}function sv(t,n){return Ap(t.getFullYear()%100,n,2)}function lv(t,n){return Ap(t.getFullYear()%1e4,n,4)}function hv(t){var n=t.getTimezoneOffset();return(n>0?"-":(n*=-1,"+"))+Ap(n/60|0,"0",2)+Ap(n%60,"0",2)}function dv(t,n){return Ap(t.getUTCDate(),n,2)}function pv(t,n){return Ap(t.getUTCHours(),n,2)}function vv(t,n){return Ap(t.getUTCHours()%12||12,n,2)}function gv(t,n){return Ap(1+Zd.count(vp(t),t),n,3)}function yv(t,n){return Ap(t.getUTCMilliseconds(),n,3)}function _v(t,n){return yv(t,n)+"000"}function bv(t,n){return Ap(t.getUTCMonth()+1,n,2)}function mv(t,n){return Ap(t.getUTCMinutes(),n,2)}function xv(t,n){return Ap(t.getUTCSeconds(),n,2)}function wv(t){var n=t.getUTCDay();return 0===n?7:n}function 
Mv(t,n){return Ap(Jd.count(vp(t)-1,t),n,2)}function Nv(t,n){var e=t.getUTCDay();return t=e>=4||0===e?rp(t):rp.ceil(t),Ap(rp.count(vp(t),t)+(4===vp(t).getUTCDay()),n,2)}function Tv(t){return t.getUTCDay()}function Av(t,n){return Ap(tp.count(vp(t)-1,t),n,2)}function Sv(t,n){return Ap(t.getUTCFullYear()%100,n,2)}function kv(t,n){return Ap(t.getUTCFullYear()%1e4,n,4)}function Ev(){return"+0000"}function Cv(){return"%"}function Pv(t){return+t}function zv(t){return Math.floor(+t/1e3)}function Rv(n){return xp=mp(n),t.timeFormat=xp.format,t.timeParse=xp.parse,t.utcFormat=xp.utcFormat,t.utcParse=xp.utcParse,xp}Rv({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var Dv=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat("%Y-%m-%dT%H:%M:%S.%LZ");var qv=+new Date("2000-01-01T00:00:00.000Z")?function(t){var n=new Date(t);return isNaN(n)?null:n}:t.utcParse("%Y-%m-%dT%H:%M:%S.%LZ"),Lv=1e3,Uv=60*Lv,Ov=60*Uv,Bv=24*Ov,Fv=7*Bv,Yv=30*Bv,Iv=365*Bv;function Hv(t){return new Date(t)}function jv(t){return t instanceof Date?+t:+new Date(+t)}function Vv(t,n,r,i,o,a,u,c,f){var s=Gh(Fh,Fh),l=s.invert,h=s.domain,d=f(".%L"),p=f(":%S"),v=f("%I:%M"),g=f("%I %p"),y=f("%a %d"),_=f("%b %d"),b=f("%B"),m=f("%Y"),x=[[u,1,Lv],[u,5,5*Lv],[u,15,15*Lv],[u,30,30*Lv],[a,1,Uv],[a,5,5*Uv],[a,15,15*Uv],[a,30,30*Uv],[o,1,Ov],[o,3,3*Ov],[o,6,6*Ov],[o,12,12*Ov],[i,1,Bv],[i,2,2*Bv],[r,1,Fv],[n,1,Yv],[n,3,3*Yv],[t,1,Iv]];function M(e){return(u(e)=1?Py:t<=-1?-Py:Math.asin(t)}function Dy(t){return t.innerRadius}function qy(t){return t.outerRadius}function Ly(t){return t.startAngle}function Uy(t){return t.endAngle}function Oy(t){return 
t&&t.padAngle}function By(t,n,e,r,i,o,a){var u=t-e,c=n-r,f=(a?o:-o)/ky(u*u+c*c),s=f*c,l=-f*u,h=t+s,d=n+l,p=e+s,v=r+l,g=(h+p)/2,y=(d+v)/2,_=p-h,b=v-d,m=_*_+b*b,x=i-o,w=h*v-p*d,M=(b<0?-1:1)*ky(Ty(0,x*x*m-w*w)),N=(w*b-_*M)/m,T=(-w*_-b*M)/m,A=(w*b+_*M)/m,S=(-w*_+b*M)/m,k=N-g,E=T-y,C=A-g,P=S-y;return k*k+E*E>C*C+P*P&&(N=A,T=S),{cx:N,cy:T,x01:-s,y01:-l,x11:N*(i/x-1),y11:T*(i/x-1)}}function Fy(t){this._context=t}function Yy(t){return new Fy(t)}function Iy(t){return t[0]}function Hy(t){return t[1]}function jy(){var t=Iy,n=Hy,e=xy(!0),r=null,i=Yy,o=null;function a(a){var u,c,f,s=a.length,l=!1;for(null==r&&(o=i(f=no())),u=0;u<=s;++u)!(u=s;--l)u.point(g[l],y[l]);u.lineEnd(),u.areaEnd()}v&&(g[f]=+t(h,f,c),y[f]=+e(h,f,c),u.point(n?+n(h,f,c):g[f],r?+r(h,f,c):y[f]))}if(d)return u=null,d+""||null}function f(){return jy().defined(i).curve(a).context(o)}return c.x=function(e){return arguments.length?(t="function"==typeof e?e:xy(+e),n=null,c):t},c.x0=function(n){return arguments.length?(t="function"==typeof n?n:xy(+n),c):t},c.x1=function(t){return arguments.length?(n=null==t?null:"function"==typeof t?t:xy(+t),c):n},c.y=function(t){return arguments.length?(e="function"==typeof t?t:xy(+t),r=null,c):e},c.y0=function(t){return arguments.length?(e="function"==typeof t?t:xy(+t),c):e},c.y1=function(t){return arguments.length?(r=null==t?null:"function"==typeof t?t:xy(+t),c):r},c.lineX0=c.lineY0=function(){return f().x(t).y(e)},c.lineY1=function(){return f().x(t).y(r)},c.lineX1=function(){return f().x(n).y(e)},c.defined=function(t){return arguments.length?(i="function"==typeof t?t:xy(!!t),c):i},c.curve=function(t){return arguments.length?(a=t,null!=o&&(u=a(o)),c):a},c.context=function(t){return arguments.length?(null==t?o=u=null:u=a(o=t),c):o},c}function Xy(t,n){return nt?1:n>=t?0:NaN}function Gy(t){return 
t}Fy.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:this._context.lineTo(t,n)}}};var $y=Zy(Yy);function Wy(t){this._curve=t}function Zy(t){function n(n){return new Wy(t(n))}return n._curve=t,n}function Qy(t){var n=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?n(Zy(t)):n()._curve},t}function Ky(){return Qy(jy().curve($y))}function Jy(){var t=Vy().curve($y),n=t.curve,e=t.lineX0,r=t.lineX1,i=t.lineY0,o=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return Qy(e())},delete t.lineX0,t.lineEndAngle=function(){return Qy(r())},delete t.lineX1,t.lineInnerRadius=function(){return Qy(i())},delete t.lineY0,t.lineOuterRadius=function(){return Qy(o())},delete t.lineY1,t.curve=function(t){return arguments.length?n(Zy(t)):n()._curve},t}function t_(t,n){return[(n=+n)*Math.cos(t-=Math.PI/2),n*Math.sin(t)]}Wy.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,n){this._curve.point(n*Math.sin(t),n*-Math.cos(t))}};var n_=Array.prototype.slice;function e_(t){return t.source}function r_(t){return t.target}function i_(t){var n=e_,e=r_,r=Iy,i=Hy,o=null;function a(){var a,u=n_.call(arguments),c=n.apply(this,u),f=e.apply(this,u);if(o||(o=a=no()),t(o,+r.apply(this,(u[0]=c,u)),+i.apply(this,u),+r.apply(this,(u[0]=f,u)),+i.apply(this,u)),a)return o=null,a+""||null}return 
a.source=function(t){return arguments.length?(n=t,a):n},a.target=function(t){return arguments.length?(e=t,a):e},a.x=function(t){return arguments.length?(r="function"==typeof t?t:xy(+t),a):r},a.y=function(t){return arguments.length?(i="function"==typeof t?t:xy(+t),a):i},a.context=function(t){return arguments.length?(o=null==t?null:t,a):o},a}function o_(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function a_(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function u_(t,n,e,r,i){var o=t_(n,e),a=t_(n,e=(e+i)/2),u=t_(r,e),c=t_(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(a[0],a[1],u[0],u[1],c[0],c[1])}var c_={draw:function(t,n){var e=Math.sqrt(n/Cy);t.moveTo(e,0),t.arc(0,0,e,0,zy)}},f_={draw:function(t,n){var e=Math.sqrt(n/5)/2;t.moveTo(-3*e,-e),t.lineTo(-e,-e),t.lineTo(-e,-3*e),t.lineTo(e,-3*e),t.lineTo(e,-e),t.lineTo(3*e,-e),t.lineTo(3*e,e),t.lineTo(e,e),t.lineTo(e,3*e),t.lineTo(-e,3*e),t.lineTo(-e,e),t.lineTo(-3*e,e),t.closePath()}},s_=Math.sqrt(1/3),l_=2*s_,h_={draw:function(t,n){var e=Math.sqrt(n/l_),r=e*s_;t.moveTo(0,-e),t.lineTo(r,0),t.lineTo(0,e),t.lineTo(-r,0),t.closePath()}},d_=Math.sin(Cy/10)/Math.sin(7*Cy/10),p_=Math.sin(zy/10)*d_,v_=-Math.cos(zy/10)*d_,g_={draw:function(t,n){var e=Math.sqrt(.8908130915292852*n),r=p_*e,i=v_*e;t.moveTo(0,-e),t.lineTo(r,i);for(var o=1;o<5;++o){var a=zy*o/5,u=Math.cos(a),c=Math.sin(a);t.lineTo(c*e,-u*e),t.lineTo(u*r-c*i,c*r+u*i)}t.closePath()}},y_={draw:function(t,n){var e=Math.sqrt(n),r=-e/2;t.rect(r,r,e,e)}},__=Math.sqrt(3),b_={draw:function(t,n){var e=-Math.sqrt(n/(3*__));t.moveTo(0,2*e),t.lineTo(-__*e,-e),t.lineTo(__*e,-e),t.closePath()}},m_=Math.sqrt(3)/2,x_=1/Math.sqrt(12),w_=3*(x_/2+1),M_={draw:function(t,n){var 
e=Math.sqrt(n/w_),r=e/2,i=e*x_,o=r,a=e*x_+e,u=-o,c=a;t.moveTo(r,i),t.lineTo(o,a),t.lineTo(u,c),t.lineTo(-.5*r-m_*i,m_*r+-.5*i),t.lineTo(-.5*o-m_*a,m_*o+-.5*a),t.lineTo(-.5*u-m_*c,m_*u+-.5*c),t.lineTo(-.5*r+m_*i,-.5*i-m_*r),t.lineTo(-.5*o+m_*a,-.5*a-m_*o),t.lineTo(-.5*u+m_*c,-.5*c-m_*u),t.closePath()}},N_=[c_,f_,h_,y_,g_,b_,M_];function T_(){}function A_(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function S_(t){this._context=t}function k_(t){this._context=t}function E_(t){this._context=t}function C_(t,n){this._basis=new S_(t),this._beta=n}S_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:A_(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:A_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},k_.prototype={areaStart:T_,areaEnd:T_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 
0:this._point=1,this._x2=t,this._y2=n;break;case 1:this._point=2,this._x3=t,this._y3=n;break;case 2:this._point=3,this._x4=t,this._y4=n,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+n)/6);break;default:A_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},E_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var e=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+n)/6;this._line?this._context.lineTo(e,r):this._context.moveTo(e,r);break;case 3:this._point=4;default:A_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},C_.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,n=this._y,e=t.length-1;if(e>0)for(var r,i=t[0],o=n[0],a=t[e]-i,u=n[e]-o,c=-1;++c<=e;)r=c/e,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*a),this._beta*n[c]+(1-this._beta)*(o+r*u));this._x=this._y=null,this._basis.lineEnd()},point:function(t,n){this._x.push(+t),this._y.push(+n)}};var P_=function t(n){function e(t){return 1===n?new S_(t):new C_(t,n)}return e.beta=function(n){return t(+n)},e}(.85);function z_(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function R_(t,n){this._context=t,this._k=(1-n)/6}R_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 
3:z_(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2,this._x1=t,this._y1=n;break;case 2:this._point=3;default:z_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var D_=function t(n){function e(t){return new R_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function q_(t,n){this._context=t,this._k=(1-n)/6}q_.prototype={areaStart:T_,areaEnd:T_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:z_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var L_=function t(n){function e(t){return new q_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function U_(t,n){this._context=t,this._k=(1-n)/6}U_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 
2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:z_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var O_=function t(n){function e(t){return new U_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function B_(t,n,e){var r=t._x1,i=t._y1,o=t._x2,a=t._y2;if(t._l01_a>Ey){var u=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*u-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*u-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>Ey){var f=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,s=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*f+t._x1*t._l23_2a-n*t._l12_2a)/s,a=(a*f+t._y1*t._l23_2a-e*t._l12_2a)/s}t._context.bezierCurveTo(r,i,o,a,t._x2,t._y2)}function F_(t,n){this._context=t,this._alpha=n}F_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3;default:B_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var Y_=function t(n){function e(t){return n?new F_(t,n):new R_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function 
I_(t,n){this._context=t,this._alpha=n}I_.prototype={areaStart:T_,areaEnd:T_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:B_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var H_=function t(n){function e(t){return n?new I_(t,n):new q_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function j_(t,n){this._context=t,this._alpha=n}j_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 
2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:B_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var V_=function t(n){function e(t){return n?new j_(t,n):new U_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function X_(t){this._context=t}function G_(t){return t<0?-1:1}function $_(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),a=(e-t._y1)/(i||r<0&&-0),u=(o*i+a*r)/(r+i);return(G_(o)+G_(a))*Math.min(Math.abs(o),Math.abs(a),.5*Math.abs(u))||0}function W_(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function Z_(t,n,e){var r=t._x0,i=t._y0,o=t._x1,a=t._y1,u=(o-r)/3;t._context.bezierCurveTo(r+u,i+u*n,o-u,a-u*e,o,a)}function Q_(t){this._context=t}function K_(t){this._context=new J_(t)}function J_(t){this._context=t}function tb(t){this._context=t}function nb(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),a=new Array(r);for(i[0]=0,o[0]=2,a[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(a[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n1)for(var e,r,i,o=1,a=t[n[0]],u=a.length;o=0;)e[n]=n;return e}function ob(t,n){return t[n]}function ab(t){var n=t.map(ub);return ib(t).sort(function(t,e){return n[t]-n[e]})}function ub(t){for(var n,e=-1,r=0,i=t.length,o=-1/0;++eo&&(o=n,r=e);return r}function cb(t){var n=t.map(fb);return ib(t).sort(function(t,e){return n[t]-n[e]})}function fb(t){for(var n,e=0,r=-1,i=t.length;++r0)){if(o/=h,h<0){if(o0){if(o>l)return;o>s&&(s=o)}if(o=r-c,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>s&&(s=o)}else if(h>0){if(o0)){if(o/=d,d<0){if(o0){if(o>l)return;o>s&&(s=o)}if(o=i-f,d||!(o<0)){if(o/=d,d<0){if(o>l)return;o>s&&(s=o)}else if(d>0){if(o0||l<1)||(s>0&&(t[0]=[c+s*h,f+s*d]),l<1&&(t[1]=[c+l*h,f+l*d]),!0)}}}}}function wb(t,n,e,r,i){var o=t[1];if(o)return!0;var 
a,u,c=t[0],f=t.left,s=t.right,l=f[0],h=f[1],d=s[0],p=s[1],v=(l+d)/2,g=(h+p)/2;if(p===h){if(v=r)return;if(l>d){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]1)if(l>d){if(c){if(c[1]>=i)return}else c=[(e-u)/a,e];o=[(i-u)/a,i]}else{if(c){if(c[1]=r)return}else c=[n,a*n+u];o=[r,a*r+u]}else{if(c){if(c[0]=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,n),this._context.lineTo(t,n);else{var e=this._x*(1-this._t)+t*this._t;this._context.lineTo(e,this._y),this._context.lineTo(e,n)}}this._x=t,this._y=n}},db.prototype={constructor:db,insert:function(t,n){var e,r,i;if(t){if(n.P=t,n.N=t.N,t.N&&(t.N.P=n),t.N=n,t.R){for(t=t.R;t.L;)t=t.L;t.L=n}else t.R=n;e=t}else this._?(t=yb(this._),n.P=null,n.N=t,t.P=t.L=n,e=t):(n.P=n.N=null,this._=n,e=null);for(n.L=n.R=null,n.U=e,n.C=!0,t=n;e&&e.C;)e===(r=e.U).L?(i=r.R)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.R&&(vb(this,e),e=(t=e).U),e.C=!1,r.C=!0,gb(this,r)):(i=r.L)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.L&&(gb(this,e),e=(t=e).U),e.C=!1,r.C=!0,vb(this,r)),e=t.U;this._.C=!1},remove:function(t){t.N&&(t.N.P=t.P),t.P&&(t.P.N=t.N),t.N=t.P=null;var n,e,r,i=t.U,o=t.L,a=t.R;if(e=o?a?yb(a):o:a,i?i.L===t?i.L=e:i.R=e:this._=e,o&&a?(r=e.C,e.C=t.C,e.L=o,o.U=e,e!==a?(i=e.U,e.U=t.U,t=e.R,i.L=t,e.R=a,a.U=e):(e.U=i,i=e,t=e.R)):(r=t.C,t=e),t&&(t.U=i),!r)if(t&&t.C)t.C=!1;else{do{if(t===this._)break;if(t===i.L){if((n=i.R).C&&(n.C=!1,i.C=!0,vb(this,i),n=i.R),n.L&&n.L.C||n.R&&n.R.C){n.R&&n.R.C||(n.L.C=!1,n.C=!0,gb(this,n),n=i.R),n.C=i.C,i.C=n.R.C=!1,vb(this,i),t=this._;break}}else if((n=i.L).C&&(n.C=!1,i.C=!0,gb(this,i),n=i.L),n.L&&n.L.C||n.R&&n.R.C){n.L&&n.L.C||(n.R.C=!1,n.C=!0,vb(this,n),n=i.L),n.C=i.C,i.C=n.L.C=!1,gb(this,i),t=this._;break}n.C=!0,t=i,i=i.U}while(!t.C);t&&(t.C=!1)}}};var Ab,Sb=[];function 
kb(){pb(this),this.x=this.y=this.arc=this.site=this.cy=null}function Eb(t){var n=t.P,e=t.N;if(n&&e){var r=n.site,i=t.site,o=e.site;if(r!==o){var a=i[0],u=i[1],c=r[0]-a,f=r[1]-u,s=o[0]-a,l=o[1]-u,h=2*(c*l-f*s);if(!(h>=-jb)){var d=c*c+f*f,p=s*s+l*l,v=(l*d-f*p)/h,g=(c*p-s*d)/h,y=Sb.pop()||new kb;y.arc=t,y.site=i,y.x=v+a,y.y=(y.cy=g+u)+Math.sqrt(v*v+g*g),t.circle=y;for(var _=null,b=Yb._;b;)if(y.yHb)u=u.L;else{if(!((i=o-Ob(u,a))>Hb)){r>-Hb?(n=u.P,e=u):i>-Hb?(n=u,e=u.N):n=e=u;break}if(!u.R){n=u;break}u=u.R}!function(t){Fb[t.index]={site:t,halfedges:[]}}(t);var c=Rb(t);if(Bb.insert(n,c),n||e){if(n===e)return Cb(n),e=Rb(n.site),Bb.insert(c,e),c.edge=e.edge=_b(n.site,c.site),Eb(n),void Eb(e);if(e){Cb(n),Cb(e);var f=n.site,s=f[0],l=f[1],h=t[0]-s,d=t[1]-l,p=e.site,v=p[0]-s,g=p[1]-l,y=2*(h*g-d*v),_=h*h+d*d,b=v*v+g*g,m=[(g*_-d*b)/y+s,(h*b-v*_)/y+l];mb(e.edge,f,p,m),c.edge=_b(f,t,null,m),e.edge=_b(t,p,null,m),Eb(n),Eb(e)}else c.edge=_b(n.site,c.site)}}function Ub(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var a=t.P;if(!a)return-1/0;var u=(e=a.site)[0],c=e[1],f=c-n;if(!f)return u;var s=u-r,l=1/o-1/f,h=s/f;return l?(-h+Math.sqrt(h*h-2*l*(s*s/(-2*f)-c+f/2+i-o/2)))/l+r:(r+u)/2}function Ob(t,n){var e=t.N;if(e)return Ub(e,n);var r=t.site;return r[1]===n?r[0]:1/0}var Bb,Fb,Yb,Ib,Hb=1e-6,jb=1e-12;function Vb(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function Xb(t,n){return n[1]-t[1]||n[0]-t[0]}function Gb(t,n){var e,r,i,o=t.sort(Xb).pop();for(Ib=[],Fb=new Array(t.length),Bb=new db,Yb=new db;;)if(i=Ab,o&&(!i||o[1]Hb||Math.abs(i[0][1]-i[1][1])>Hb)||delete Ib[o]}(a,u,c,f),function(t,n,e,r){var i,o,a,u,c,f,s,l,h,d,p,v,g=Fb.length,y=!0;for(i=0;iHb||Math.abs(v-h)>Hb)&&(c.splice(u,0,Ib.push(bb(a,d,Math.abs(p-t)Hb?[t,Math.abs(l-t)Hb?[Math.abs(h-r)Hb?[e,Math.abs(l-e)Hb?[Math.abs(h-n)=u)return null;var c=t-i.site[0],f=n-i.site[1],s=c*c+f*f;do{i=o.cells[r=a],a=null,i.halfedges.forEach(function(e){var r=o.edges[e],u=r.left;if(u!==i.site&&u||(u=r.right)){var 
c=t-u[0],f=n-u[1],l=c*c+f*f;lr?(r+i)/2:Math.min(0,r)||Math.max(0,i),a>o?(o+a)/2:Math.min(0,o)||Math.max(0,a))}Kb.prototype=Zb.prototype,t.FormatSpecifier=Ba,t.active=function(t,n){var e,r,i=t.__transition;if(i)for(r in n=null==n?null:n+"",i)if((e=i[r]).state>xr&&e.name===n)return new Ur([[t]],yi,n,+r);return null},t.arc=function(){var t=Dy,n=qy,e=xy(0),r=null,i=Ly,o=Uy,a=Oy,u=null;function c(){var c,f,s=+t.apply(this,arguments),l=+n.apply(this,arguments),h=i.apply(this,arguments)-Py,d=o.apply(this,arguments)-Py,p=wy(d-h),v=d>h;if(u||(u=c=no()),lEy)if(p>zy-Ey)u.moveTo(l*Ny(h),l*Sy(h)),u.arc(0,0,l,h,d,!v),s>Ey&&(u.moveTo(s*Ny(d),s*Sy(d)),u.arc(0,0,s,d,h,v));else{var g,y,_=h,b=d,m=h,x=d,w=p,M=p,N=a.apply(this,arguments)/2,T=N>Ey&&(r?+r.apply(this,arguments):ky(s*s+l*l)),A=Ay(wy(l-s)/2,+e.apply(this,arguments)),S=A,k=A;if(T>Ey){var E=Ry(T/s*Sy(N)),C=Ry(T/l*Sy(N));(w-=2*E)>Ey?(m+=E*=v?1:-1,x-=E):(w=0,m=x=(h+d)/2),(M-=2*C)>Ey?(_+=C*=v?1:-1,b-=C):(M=0,_=b=(h+d)/2)}var P=l*Ny(_),z=l*Sy(_),R=s*Ny(x),D=s*Sy(x);if(A>Ey){var q,L=l*Ny(b),U=l*Sy(b),O=s*Ny(m),B=s*Sy(m);if(p1?0:t<-1?Cy:Math.acos(t)}((F*I+Y*H)/(ky(F*F+Y*Y)*ky(I*I+H*H)))/2),V=ky(q[0]*q[0]+q[1]*q[1]);S=Ay(A,(s-V)/(j-1)),k=Ay(A,(l-V)/(j+1))}}M>Ey?k>Ey?(g=By(O,B,P,z,l,k,v),y=By(L,U,R,D,l,k,v),u.moveTo(g.cx+g.x01,g.cy+g.y01),kEy&&w>Ey?S>Ey?(g=By(R,D,L,U,s,-S,v),y=By(P,z,O,B,s,-S,v),u.lineTo(g.cx+g.x01,g.cy+g.y01),S>a,f=i+2*u>>a,s=bo(20);function l(r){var i=new Float32Array(c*f),l=new Float32Array(c*f);r.forEach(function(r,o,s){var l=+t(r,o,s)+u>>a,h=+n(r,o,s)+u>>a,d=+e(r,o,s);l>=0&&l=0&&h>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Ao({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Ao({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a);var d=s(i);if(!Array.isArray(d)){var p=T(i);d=w(0,p,d),(d=g(0,Math.floor(p/d)*d,d)).shift()}return 
To().thresholds(d).size([c,f])(i).map(h)}function h(t){return t.value*=Math.pow(2,-2*a),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(v)}function v(t){t[0]=t[0]*Math.pow(2,a)-u,t[1]=t[1]*Math.pow(2,a)-u}function y(){return c=r+2*(u=3*o)>>a,f=i+2*u>>a,l}return l.x=function(n){return arguments.length?(t="function"==typeof n?n:bo(+n),l):t},l.y=function(t){return arguments.length?(n="function"==typeof t?t:bo(+t),l):n},l.weight=function(t){return arguments.length?(e="function"==typeof t?t:bo(+t),l):e},l.size=function(t){if(!arguments.length)return[r,i];var n=Math.ceil(t[0]),e=Math.ceil(t[1]);if(!(n>=0||n>=0))throw new Error("invalid size");return r=n,i=e,y()},l.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return a=Math.floor(Math.log(t)/Math.LN2),y()},l.thresholds=function(t){return arguments.length?(s="function"==typeof t?t:Array.isArray(t)?bo(yo.call(t)):bo(t),l):s},l.bandwidth=function(t){if(!arguments.length)return Math.sqrt(o*(o+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return o=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},l},t.contours=To,t.create=function(t){return Rt(Z(t).call(document.documentElement))},t.creator=Z,t.cross=function(t,n,e){var r,i,o,u,c=t.length,f=n.length,s=new Array(c*f);for(null==e&&(e=a),r=o=0;rt?1:n>=t?0:NaN},t.deviation=f,t.dispatch=I,t.drag=function(){var n,e,r,i,o=Gt,a=$t,u=Wt,c=Zt,f={},s=I("start","drag","end"),l=0,h=0;function d(t){t.on("mousedown.drag",p).filter(c).on("touchstart.drag",y).on("touchmove.drag",_).on("touchend.drag touchcancel.drag",b).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(){if(!i&&o.apply(this,arguments)){var u=m("mouse",a.apply(this,arguments),Bt,this,arguments);u&&(Rt(t.event.view).on("mousemove.drag",v,!0).on("mouseup.drag",g,!0),Ht(t.event.view),Yt(),r=!1,n=t.event.clientX,e=t.event.clientY,u("start"))}}function v(){if(It(),!r){var 
i=t.event.clientX-n,o=t.event.clientY-e;r=i*i+o*o>h}f.mouse("drag")}function g(){Rt(t.event.view).on("mousemove.drag mouseup.drag",null),jt(t.event.view,r),It(),f.mouse("end")}function y(){if(o.apply(this,arguments)){var n,e,r=t.event.changedTouches,i=a.apply(this,arguments),u=r.length;for(n=0;nc+d||if+d||ou.index){var p=c-a.x-a.vx,v=f-a.y-a.vy,g=p*p+v*v;gt.r&&(t.r=t[n].r)}function u(){if(n){var r,i,o=n.length;for(e=new Array(o),r=0;r=a)){(t.data!==n||t.next)&&(0===s&&(d+=(s=ya())*s),0===l&&(d+=(l=ya())*l),d1?(null==e?u.remove(t):u.set(t,d(e)),n):u.get(t)},find:function(n,e,r){var i,o,a,u,c,f=0,s=t.length;for(null==r?r=1/0:r*=r,f=0;f1?(f.on(t,e),n):f.on(t)}}},t.forceX=function(t){var n,e,r,i=ga(.1);function o(t){for(var i,o=0,a=n.length;o=.12&&i<.234&&r>=-.425&&r<-.214?u:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:a).invert(t)},s.stream=function(e){return t&&n===e?t:(r=[a.stream(n=e),u.stream(e),c.stream(e)],i=r.length,t={point:function(t,n){for(var e=-1;++ePc(r[0],r[1])&&(r[1]=i[1]),Pc(i[0],r[1])>Pc(r[0],r[1])&&(r[0]=i[0])):o.push(r=i);for(a=-1/0,n=0,r=o[e=o.length-1];n<=e;r=i,++n)i=o[n],(u=Pc(r[1],i[0]))>a&&(a=u,Zu=i[0],Ku=r[1])}return ic=oc=null,Zu===1/0||Qu===1/0?[[NaN,NaN],[NaN,NaN]]:[[Zu,Qu],[Ku,Ju]]},t.geoCentroid=function(t){ac=uc=cc=fc=sc=lc=hc=dc=pc=vc=gc=0,Cu(t,Dc);var n=pc,e=vc,r=gc,i=n*n+e*e+r*r;return i2?t[2]+90:90]):[(t=e())[0],t[1],t[2]-90]},e([0,0,90]).scale(159.155)},t.geoTransverseMercatorRaw=Nl,t.gray=function(t,n){return new Bn(t,0,0,null==n?1:n)},t.hcl=Vn,t.hierarchy=El,t.histogram=function(){var t=v,n=s,e=M;function r(r){var o,a,u=r.length,c=new Array(u);for(o=0;ol;)h.pop(),--d;var p,v=new Array(d+1);for(o=0;o<=d;++o)(p=v[o]=[]).x0=o>0?h[o-1]:s,p.x1=o1)&&(t-=Math.floor(t));var n=Math.abs(t-.5);return 
hy.h=360*t-100,hy.s=1.5-1.5*n,hy.l=.8-.9*n,hy+""},t.interpolateRdBu=_g,t.interpolateRdGy=mg,t.interpolateRdPu=Ig,t.interpolateRdYlBu=wg,t.interpolateRdYlGn=Ng,t.interpolateReds=ay,t.interpolateRgb=he,t.interpolateRgbBasis=pe,t.interpolateRgbBasisClosed=ve,t.interpolateRound=Ae,t.interpolateSinebow=function(t){var n;return t=(.5-t)*Math.PI,dy.r=255*(n=Math.sin(t))*n,dy.g=255*(n=Math.sin(t+py))*n,dy.b=255*(n=Math.sin(t+vy))*n,dy+""},t.interpolateSpectral=Ag,t.interpolateString=Ne,t.interpolateTransformCss=qe,t.interpolateTransformSvg=Le,t.interpolateTurbo=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"},t.interpolateViridis=yy,t.interpolateWarm=sy,t.interpolateYlGn=Xg,t.interpolateYlGnBu=jg,t.interpolateYlOrBr=$g,t.interpolateYlOrRd=Zg,t.interpolateZoom=Ie,t.interrupt=Pr,t.interval=function(t,n,e){var r=new lr,i=n;return null==n?(r.restart(t,n,e),r):(n=+n,e=null==e?fr():+e,r.restart(function o(a){a+=i,r.restart(o,i+=n,e),t(a)},n,e),r)},t.isoFormat=Dv,t.isoParse=qv,t.json=function(t,n){return fetch(t,n).then(la)},t.keys=function(t){var n=[];for(var e in t)n.push(e);return n},t.lab=On,t.lch=function(t,n,e,r){return 1===arguments.length?jn(t):new Xn(e,n,t,null==r?1:r)},t.line=jy,t.lineRadial=Ky,t.linkHorizontal=function(){return i_(o_)},t.linkRadial=function(){var t=i_(u_);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t},t.linkVertical=function(){return i_(a_)},t.local=qt,t.map=co,t.matcher=nt,t.max=T,t.mean=function(t,n){var e,r=t.length,i=r,o=-1,a=0;if(null==n)for(;++o=r.length)return null!=t&&e.sort(t),null!=n?n(e):e;for(var c,f,s,l=-1,h=e.length,d=r[i++],p=co(),v=a();++lr.length)return e;var a,u=i[o-1];return 
null!=n&&o>=r.length?a=e.entries():(a=[],e.each(function(n,e){a.push({key:e,values:t(n,o)})})),null!=u?a.sort(function(t,n){return u(t.key,n.key)}):a}(o(t,0,lo,ho),0)},key:function(t){return r.push(t),e},sortKeys:function(t){return i[r.length-1]=t,e},sortValues:function(n){return t=n,e},rollup:function(t){return n=t,e}}},t.now=fr,t.pack=function(){var t=null,n=1,e=1,r=Zl;function i(i){return i.x=n/2,i.y=e/2,t?i.eachBefore(Jl(t)).eachAfter(th(r,.5)).eachBefore(nh(1)):i.eachBefore(Jl(Kl)).eachAfter(th(Zl,1)).eachAfter(th(r,i.r/Math.min(n,e))).eachBefore(nh(Math.min(n,e)/(2*i.r))),i}return i.radius=function(n){return arguments.length?(t=$l(n),i):t},i.size=function(t){return arguments.length?(n=+t[0],e=+t[1],i):[n,e]},i.padding=function(t){return arguments.length?(r="function"==typeof t?t:Ql(+t),i):r},i},t.packEnclose=ql,t.packSiblings=function(t){return Gl(t),t},t.pairs=function(t,n){null==n&&(n=a);for(var e=0,r=t.length-1,i=t[0],o=new Array(r<0?0:r);e0&&(d+=l);for(null!=n?p.sort(function(t,e){return n(v[t],v[e])}):null!=e&&p.sort(function(t,n){return e(a[t],a[n])}),u=0,f=d?(y-h*b)/d:0;u0?l*f:0)+b,v[c]={data:a[c],index:u,value:l,startAngle:g,endAngle:s,padAngle:_};return v}return a.value=function(n){return arguments.length?(t="function"==typeof n?n:xy(+n),a):t},a.sortValues=function(t){return arguments.length?(n=t,e=null,a):n},a.sort=function(t){return arguments.length?(e=t,n=null,a):e},a.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:xy(+t),a):r},a.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:xy(+t),a):i},a.padAngle=function(t){return arguments.length?(o="function"==typeof t?t:xy(+t),a):o},a},t.piecewise=function(t,n){for(var e=0,r=n.length-1,i=n[0],o=new Array(r<0?0:r);eu!=f>u&&a<(c-e)*(u-r)/(f-r)+e&&(s=!s),c=e,f=r;return s},t.polygonHull=function(t){if((e=t.length)<3)return null;var n,e,r=new Array(e),i=new 
Array(e);for(n=0;n=0;--n)f.push(t[r[o[n]][2]]);for(n=+u;n0?a[n-1]:r[0],n=o?[a[o-1],r]:[a[n-1],a[n]]},c.unknown=function(t){return arguments.length?(n=t,c):c},c.thresholds=function(){return a.slice()},c.copy=function(){return t().domain([e,r]).range(u).unknown(n)},Ch.apply(Wh(c),arguments)},t.scaleSequential=function t(){var n=Wh(Xv()(Fh));return n.copy=function(){return Gv(n,t())},Ph.apply(n,arguments)},t.scaleSequentialLog=function t(){var n=rd(Xv()).domain([1,10]);return n.copy=function(){return Gv(n,t()).base(n.base())},Ph.apply(n,arguments)},t.scaleSequentialPow=$v,t.scaleSequentialQuantile=function t(){var e=[],r=Fh;function o(t){if(!isNaN(t=+t))return r((i(e,t)-1)/(e.length-1))}return o.domain=function(t){if(!arguments.length)return e.slice();e=[];for(var r,i=0,a=t.length;i0)for(var e,r,i,o,a,u,c=0,f=t[n[0]].length;c0?(r[0]=o,r[1]=o+=i):i<0?(r[1]=a,r[0]=a+=i):(r[0]=0,r[1]=i)},t.stackOffsetExpand=function(t,n){if((r=t.length)>0){for(var e,r,i,o=0,a=t[0].length;o0){for(var e,r=0,i=t[n[0]],o=i.length;r0&&(r=(e=t[n[0]]).length)>0){for(var e,r,i,o=0,a=1;a0)throw new Error("cycle");return o}return e.id=function(n){return arguments.length?(t=Wl(n),e):t},e.parentId=function(t){return arguments.length?(n=Wl(t),e):n},e},t.style=ft,t.sum=function(t,n){var e,r=t.length,i=-1,o=0;if(null==n)for(;++i=0;--i)u.push(e=n.children[i]=new ph(r[i],i)),e.parent=n;return(a.parent=new ph(null,0)).children=[a],a}(i);if(c.eachAfter(o),c.parent.m=-c.z,c.eachBefore(a),r)i.eachBefore(u);else{var f=i,s=i,l=i;i.eachBefore(function(t){t.xs.x&&(s=t),t.depth>l.depth&&(l=t)});var h=f===s?1:t(f,s)/2,d=h-f.x,p=n/(s.x+h+d),v=e/(l.depth||1);i.eachBefore(function(t){t.x=(t.x+d)*p,t.y=t.depth*v})}return i}function o(n){var e=n.children,r=n.parent.children,i=n.i?r[n.i-1]:null;if(e){!function(t){for(var n,e=0,r=0,i=t.children,o=i.length;--o>=0;)(n=i[o]).z+=e,n.m+=e,e+=n.s+(r+=n.c)}(n);var o=(e[0].z+e[e.length-1].z)/2;i?(n.z=i.z+t(n._,i._),n.m=n.z-o):n.z=o}else 
i&&(n.z=i.z+t(n._,i._));n.parent.A=function(n,e,r){if(e){for(var i,o=n,a=n,u=e,c=o.parent.children[0],f=o.m,s=a.m,l=u.m,h=c.m;u=lh(u),o=sh(o),u&&o;)c=sh(c),(a=lh(a)).a=n,(i=u.z+l-o.z-f+t(u._,o._))>0&&(hh(dh(u,n,r),n,i),f+=i,s+=i),l+=u.m,f+=o.m,h+=c.m,s+=a.m;u&&!lh(a)&&(a.t=u,a.m+=l-s),o&&!sh(c)&&(c.t=o,c.m+=f-h,r=n)}return r}(n,i,n.parent.A||r[0])}function a(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function u(t){t.x*=n,t.y=t.depth*e}return i.separation=function(n){return arguments.length?(t=n,i):t},i.size=function(t){return arguments.length?(r=!1,n=+t[0],e=+t[1],i):r?null:[n,e]},i.nodeSize=function(t){return arguments.length?(r=!0,n=+t[0],e=+t[1],i):r?[n,e]:null},i},t.treemap=function(){var t=_h,n=!1,e=1,r=1,i=[0],o=Zl,a=Zl,u=Zl,c=Zl,f=Zl;function s(t){return t.x0=t.y0=0,t.x1=e,t.y1=r,t.eachBefore(l),i=[0],n&&t.eachBefore(eh),t}function l(n){var e=i[n.depth],r=n.x0+e,s=n.y0+e,l=n.x1-e,h=n.y1-e;l=e-1){var s=u[n];return s.x0=i,s.y0=o,s.x1=a,void(s.y1=c)}for(var l=f[n],h=r/2+l,d=n+1,p=e-1;d>>1;f[v]c-o){var _=(i*y+a*g)/r;t(n,d,g,i,o,_,c),t(d,e,y,_,o,a,c)}else{var b=(o*y+c*g)/r;t(n,d,g,i,o,a,b),t(d,e,y,i,b,a,c)}}(0,c,t.value,n,e,r,i)},t.treemapDice=rh,t.treemapResquarify=bh,t.treemapSlice=vh,t.treemapSliceDice=function(t,n,e,r,i){(1&t.depth?vh:rh)(t,n,e,r,i)},t.treemapSquarify=_h,t.tsv=sa,t.tsvFormat=Ko,t.tsvFormatBody=Jo,t.tsvFormatRow=na,t.tsvFormatRows=ta,t.tsvFormatValue=ea,t.tsvParse=Zo,t.tsvParseRows=Qo,t.utcDay=Zd,t.utcDays=Qd,t.utcFriday=ip,t.utcFridays=lp,t.utcHour=$d,t.utcHours=Wd,t.utcMillisecond=vd,t.utcMilliseconds=gd,t.utcMinute=Xd,t.utcMinutes=Gd,t.utcMonday=tp,t.utcMondays=up,t.utcMonth=dp,t.utcMonths=pp,t.utcSaturday=op,t.utcSaturdays=hp,t.utcSecond=bd,t.utcSeconds=md,t.utcSunday=Jd,t.utcSundays=ap,t.utcThursday=rp,t.utcThursdays=sp,t.utcTuesday=np,t.utcTuesdays=cp,t.utcWednesday=ep,t.utcWednesdays=fp,t.utcWeek=Jd,t.utcWeeks=ap,t.utcYear=vp,t.utcYears=gp,t.values=function(t){var n=[];for(var e in t)n.push(t[e]);return 
n},t.variance=c,t.version="5.15.0",t.voronoi=function(){var t=lb,n=hb,e=null;function r(r){return new Gb(r.map(function(e,i){var o=[Math.round(t(e,i,r)/Hb)*Hb,Math.round(n(e,i,r)/Hb)*Hb];return o.index=i,o.data=e,o}),e)}return r.polygons=function(t){return r(t).polygons()},r.links=function(t){return r(t).links()},r.triangles=function(t){return r(t).triangles()},r.x=function(n){return arguments.length?(t="function"==typeof n?n:sb(+n),r):t},r.y=function(t){return arguments.length?(n="function"==typeof t?t:sb(+t),r):n},r.extent=function(t){return arguments.length?(e=null==t?null:[[+t[0][0],+t[0][1]],[+t[1][0],+t[1][1]]],r):e&&[[e[0][0],e[0][1]],[e[1][0],e[1][1]]]},r.size=function(t){return arguments.length?(e=null==t?null:[[0,0],[+t[0],+t[1]]],r):e&&[e[1][0]-e[0][0],e[1][1]-e[0][1]]},r},t.window=ct,t.xml=da,t.zip=function(){return k(arguments)},t.zoom=function(){var n,e,r=nm,i=em,o=am,a=im,u=om,c=[0,1/0],f=[[-1/0,-1/0],[1/0,1/0]],s=250,l=Ie,h=I("start","zoom","end"),d=500,p=150,v=0;function g(t){t.property("__zoom",rm).on("wheel.zoom",M).on("mousedown.zoom",N).on("dblclick.zoom",T).filter(u).on("touchstart.zoom",A).on("touchmove.zoom",S).on("touchend.zoom touchcancel.zoom",k).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function y(t,n){return(n=Math.max(c[0],Math.min(c[1],n)))===t.k?t:new Zb(n,t.x,t.y)}function _(t,n,e){var r=n[0]-e[0]*t.k,i=n[1]-e[1]*t.k;return r===t.x&&i===t.y?t:new Zb(t.k,r,i)}function b(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function m(t,n,e){t.on("start.zoom",function(){x(this,arguments).start()}).on("interrupt.zoom end.zoom",function(){x(this,arguments).end()}).tween("zoom",function(){var t=this,r=arguments,o=x(t,r),a=i.apply(t,r),u=null==e?b(a):"function"==typeof e?e.apply(t,r):e,c=Math.max(a[1][0]-a[0][0],a[1][1]-a[0][1]),f=t.__zoom,s="function"==typeof n?n.apply(t,r):n,h=l(f.invert(u).concat(c/f.k),s.invert(u).concat(c/s.k));return function(t){if(1===t)t=s;else{var n=h(t),e=c/n[2];t=new 
Zb(e,u[0]-n[0]*e,u[1]-n[1]*e)}o.zoom(null,t)}})}function x(t,n,e){return!e&&t.__zooming||new w(t,n)}function w(t,n){this.that=t,this.args=n,this.active=0,this.extent=i.apply(t,n),this.taps=0}function M(){if(r.apply(this,arguments)){var t=x(this,arguments),n=this.__zoom,e=Math.max(c[0],Math.min(c[1],n.k*Math.pow(2,a.apply(this,arguments)))),i=Bt(this);if(t.wheel)t.mouse[0][0]===i[0]&&t.mouse[0][1]===i[1]||(t.mouse[1]=n.invert(t.mouse[0]=i)),clearTimeout(t.wheel);else{if(n.k===e)return;t.mouse=[i,n.invert(i)],Pr(this),t.start()}tm(),t.wheel=setTimeout(function(){t.wheel=null,t.end()},p),t.zoom("mouse",o(_(y(n,e),t.mouse[0],t.mouse[1]),t.extent,f))}}function N(){if(!e&&r.apply(this,arguments)){var n=x(this,arguments,!0),i=Rt(t.event.view).on("mousemove.zoom",function(){if(tm(),!n.moved){var e=t.event.clientX-u,r=t.event.clientY-c;n.moved=e*e+r*r>v}n.zoom("mouse",o(_(n.that.__zoom,n.mouse[0]=Bt(n.that),n.mouse[1]),n.extent,f))},!0).on("mouseup.zoom",function(){i.on("mousemove.zoom mouseup.zoom",null),jt(t.event.view,n.moved),tm(),n.end()},!0),a=Bt(this),u=t.event.clientX,c=t.event.clientY;Ht(t.event.view),Jb(),n.mouse=[a,this.__zoom.invert(a)],Pr(this),n.start()}}function T(){if(r.apply(this,arguments)){var n=this.__zoom,e=Bt(this),a=n.invert(e),u=n.k*(t.event.shiftKey?.5:2),c=o(_(y(n,u),e,a),i.apply(this,arguments),f);tm(),s>0?Rt(this).transition().duration(s).call(m,c,e):Rt(this).call(g.transform,c)}}function A(){if(r.apply(this,arguments)){var e,i,o,a,u=t.event.touches,c=u.length,f=x(this,arguments,t.event.changedTouches.length===c);for(Jb(),i=0;i=n);)++o;if(o-t>16&&r.subarray&&g)return g.decode(r.subarray(t,o));for(var i="";t>10,56320|1023&c)}}else i+=String.fromCharCode((31&a)<<6|s)}else i+=String.fromCharCode(a)}return i}function k(r,t){return r?E(v,r,t):""}function b(r,t,e,n){if(!(n>0))return 0;for(var o=e,i=e+n-1,a=0;a=55296&&s<=57343&&(s=65536+((1023&s)<<10)|1023&r.charCodeAt(++a)),s<=127){if(e>=i)break;t[e++]=s}else 
if(s<=2047){if(e+1>=i)break;t[e++]=192|s>>6,t[e++]=128|63&s}else if(s<=65535){if(e+2>=i)break;t[e++]=224|s>>12,t[e++]=128|s>>6&63,t[e++]=128|63&s}else{if(e+3>=i)break;t[e++]=240|s>>18,t[e++]=128|s>>12&63,t[e++]=128|s>>6&63,t[e++]=128|63&s}}return t[e]=0,e-o}function D(r){for(var t=0,e=0;e=55296&&n<=57343&&(n=65536+((1023&n)<<10)|1023&r.charCodeAt(++e)),n<=127?++t:t+=n<=2047?2:n<=65535?3:4}return t}function S(r){h=r,n.HEAP8=y=new Int8Array(r),n.HEAP16=new Int16Array(r),n.HEAP32=_=new Int32Array(r),n.HEAPU8=v=new Uint8Array(r),n.HEAPU16=new Uint16Array(r),n.HEAPU32=new Uint32Array(r),n.HEAPF32=new Float32Array(r),n.HEAPF64=w=new Float64Array(r)}"undefined"!=typeof TextDecoder&&new TextDecoder("utf-16le");var F=n.TOTAL_MEMORY||16777216;function P(r){for(;r.length>0;){var t=r.shift();if("function"!=typeof t){var e=t.func;"number"==typeof e?void 0===t.arg?n.dynCall_v(e):n.dynCall_vi(e,t.arg):e(void 0===t.arg?null:t.arg)}else t()}}(c=n.wasmMemory?n.wasmMemory:new WebAssembly.Memory({initial:F/65536}))&&(h=c.buffer),F=h.byteLength,S(h),_[2080]=5251360;var x=[],A=[],M=[],C=[],R=!1,O=Math.abs,j=Math.ceil,T=Math.floor,z=Math.min,B=0,N=null;function L(r){B++,n.monitorRunDependencies&&n.monitorRunDependencies(B)}function I(r){if(B--,n.monitorRunDependencies&&n.monitorRunDependencies(B),0==B&&N){var t=N;N=null,t()}}function H(r){throw n.onAbort&&n.onAbort(r),f(r+=""),l(r),p=!0,r="abort("+r+"). 
Build with -s ASSERTIONS=1 for more info.",new WebAssembly.RuntimeError(r)}function U(r){return String.prototype.startsWith?r.startsWith("data:application/octet-stream;base64,"):0===r.indexOf("data:application/octet-stream;base64,")}n.preloadedImages={},n.preloadedAudios={};var W,q,Y,J="expatlib.wasm";function V(){try{if(u)return new Uint8Array(u);throw"both async and sync fetching of the wasm failed"}catch(r){H(r)}}U(J)||(W=J,J=n.locateFile?n.locateFile(W,s):s+W);var X={1184:function(r){var t=n.getCache(n.CExpatJS)[r];if(!t.hasOwnProperty("startElement"))throw"a JSImplementation must implement all functions, you forgot CExpatJS::startElement.";t.startElement()},1404:function(r){var t=n.getCache(n.CExpatJS)[r];if(!t.hasOwnProperty("endElement"))throw"a JSImplementation must implement all functions, you forgot CExpatJS::endElement.";t.endElement()},1616:function(r){var t=n.getCache(n.CExpatJS)[r];if(!t.hasOwnProperty("characterData"))throw"a JSImplementation must implement all functions, you forgot CExpatJS::characterData.";t.characterData()}};function G(){var r=function(){var r=new Error;if(!r.stack){try{throw new Error}catch(t){r=t}if(!r.stack)return"(no stack trace available)"}return r.stack.toString()}();return n.extraStackTrace&&(r+="\n"+n.extraStackTrace()),r.replace(/\b_Z[\w\d_]+/g,(function(r){return r==r?r:r+" ["+r+"]"}))}A.push({func:function(){dr()}});var Z=42,K={splitPath:function(r){return/^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/.exec(r).slice(1)},normalizeArray:function(r,t){for(var e=0,n=r.length-1;n>=0;n--){var o=r[n];"."===o?r.splice(n,1):".."===o?(r.splice(n,1),e++):e&&(r.splice(n,1),e--)}if(t)for(;e;e--)r.unshift("..");return r},normalize:function(r){var t="/"===r.charAt(0),e="/"===r.substr(-1);return(r=K.normalizeArray(r.split("/").filter((function(r){return!!r})),!t).join("/"))||t||(r="."),r&&e&&(r+="/"),(t?"/":"")+r},dirname:function(r){var t=K.splitPath(r),e=t[0],n=t[1];return 
e||n?(n&&(n=n.substr(0,n.length-1)),e+n):"."},basename:function(r){if("/"===r)return"/";var t=r.lastIndexOf("/");return-1===t?r:r.substr(t+1)},extname:function(r){return K.splitPath(r)[3]},join:function(){var r=Array.prototype.slice.call(arguments,0);return K.normalize(r.join("/"))},join2:function(r,t){return K.normalize(r+"/"+t)}};function $(r){return n.___errno_location&&(_[n.___errno_location()>>2]=r),r}var Q={resolve:function(){for(var r="",t=!1,e=arguments.length-1;e>=-1&&!t;e--){var n=e>=0?arguments[e]:er.cwd();if("string"!=typeof n)throw new TypeError("Arguments to path.resolve must be strings");if(!n)return"";r=n+"/"+r,t="/"===n.charAt(0)}return(t?"/":"")+(r=K.normalizeArray(r.split("/").filter((function(r){return!!r})),!t).join("/"))||"."},relative:function(r,t){function e(r){for(var t=0;t=0&&""===r[e];e--);return t>e?[]:r.slice(t,e-t+1)}r=Q.resolve(r).substr(1),t=Q.resolve(t).substr(1);for(var n=e(r.split("/")),o=e(t.split("/")),i=Math.min(n.length,o.length),a=i,s=0;s0&&(f(E(r.output,0)),r.output=[])}},default_tty1_ops:{put_char:function(r,t){null===t||10===t?(l(E(r.output,0)),r.output=[]):0!=t&&r.output.push(t)},flush:function(r){r.output&&r.output.length>0&&(l(E(r.output,0)),r.output=[])}}},tr={ops_table:null,mount:function(r){return tr.createNode(null,"/",16895,0)},createNode:function(r,t,e,n){if(er.isBlkdev(e)||er.isFIFO(e))throw new 
er.ErrnoError(63);tr.ops_table||(tr.ops_table={dir:{node:{getattr:tr.node_ops.getattr,setattr:tr.node_ops.setattr,lookup:tr.node_ops.lookup,mknod:tr.node_ops.mknod,rename:tr.node_ops.rename,unlink:tr.node_ops.unlink,rmdir:tr.node_ops.rmdir,readdir:tr.node_ops.readdir,symlink:tr.node_ops.symlink},stream:{llseek:tr.stream_ops.llseek}},file:{node:{getattr:tr.node_ops.getattr,setattr:tr.node_ops.setattr},stream:{llseek:tr.stream_ops.llseek,read:tr.stream_ops.read,write:tr.stream_ops.write,allocate:tr.stream_ops.allocate,mmap:tr.stream_ops.mmap,msync:tr.stream_ops.msync}},link:{node:{getattr:tr.node_ops.getattr,setattr:tr.node_ops.setattr,readlink:tr.node_ops.readlink},stream:{}},chrdev:{node:{getattr:tr.node_ops.getattr,setattr:tr.node_ops.setattr},stream:er.chrdev_stream_ops}});var o=er.createNode(r,t,e,n);return er.isDir(o.mode)?(o.node_ops=tr.ops_table.dir.node,o.stream_ops=tr.ops_table.dir.stream,o.contents={}):er.isFile(o.mode)?(o.node_ops=tr.ops_table.file.node,o.stream_ops=tr.ops_table.file.stream,o.usedBytes=0,o.contents=null):er.isLink(o.mode)?(o.node_ops=tr.ops_table.link.node,o.stream_ops=tr.ops_table.link.stream):er.isChrdev(o.mode)&&(o.node_ops=tr.ops_table.chrdev.node,o.stream_ops=tr.ops_table.chrdev.stream),o.timestamp=Date.now(),r&&(r.contents[t]=o),o},getFileDataAsRegularArray:function(r){if(r.contents&&r.contents.subarray){for(var t=[],e=0;e=t)){t=Math.max(t,e*(e<1048576?2:1.125)|0),0!=e&&(t=Math.max(t,256));var n=r.contents;r.contents=new Uint8Array(t),r.usedBytes>0&&r.contents.set(n.subarray(0,r.usedBytes),0)}},resizeFileStorage:function(r,t){if(r.usedBytes!=t){if(0==t)return r.contents=null,void(r.usedBytes=0);if(!r.contents||r.contents.subarray){var e=r.contents;return r.contents=new Uint8Array(t),e&&r.contents.set(e.subarray(0,Math.min(t,r.usedBytes))),void(r.usedBytes=t)}if(r.contents||(r.contents=[]),r.contents.length>t)r.contents.length=t;else for(;r.contents.length=r.node.usedBytes)return 0;var 
a=Math.min(r.node.usedBytes-o,n);if(a>8&&i.subarray)t.set(i.subarray(o,o+a),e);else for(var s=0;s0||o+n8)throw new er.ErrnoError(32);for(var o=K.normalizeArray(r.split("/").filter((function(r){return!!r})),!1),i=er.root,a="/",s=0;s40)throw new er.ErrnoError(32)}}return{path:a,node:i}},getPath:function(r){for(var t;;){if(er.isRoot(r)){var e=r.mount.mountpoint;return t?"/"!==e[e.length-1]?e+"/"+t:e+t:e}t=t?r.name+"/"+t:r.name,r=r.parent}},hashName:function(r,t){for(var e=0,n=0;n>>0)%er.nameTable.length},hashAddNode:function(r){var t=er.hashName(r.parent.id,r.name);r.name_next=er.nameTable[t],er.nameTable[t]=r},hashRemoveNode:function(r){var t=er.hashName(r.parent.id,r.name);if(er.nameTable[t]===r)er.nameTable[t]=r.name_next;else for(var e=er.nameTable[t];e;){if(e.name_next===r){e.name_next=r.name_next;break}e=e.name_next}},lookupNode:function(r,t){var e=er.mayLookup(r);if(e)throw new er.ErrnoError(e,r);for(var n=er.hashName(r.id,t),o=er.nameTable[n];o;o=o.name_next){var i=o.name;if(o.parent.id===r.id&&i===t)return o}return er.lookup(r,t)},createNode:function(r,t,e,n){er.FSNode||(er.FSNode=function(r,t,e,n){r||(r=this),this.parent=r,this.mount=r.mount,this.mounted=null,this.id=er.nextInode++,this.name=t,this.mode=e,this.node_ops={},this.stream_ops={},this.rdev=n},er.FSNode.prototype={},Object.defineProperties(er.FSNode.prototype,{read:{get:function(){return 365==(365&this.mode)},set:function(r){r?this.mode|=365:this.mode&=-366}},write:{get:function(){return 146==(146&this.mode)},set:function(r){r?this.mode|=146:this.mode&=-147}},isFolder:{get:function(){return er.isDir(this.mode)}},isDevice:{get:function(){return er.isChrdev(this.mode)}}}));var o=new er.FSNode(r,t,e,n);return er.hashAddNode(o),o},destroyNode:function(r){er.hashRemoveNode(r)},isRoot:function(r){return r===r.parent},isMountpoint:function(r){return!!r.mounted},isFile:function(r){return 32768==(61440&r)},isDir:function(r){return 16384==(61440&r)},isLink:function(r){return 
40960==(61440&r)},isChrdev:function(r){return 8192==(61440&r)},isBlkdev:function(r){return 24576==(61440&r)},isFIFO:function(r){return 4096==(61440&r)},isSocket:function(r){return 49152==(49152&r)},flagModes:{r:0,rs:1052672,"r+":2,w:577,wx:705,xw:705,"w+":578,"wx+":706,"xw+":706,a:1089,ax:1217,xa:1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function(r){var t=er.flagModes[r];if(void 0===t)throw new Error("Unknown file open mode: "+r);return t},flagsToPermissionString:function(r){var t=["r","w","rw"][3&r];return 512&r&&(t+="w"),t},nodePermissions:function(r,t){return er.ignorePermissions||(-1===t.indexOf("r")||292&r.mode)&&(-1===t.indexOf("w")||146&r.mode)&&(-1===t.indexOf("x")||73&r.mode)?0:2},mayLookup:function(r){var t=er.nodePermissions(r,"x");return t||(r.node_ops.lookup?0:2)},mayCreate:function(r,t){try{return er.lookupNode(r,t),20}catch(r){}return er.nodePermissions(r,"wx")},mayDelete:function(r,t,e){var n;try{n=er.lookupNode(r,t)}catch(r){return r.errno}var o=er.nodePermissions(r,"wx");if(o)return o;if(e){if(!er.isDir(n.mode))return 54;if(er.isRoot(n)||er.getPath(n)===er.cwd())return 10}else if(er.isDir(n.mode))return 31;return 0},mayOpen:function(r,t){return r?er.isLink(r.mode)?32:er.isDir(r.mode)&&("r"!==er.flagsToPermissionString(t)||512&t)?31:er.nodePermissions(r,er.flagsToPermissionString(t)):44},MAX_OPEN_FDS:4096,nextfd:function(r,t){r=r||0,t=t||er.MAX_OPEN_FDS;for(var e=r;e<=t;e++)if(!er.streams[e])return e;throw new er.ErrnoError(33)},getStream:function(r){return er.streams[r]},createStream:function(r,t,e){er.FSStream||(er.FSStream=function(){},er.FSStream.prototype={},Object.defineProperties(er.FSStream.prototype,{object:{get:function(){return this.node},set:function(r){this.node=r}},isRead:{get:function(){return 1!=(2097155&this.flags)}},isWrite:{get:function(){return 0!=(2097155&this.flags)}},isAppend:{get:function(){return 1024&this.flags}}}));var n=new er.FSStream;for(var o in r)n[o]=r[o];r=n;var i=er.nextfd(t,e);return 
r.fd=i,er.streams[i]=r,r},closeStream:function(r){er.streams[r]=null},chrdev_stream_ops:{open:function(r){var t=er.getDevice(r.node.rdev);r.stream_ops=t.stream_ops,r.stream_ops.open&&r.stream_ops.open(r)},llseek:function(){throw new er.ErrnoError(70)}},major:function(r){return r>>8},minor:function(r){return 255&r},makedev:function(r,t){return r<<8|t},registerDevice:function(r,t){er.devices[r]={stream_ops:t}},getDevice:function(r){return er.devices[r]},getMounts:function(r){for(var t=[],e=[r];e.length;){var n=e.pop();t.push(n),e.push.apply(e,n.mounts)}return t},syncfs:function(r,t){"function"==typeof r&&(t=r,r=!1),er.syncFSRequests++,er.syncFSRequests>1&&l("warning: "+er.syncFSRequests+" FS.syncfs operations in flight at once, probably just doing extra work");var e=er.getMounts(er.root.mount),n=0;function o(r){return er.syncFSRequests--,t(r)}function i(r){if(r)return i.errored?void 0:(i.errored=!0,o(r));++n>=e.length&&o(null)}e.forEach((function(t){if(!t.type.syncfs)return i(null);t.type.syncfs(t,r,i)}))},mount:function(r,t,e){var n,o="/"===e,i=!e;if(o&&er.root)throw new er.ErrnoError(10);if(!o&&!i){var a=er.lookupPath(e,{follow_mount:!1});if(e=a.path,n=a.node,er.isMountpoint(n))throw new er.ErrnoError(10);if(!er.isDir(n.mode))throw new er.ErrnoError(54)}var s={type:r,opts:t,mountpoint:e,mounts:[]},u=r.mount(s);return u.mount=s,s.root=u,o?er.root=u:n&&(n.mounted=s,n.mount&&n.mount.mounts.push(s)),u},unmount:function(r){var t=er.lookupPath(r,{follow_mount:!1});if(!er.isMountpoint(t.node))throw new er.ErrnoError(28);var e=t.node,n=e.mounted,o=er.getMounts(n);Object.keys(er.nameTable).forEach((function(r){for(var t=er.nameTable[r];t;){var e=t.name_next;-1!==o.indexOf(t.mount)&&er.destroyNode(t),t=e}})),e.mounted=null;var i=e.mount.mounts.indexOf(n);e.mount.mounts.splice(i,1)},lookup:function(r,t){return r.node_ops.lookup(r,t)},mknod:function(r,t,e){var n=er.lookupPath(r,{parent:!0}).node,o=K.basename(r);if(!o||"."===o||".."===o)throw new er.ErrnoError(28);var 
i=er.mayCreate(n,o);if(i)throw new er.ErrnoError(i);if(!n.node_ops.mknod)throw new er.ErrnoError(63);return n.node_ops.mknod(n,o,t,e)},create:function(r,t){return t=void 0!==t?t:438,t&=4095,t|=32768,er.mknod(r,t,0)},mkdir:function(r,t){return t=void 0!==t?t:511,t&=1023,t|=16384,er.mknod(r,t,0)},mkdirTree:function(r,t){for(var e=r.split("/"),n="",o=0;othis.length-1||r<0)){var t=r%this.chunkSize,e=r/this.chunkSize|0;return this.getter(e)[t]}},i.prototype.setDataGetter=function(r){this.getter=r},i.prototype.cacheLength=function(){var r=new XMLHttpRequest;if(r.open("HEAD",e,!1),r.send(null),!(r.status>=200&&r.status<300||304===r.status))throw new Error("Couldn't load "+e+". Status: "+r.status);var t,n=Number(r.getResponseHeader("Content-length")),o=(t=r.getResponseHeader("Accept-Ranges"))&&"bytes"===t,i=(t=r.getResponseHeader("Content-Encoding"))&&"gzip"===t,a=1048576;o||(a=n);var s=this;s.setDataGetter((function(r){var t=r*a,o=(r+1)*a-1;if(o=Math.min(o,n-1),void 0===s.chunks[r]&&(s.chunks[r]=function(r,t){if(r>t)throw new Error("invalid range ("+r+", "+t+") or no bytes requested!");if(t>n-1)throw new Error("only "+n+" bytes available! programmer error!");var o=new XMLHttpRequest;if(o.open("GET",e,!1),n!==a&&o.setRequestHeader("Range","bytes="+r+"-"+t),"undefined"!=typeof Uint8Array&&(o.responseType="arraybuffer"),o.overrideMimeType&&o.overrideMimeType("text/plain; charset=x-user-defined"),o.send(null),!(o.status>=200&&o.status<300||304===o.status))throw new Error("Couldn't load "+e+". Status: "+o.status);return void 0!==o.response?new Uint8Array(o.response||[]):ur(o.responseText||"",!0)}(t,o)),void 0===s.chunks[r])throw new Error("doXHR failed!");return s.chunks[r]})),!i&&n||(a=n=1,n=this.getter(0).length,a=n,f("LazyFiles on gzip forces download of the whole file when length is accessed")),this._length=n,this._chunkSize=a,this.lengthKnown=!0},"undefined"!=typeof XMLHttpRequest)throw"Cannot do synchronous binary XHRs outside webworkers in modern browsers. 
Use --embed-file or --preload-file in emcc";var a={isDevice:!1,url:e},s=er.createFile(r,t,a,n,o);a.contents?s.contents=a.contents:a.url&&(s.contents=null,s.url=a.url),Object.defineProperties(s,{usedBytes:{get:function(){return this.contents.length}}});var u={};return Object.keys(s.stream_ops).forEach((function(r){var t=s.stream_ops[r];u[r]=function(){if(!er.forceLoadFile(s))throw new er.ErrnoError(29);return t.apply(null,arguments)}})),u.read=function(r,t,e,n,o){if(!er.forceLoadFile(s))throw new er.ErrnoError(29);var i=r.node.contents;if(o>=i.length)return 0;var a=Math.min(i.length-o,n);if(i.slice)for(var u=0;u>2]=n.dev,_[e+4>>2]=0,_[e+8>>2]=n.ino,_[e+12>>2]=n.mode,_[e+16>>2]=n.nlink,_[e+20>>2]=n.uid,_[e+24>>2]=n.gid,_[e+28>>2]=n.rdev,_[e+32>>2]=0,Y=[n.size>>>0,(q=n.size,+O(q)>=1?q>0?(0|z(+T(q/4294967296),4294967295))>>>0:~~+j((q-+(~~q>>>0))/4294967296)>>>0:0)],_[e+40>>2]=Y[0],_[e+44>>2]=Y[1],_[e+48>>2]=4096,_[e+52>>2]=n.blocks,_[e+56>>2]=n.atime.getTime()/1e3|0,_[e+60>>2]=0,_[e+64>>2]=n.mtime.getTime()/1e3|0,_[e+68>>2]=0,_[e+72>>2]=n.ctime.getTime()/1e3|0,_[e+76>>2]=0,Y=[n.ino>>>0,(q=n.ino,+O(q)>=1?q>0?(0|z(+T(q/4294967296),4294967295))>>>0:~~+j((q-+(~~q>>>0))/4294967296)>>>0:0)],_[e+80>>2]=Y[0],_[e+84>>2]=Y[1],0},doMsync:function(r,t,e,n,o){var i=v.slice(r,r+e);er.msync(t,i,o,e,n)},doMkdir:function(r,t){return"/"===(r=K.normalize(r))[r.length-1]&&(r=r.substr(0,r.length-1)),er.mkdir(r,t,0),0},doMknod:function(r,t,e){switch(61440&t){case 32768:case 8192:case 24576:case 4096:case 49152:break;default:return-28}return er.mknod(r,t,e),0},doReadlink:function(r,t,e){if(e<=0)return-28;var n=er.readlink(r),o=Math.min(e,D(n)),i=y[t+o];return b(n,v,t,e+1),y[t+o]=i,o},doAccess:function(r,t){if(-8&t)return-28;var e;if(!(e=er.lookupPath(r,{follow:!0}).node))return-44;var n="";return 4&t&&(n+="r"),2&t&&(n+="w"),1&t&&(n+="x"),n&&er.nodePermissions(e,n)?-2:0},doDup:function(r,t,e){var n=er.getStream(e);return n&&er.close(n),er.open(r,t,0,e,e).fd},doReadv:function(r,t,e,n){for(var 
o=0,i=0;i>2],s=_[t+(8*i+4)>>2],u=er.read(r,y,a,s,n);if(u<0)return-1;if(o+=u,u>2],s=_[t+(8*i+4)>>2],u=er.write(r,y,a,s,n);if(u<0)return-1;o+=u}return o},varargs:0,get:function(r){return nr.varargs+=4,_[nr.varargs-4>>2]},getStr:function(){return k(nr.get())},getStreamFromFD:function(r){void 0===r&&(r=nr.get());var t=er.getStream(r);if(!t)throw new er.ErrnoError(8);return t},get64:function(){var r=nr.get();return nr.get(),r},getZero:function(){nr.get()}};function or(r){try{return c.grow(r-h.byteLength+65535>>16),S(c.buffer),1}catch(r){}}var ir={};function ar(){if(!ar.strings){var r={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:a||"./this.program"};for(var t in ir)r[t]=ir[t];var e=[];for(var t in r)e.push(t+"="+r[t]);ar.strings=e}return ar.strings}function sr(r,t){sr.array||(sr.array=[]);var e,n=sr.array;for(n.length=0;e=v[r++];)100===e||102===e?(t=t+7&-8,n.push(w[t>>3]),t+=8):(t=t+3&-4,n.push(_[t>>2]),t+=4);return n}function ur(r,t,e){var n=e>0?e:D(r)+1,o=new Array(n),i=b(r,o,0,o.length);return t&&(o.length=i),o}er.staticInit();var cr={h:function(r,t){nr.varargs=t;try{return Z}catch(r){return void 0!==er&&r instanceof er.ErrnoError||H(r),-r.errno}},e:function(){H()},a:function(r,t,e){var n=sr(t,e);return X[r].apply(null,n)},c:function(r,t,e){v.set(v.subarray(t,t+e),r)},d:function(r){var t=v.length;if(r>2147418112)return!1;for(var e,n,o=1;o<=4;o*=2){var i=t*(1+.2/o);if(i=Math.min(i,r+100663296),or(Math.min(2147418112,((e=Math.max(16777216,r,i))%(n=65536)>0&&(e+=n-e%n),e))))return!0}return!1},f:function(r,t){var e=ar(),n=0;return e.forEach((function(e,o){var i=t+n;_[r+4*o>>2]=i,function(r,t,e){for(var n=0;n>0]=r.charCodeAt(n);e||(y[t>>0]=0)}(e,i),n+=e.length+1})),0},g:function(r,t){var e=ar();_[r>>2]=e.length;var n=0;return e.forEach((function(r){n+=r.length+1})),_[t>>2]=n,0},j:function(r){try{var t=nr.getStreamFromFD(r);return 
er.close(t),0}catch(r){return void 0!==er&&r instanceof er.ErrnoError||H(r),r.errno}},i:function(r,t,e,n,o){try{var i=nr.getStreamFromFD(r),a=4294967296*e+(t>>>0);return a<=-9007199254740992||a>=9007199254740992?-61:(er.llseek(i,a,n),Y=[i.position>>>0,(q=i.position,+O(q)>=1?q>0?(0|z(+T(q/4294967296),4294967295))>>>0:~~+j((q-+(~~q>>>0))/4294967296)>>>0:0)],_[o>>2]=Y[0],_[o+4>>2]=Y[1],i.getdents&&0===a&&0===n&&(i.getdents=null),0)}catch(r){return void 0!==er&&r instanceof er.ErrnoError||H(r),r.errno}},b:function(r,t,e,n){try{var o=nr.getStreamFromFD(r),i=nr.doWritev(o,t,e);return _[n>>2]=i,0}catch(r){return void 0!==er&&r instanceof er.ErrnoError||H(r),r.errno}},k:function(r){var t=Date.now();return _[r>>2]=t/1e3|0,_[r+4>>2]=t%1e3*1e3|0,0},memory:c,table:d},fr=function(){var r={a:cr};function t(r,t){var e=r.exports;n.asm=e,I()}function e(r){t(r.instance)}function o(t){return(u||"function"!=typeof fetch?new Promise((function(r,t){r(V())})):fetch(J,{credentials:"same-origin"}).then((function(r){if(!r.ok)throw"failed to load wasm binary file at '"+J+"'";return r.arrayBuffer()})).catch((function(){return V()}))).then((function(t){return WebAssembly.instantiate(t,r)})).then(t,(function(r){l("failed to asynchronously prepare wasm: "+r),H(r)}))}if(L(),n.instantiateWasm)try{return n.instantiateWasm(r,t)}catch(r){return l("Module.instantiateWasm callback failed with error: "+r),!1}return function(){if(u||"function"!=typeof WebAssembly.instantiateStreaming||U(J)||"function"!=typeof fetch)return o(e);fetch(J,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,r).then(e,(function(r){l("wasm streaming compile failed: "+r),l("falling back to ArrayBuffer instantiation"),o(e)}))}))}(),{}}();n.asm=fr;var 
lr,dr=n.___wasm_call_ctors=function(){return(dr=n.___wasm_call_ctors=n.asm.l).apply(null,arguments)},pr=n._emscripten_bind_CExpat_CExpat_0=function(){return(pr=n._emscripten_bind_CExpat_CExpat_0=n.asm.m).apply(null,arguments)},mr=n._emscripten_bind_CExpat_create_0=function(){return(mr=n._emscripten_bind_CExpat_create_0=n.asm.n).apply(null,arguments)},hr=n._emscripten_bind_CExpat_destroy_0=function(){return(hr=n._emscripten_bind_CExpat_destroy_0=n.asm.o).apply(null,arguments)},yr=n._emscripten_bind_CExpat_parse_1=function(){return(yr=n._emscripten_bind_CExpat_parse_1=n.asm.p).apply(null,arguments)},vr=n._emscripten_bind_CExpat_tag_0=function(){return(vr=n._emscripten_bind_CExpat_tag_0=n.asm.q).apply(null,arguments)},_r=n._emscripten_bind_CExpat_attrs_0=function(){return(_r=n._emscripten_bind_CExpat_attrs_0=n.asm.r).apply(null,arguments)},wr=n._emscripten_bind_CExpat_content_0=function(){return(wr=n._emscripten_bind_CExpat_content_0=n.asm.s).apply(null,arguments)},gr=n._emscripten_bind_CExpat_startElement_0=function(){return(gr=n._emscripten_bind_CExpat_startElement_0=n.asm.t).apply(null,arguments)},Er=n._emscripten_bind_CExpat_endElement_0=function(){return(Er=n._emscripten_bind_CExpat_endElement_0=n.asm.u).apply(null,arguments)},kr=n._emscripten_bind_CExpat_characterData_0=function(){return(kr=n._emscripten_bind_CExpat_characterData_0=n.asm.v).apply(null,arguments)},br=n._emscripten_bind_CExpat___destroy___0=function(){return(br=n._emscripten_bind_CExpat___destroy___0=n.asm.w).apply(null,arguments)},Dr=n._emscripten_bind_CExpatJS_CExpatJS_0=function(){return(Dr=n._emscripten_bind_CExpatJS_CExpatJS_0=n.asm.x).apply(null,arguments)},Sr=n._emscripten_bind_CExpatJS_startElement_0=function(){return(Sr=n._emscripten_bind_CExpatJS_startElement_0=n.asm.y).apply(null,arguments)},Fr=n._emscripten_bind_CExpatJS_endElement_0=function(){return(Fr=n._emscripten_bind_CExpatJS_endElement_0=n.asm.z).apply(null,arguments)},Pr=n._emscripten_bind_CExpatJS_characterData_0=function(){ret
urn(Pr=n._emscripten_bind_CExpatJS_characterData_0=n.asm.A).apply(null,arguments)},xr=n._emscripten_bind_CExpatJS___destroy___0=function(){return(xr=n._emscripten_bind_CExpatJS___destroy___0=n.asm.B).apply(null,arguments)},Ar=n._emscripten_bind_VoidPtr___destroy___0=function(){return(Ar=n._emscripten_bind_VoidPtr___destroy___0=n.asm.C).apply(null,arguments)},Mr=n._malloc=function(){return(Mr=n._malloc=n.asm.D).apply(null,arguments)};function Cr(r){function t(){lr||(lr=!0,p||(R=!0,n.noFSInit||er.init.initialized||er.init(),rr.init(),P(A),er.ignorePermissions=!1,P(M),n.onRuntimeInitialized&&n.onRuntimeInitialized(),function(){if(n.postRun)for("function"==typeof n.postRun&&(n.postRun=[n.postRun]);n.postRun.length;)r=n.postRun.shift(),C.unshift(r);var r;P(C)}()))}B>0||(function(){if(n.preRun)for("function"==typeof n.preRun&&(n.preRun=[n.preRun]);n.preRun.length;)r=n.preRun.shift(),x.unshift(r);var r;P(x)}(),B>0||(n.setStatus?(n.setStatus("Running..."),setTimeout((function(){setTimeout((function(){n.setStatus("")}),1),t()}),1)):t()))}if(n._free=function(){return(n._free=n.asm.E).apply(null,arguments)},n.___errno_location=function(){return(n.___errno_location=n.asm.F).apply(null,arguments)},n.stackAlloc=function(){return(n.stackAlloc=n.asm.G).apply(null,arguments)},n.dynCall_vi=function(){return(n.dynCall_vi=n.asm.H).apply(null,arguments)},n.asm=fr,n.then=function(r){if(lr)r(n);else{var t=n.onRuntimeInitialized;n.onRuntimeInitialized=function(){t&&t(),r(n)}}return n},N=function r(){lr||Cr(),lr||(N=r)},n.run=Cr,n.preInit)for("function"==typeof n.preInit&&(n.preInit=[n.preInit]);n.preInit.length>0;)n.preInit.pop()();function Rr(){}function Or(r){return(r||Rr).__cache__}function jr(r,t){var e=Or(t),n=e[r];return n||((n=Object.create((t||Rr).prototype)).ptr=r,e[r]=n)}Cr(),Rr.prototype=Object.create(Rr.prototype),Rr.prototype.constructor=Rr,Rr.prototype.__class__=Rr,Rr.__cache__={},n.WrapperObject=Rr,n.getCache=Or,n.wrapPointer=jr,n.castObject=function(r,t){return 
jr(r.ptr,t)},n.NULL=jr(0),n.destroy=function(r){if(!r.__destroy__)throw"Error: Cannot destroy object. (Did you create it yourself?)";r.__destroy__(),delete Or(r.__class__)[r.ptr]},n.compare=function(r,t){return r.ptr===t.ptr},n.getPointer=function(r){return r.ptr},n.getClass=function(r){return r.__class__};var Tr,zr={buffer:0,size:0,pos:0,temps:[],needed:0,prepare:function(){if(zr.needed){for(var r=0;r=zr.size?(m(i>0),zr.needed+=i,e=n._malloc(i),zr.temps.push(e)):(e=zr.buffer+zr.pos,zr.pos+=i),e},copy:function(r,t,e){var n=e;switch(t.BYTES_PER_ELEMENT){case 2:n>>=1;break;case 4:n>>=2;break;case 8:n>>=3}for(var o=0;o=n);)++o;if(o-t>16&&r.subarray&&S)return S.decode(r.subarray(t,o));for(var i="";t>10,56320|1023&c)}}else i+=String.fromCharCode((31&a)<<6|s)}else i+=String.fromCharCode(a)}return i}function P(r,t){return r?F(E,r,t):""}function x(r,t,e,n){if(!(n>0))return 0;for(var o=e,i=e+n-1,a=0;a=55296&&s<=57343&&(s=65536+((1023&s)<<10)|1023&r.charCodeAt(++a)),s<=127){if(e>=i)break;t[e++]=s}else if(s<=2047){if(e+1>=i)break;t[e++]=192|s>>6,t[e++]=128|63&s}else if(s<=65535){if(e+2>=i)break;t[e++]=224|s>>12,t[e++]=128|s>>6&63,t[e++]=128|63&s}else{if(e+3>=i)break;t[e++]=240|s>>18,t[e++]=128|s>>12&63,t[e++]=128|s>>6&63,t[e++]=128|63&s}}return t[e]=0,e-o}function A(r){for(var t=0,e=0;e=55296&&n<=57343&&(n=65536+((1023&n)<<10)|1023&r.charCodeAt(++e)),n<=127?++t:t+=n<=2047?2:n<=65535?3:4}return t}function M(r){w=r,n.HEAP8=g=new Int8Array(r),n.HEAP16=k=new Int16Array(r),n.HEAP32=b=new Int32Array(r),n.HEAPU8=E=new Uint8Array(r),n.HEAPU16=new Uint16Array(r),n.HEAPU32=new Uint32Array(r),n.HEAPF32=new Float32Array(r),n.HEAPF64=D=new Float64Array(r)}"undefined"!=typeof TextDecoder&&new TextDecoder("utf-16le");var C=n.TOTAL_MEMORY||16777216;function R(r){for(;r.length>0;){var t=r.shift();if("function"!=typeof t){var e=t.func;"number"==typeof e?void 0===t.arg?n.dynCall_v(e):n.dynCall_vi(e,t.arg):e(void 0===t.arg?null:t.arg)}else t()}}(p=n.wasmMemory?n.wasmMemory:new 
WebAssembly.Memory({initial:C/65536}))&&(w=p.buffer),C=w.byteLength,M(w),b[54772]=5462128;var O=[],j=[],T=[],z=[],B=!1,N=Math.abs,L=Math.ceil,I=Math.floor,H=Math.min,U=0,W=null;function q(r){U++,n.monitorRunDependencies&&n.monitorRunDependencies(U)}function Y(r){if(U--,n.monitorRunDependencies&&n.monitorRunDependencies(U),0==U&&W){var t=W;W=null,t()}}function J(r){throw n.onAbort&&n.onAbort(r),c(r+=""),f(r),v=!0,r="abort("+r+"). Build with -s ASSERTIONS=1 for more info.",new WebAssembly.RuntimeError(r)}function V(r){return String.prototype.startsWith?r.startsWith("data:application/octet-stream;base64,"):0===r.indexOf("data:application/octet-stream;base64,")}n.preloadedImages={},n.preloadedAudios={};var X,G,Z,K="graphvizlib.wasm";function $(){try{if(l)return new Uint8Array(l);throw"both async and sync fetching of the wasm failed"}catch(r){J(r)}}V(K)||(X=K,K=n.locateFile?n.locateFile(X,u):u+X);var Q={1088:function(r,t){var e=P(r),n=P(t);sr.createPath("/",nr.dirname(e)),sr.writeFile(nr.join("/",e),n)}};function rr(){var r=function(){var r=new Error;if(!r.stack){try{throw new Error}catch(t){r=t}if(!r.stack)return"(no stack trace available)"}return r.stack.toString()}();return n.extraStackTrace&&(r+="\n"+n.extraStackTrace()),r.replace(/\b_Z[\w\d_]+/g,(function(r){return r==r?r:r+" ["+r+"]"}))}function tr(){J()}function er(r){return n.___errno_location&&(b[n.___errno_location()>>2]=r),r}j.push({func:function(){Fr()}});var nr={splitPath:function(r){return/^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/.exec(r).slice(1)},normalizeArray:function(r,t){for(var e=0,n=r.length-1;n>=0;n--){var o=r[n];"."===o?r.splice(n,1):".."===o?(r.splice(n,1),e++):e&&(r.splice(n,1),e--)}if(t)for(;e;e--)r.unshift("..");return r},normalize:function(r){var t="/"===r.charAt(0),e="/"===r.substr(-1);return(r=nr.normalizeArray(r.split("/").filter((function(r){return!!r})),!t).join("/"))||t||(r="."),r&&e&&(r+="/"),(t?"/":"")+r},dirname:function(r){var 
t=nr.splitPath(r),e=t[0],n=t[1];return e||n?(n&&(n=n.substr(0,n.length-1)),e+n):"."},basename:function(r){if("/"===r)return"/";var t=r.lastIndexOf("/");return-1===t?r:r.substr(t+1)},extname:function(r){return nr.splitPath(r)[3]},join:function(){var r=Array.prototype.slice.call(arguments,0);return nr.normalize(r.join("/"))},join2:function(r,t){return nr.normalize(r+"/"+t)}},or={resolve:function(){for(var r="",t=!1,e=arguments.length-1;e>=-1&&!t;e--){var n=e>=0?arguments[e]:sr.cwd();if("string"!=typeof n)throw new TypeError("Arguments to path.resolve must be strings");if(!n)return"";r=n+"/"+r,t="/"===n.charAt(0)}return(t?"/":"")+(r=nr.normalizeArray(r.split("/").filter((function(r){return!!r})),!t).join("/"))||"."},relative:function(r,t){function e(r){for(var t=0;t=0&&""===r[e];e--);return t>e?[]:r.slice(t,e-t+1)}r=or.resolve(r).substr(1),t=or.resolve(t).substr(1);for(var n=e(r.split("/")),o=e(t.split("/")),i=Math.min(n.length,o.length),a=i,s=0;s0&&(c(F(r.output,0)),r.output=[])}},default_tty1_ops:{put_char:function(r,t){null===t||10===t?(f(F(r.output,0)),r.output=[]):0!=t&&r.output.push(t)},flush:function(r){r.output&&r.output.length>0&&(f(F(r.output,0)),r.output=[])}}},ar={ops_table:null,mount:function(r){return ar.createNode(null,"/",16895,0)},createNode:function(r,t,e,n){if(sr.isBlkdev(e)||sr.isFIFO(e))throw new 
sr.ErrnoError(63);ar.ops_table||(ar.ops_table={dir:{node:{getattr:ar.node_ops.getattr,setattr:ar.node_ops.setattr,lookup:ar.node_ops.lookup,mknod:ar.node_ops.mknod,rename:ar.node_ops.rename,unlink:ar.node_ops.unlink,rmdir:ar.node_ops.rmdir,readdir:ar.node_ops.readdir,symlink:ar.node_ops.symlink},stream:{llseek:ar.stream_ops.llseek}},file:{node:{getattr:ar.node_ops.getattr,setattr:ar.node_ops.setattr},stream:{llseek:ar.stream_ops.llseek,read:ar.stream_ops.read,write:ar.stream_ops.write,allocate:ar.stream_ops.allocate,mmap:ar.stream_ops.mmap,msync:ar.stream_ops.msync}},link:{node:{getattr:ar.node_ops.getattr,setattr:ar.node_ops.setattr,readlink:ar.node_ops.readlink},stream:{}},chrdev:{node:{getattr:ar.node_ops.getattr,setattr:ar.node_ops.setattr},stream:sr.chrdev_stream_ops}});var o=sr.createNode(r,t,e,n);return sr.isDir(o.mode)?(o.node_ops=ar.ops_table.dir.node,o.stream_ops=ar.ops_table.dir.stream,o.contents={}):sr.isFile(o.mode)?(o.node_ops=ar.ops_table.file.node,o.stream_ops=ar.ops_table.file.stream,o.usedBytes=0,o.contents=null):sr.isLink(o.mode)?(o.node_ops=ar.ops_table.link.node,o.stream_ops=ar.ops_table.link.stream):sr.isChrdev(o.mode)&&(o.node_ops=ar.ops_table.chrdev.node,o.stream_ops=ar.ops_table.chrdev.stream),o.timestamp=Date.now(),r&&(r.contents[t]=o),o},getFileDataAsRegularArray:function(r){if(r.contents&&r.contents.subarray){for(var t=[],e=0;e=t)){t=Math.max(t,e*(e<1048576?2:1.125)|0),0!=e&&(t=Math.max(t,256));var n=r.contents;r.contents=new Uint8Array(t),r.usedBytes>0&&r.contents.set(n.subarray(0,r.usedBytes),0)}},resizeFileStorage:function(r,t){if(r.usedBytes!=t){if(0==t)return r.contents=null,void(r.usedBytes=0);if(!r.contents||r.contents.subarray){var e=r.contents;return r.contents=new Uint8Array(t),e&&r.contents.set(e.subarray(0,Math.min(t,r.usedBytes))),void(r.usedBytes=t)}if(r.contents||(r.contents=[]),r.contents.length>t)r.contents.length=t;else for(;r.contents.length=r.node.usedBytes)return 0;var 
a=Math.min(r.node.usedBytes-o,n);if(a>8&&i.subarray)t.set(i.subarray(o,o+a),e);else for(var s=0;s0||o+n8)throw new sr.ErrnoError(32);for(var o=nr.normalizeArray(r.split("/").filter((function(r){return!!r})),!1),i=sr.root,a="/",s=0;s40)throw new sr.ErrnoError(32)}}return{path:a,node:i}},getPath:function(r){for(var t;;){if(sr.isRoot(r)){var e=r.mount.mountpoint;return t?"/"!==e[e.length-1]?e+"/"+t:e+t:e}t=t?r.name+"/"+t:r.name,r=r.parent}},hashName:function(r,t){for(var e=0,n=0;n>>0)%sr.nameTable.length},hashAddNode:function(r){var t=sr.hashName(r.parent.id,r.name);r.name_next=sr.nameTable[t],sr.nameTable[t]=r},hashRemoveNode:function(r){var t=sr.hashName(r.parent.id,r.name);if(sr.nameTable[t]===r)sr.nameTable[t]=r.name_next;else for(var e=sr.nameTable[t];e;){if(e.name_next===r){e.name_next=r.name_next;break}e=e.name_next}},lookupNode:function(r,t){var e=sr.mayLookup(r);if(e)throw new sr.ErrnoError(e,r);for(var n=sr.hashName(r.id,t),o=sr.nameTable[n];o;o=o.name_next){var i=o.name;if(o.parent.id===r.id&&i===t)return o}return sr.lookup(r,t)},createNode:function(r,t,e,n){sr.FSNode||(sr.FSNode=function(r,t,e,n){r||(r=this),this.parent=r,this.mount=r.mount,this.mounted=null,this.id=sr.nextInode++,this.name=t,this.mode=e,this.node_ops={},this.stream_ops={},this.rdev=n},sr.FSNode.prototype={},Object.defineProperties(sr.FSNode.prototype,{read:{get:function(){return 365==(365&this.mode)},set:function(r){r?this.mode|=365:this.mode&=-366}},write:{get:function(){return 146==(146&this.mode)},set:function(r){r?this.mode|=146:this.mode&=-147}},isFolder:{get:function(){return sr.isDir(this.mode)}},isDevice:{get:function(){return sr.isChrdev(this.mode)}}}));var o=new sr.FSNode(r,t,e,n);return sr.hashAddNode(o),o},destroyNode:function(r){sr.hashRemoveNode(r)},isRoot:function(r){return r===r.parent},isMountpoint:function(r){return!!r.mounted},isFile:function(r){return 32768==(61440&r)},isDir:function(r){return 16384==(61440&r)},isLink:function(r){return 
40960==(61440&r)},isChrdev:function(r){return 8192==(61440&r)},isBlkdev:function(r){return 24576==(61440&r)},isFIFO:function(r){return 4096==(61440&r)},isSocket:function(r){return 49152==(49152&r)},flagModes:{r:0,rs:1052672,"r+":2,w:577,wx:705,xw:705,"w+":578,"wx+":706,"xw+":706,a:1089,ax:1217,xa:1217,"a+":1090,"ax+":1218,"xa+":1218},modeStringToFlags:function(r){var t=sr.flagModes[r];if(void 0===t)throw new Error("Unknown file open mode: "+r);return t},flagsToPermissionString:function(r){var t=["r","w","rw"][3&r];return 512&r&&(t+="w"),t},nodePermissions:function(r,t){return sr.ignorePermissions||(-1===t.indexOf("r")||292&r.mode)&&(-1===t.indexOf("w")||146&r.mode)&&(-1===t.indexOf("x")||73&r.mode)?0:2},mayLookup:function(r){var t=sr.nodePermissions(r,"x");return t||(r.node_ops.lookup?0:2)},mayCreate:function(r,t){try{return sr.lookupNode(r,t),20}catch(r){}return sr.nodePermissions(r,"wx")},mayDelete:function(r,t,e){var n;try{n=sr.lookupNode(r,t)}catch(r){return r.errno}var o=sr.nodePermissions(r,"wx");if(o)return o;if(e){if(!sr.isDir(n.mode))return 54;if(sr.isRoot(n)||sr.getPath(n)===sr.cwd())return 10}else if(sr.isDir(n.mode))return 31;return 0},mayOpen:function(r,t){return r?sr.isLink(r.mode)?32:sr.isDir(r.mode)&&("r"!==sr.flagsToPermissionString(t)||512&t)?31:sr.nodePermissions(r,sr.flagsToPermissionString(t)):44},MAX_OPEN_FDS:4096,nextfd:function(r,t){r=r||0,t=t||sr.MAX_OPEN_FDS;for(var e=r;e<=t;e++)if(!sr.streams[e])return e;throw new sr.ErrnoError(33)},getStream:function(r){return sr.streams[r]},createStream:function(r,t,e){sr.FSStream||(sr.FSStream=function(){},sr.FSStream.prototype={},Object.defineProperties(sr.FSStream.prototype,{object:{get:function(){return this.node},set:function(r){this.node=r}},isRead:{get:function(){return 1!=(2097155&this.flags)}},isWrite:{get:function(){return 0!=(2097155&this.flags)}},isAppend:{get:function(){return 1024&this.flags}}}));var n=new sr.FSStream;for(var o in r)n[o]=r[o];r=n;var i=sr.nextfd(t,e);return 
r.fd=i,sr.streams[i]=r,r},closeStream:function(r){sr.streams[r]=null},chrdev_stream_ops:{open:function(r){var t=sr.getDevice(r.node.rdev);r.stream_ops=t.stream_ops,r.stream_ops.open&&r.stream_ops.open(r)},llseek:function(){throw new sr.ErrnoError(70)}},major:function(r){return r>>8},minor:function(r){return 255&r},makedev:function(r,t){return r<<8|t},registerDevice:function(r,t){sr.devices[r]={stream_ops:t}},getDevice:function(r){return sr.devices[r]},getMounts:function(r){for(var t=[],e=[r];e.length;){var n=e.pop();t.push(n),e.push.apply(e,n.mounts)}return t},syncfs:function(r,t){"function"==typeof r&&(t=r,r=!1),sr.syncFSRequests++,sr.syncFSRequests>1&&f("warning: "+sr.syncFSRequests+" FS.syncfs operations in flight at once, probably just doing extra work");var e=sr.getMounts(sr.root.mount),n=0;function o(r){return sr.syncFSRequests--,t(r)}function i(r){if(r)return i.errored?void 0:(i.errored=!0,o(r));++n>=e.length&&o(null)}e.forEach((function(t){if(!t.type.syncfs)return i(null);t.type.syncfs(t,r,i)}))},mount:function(r,t,e){var n,o="/"===e,i=!e;if(o&&sr.root)throw new sr.ErrnoError(10);if(!o&&!i){var a=sr.lookupPath(e,{follow_mount:!1});if(e=a.path,n=a.node,sr.isMountpoint(n))throw new sr.ErrnoError(10);if(!sr.isDir(n.mode))throw new sr.ErrnoError(54)}var s={type:r,opts:t,mountpoint:e,mounts:[]},u=r.mount(s);return u.mount=s,s.root=u,o?sr.root=u:n&&(n.mounted=s,n.mount&&n.mount.mounts.push(s)),u},unmount:function(r){var t=sr.lookupPath(r,{follow_mount:!1});if(!sr.isMountpoint(t.node))throw new sr.ErrnoError(28);var e=t.node,n=e.mounted,o=sr.getMounts(n);Object.keys(sr.nameTable).forEach((function(r){for(var t=sr.nameTable[r];t;){var e=t.name_next;-1!==o.indexOf(t.mount)&&sr.destroyNode(t),t=e}})),e.mounted=null;var i=e.mount.mounts.indexOf(n);e.mount.mounts.splice(i,1)},lookup:function(r,t){return r.node_ops.lookup(r,t)},mknod:function(r,t,e){var n=sr.lookupPath(r,{parent:!0}).node,o=nr.basename(r);if(!o||"."===o||".."===o)throw new sr.ErrnoError(28);var 
i=sr.mayCreate(n,o);if(i)throw new sr.ErrnoError(i);if(!n.node_ops.mknod)throw new sr.ErrnoError(63);return n.node_ops.mknod(n,o,t,e)},create:function(r,t){return t=void 0!==t?t:438,t&=4095,t|=32768,sr.mknod(r,t,0)},mkdir:function(r,t){return t=void 0!==t?t:511,t&=1023,t|=16384,sr.mknod(r,t,0)},mkdirTree:function(r,t){for(var e=r.split("/"),n="",o=0;othis.length-1||r<0)){var t=r%this.chunkSize,e=r/this.chunkSize|0;return this.getter(e)[t]}},i.prototype.setDataGetter=function(r){this.getter=r},i.prototype.cacheLength=function(){var r=new XMLHttpRequest;if(r.open("HEAD",e,!1),r.send(null),!(r.status>=200&&r.status<300||304===r.status))throw new Error("Couldn't load "+e+". Status: "+r.status);var t,n=Number(r.getResponseHeader("Content-length")),o=(t=r.getResponseHeader("Accept-Ranges"))&&"bytes"===t,i=(t=r.getResponseHeader("Content-Encoding"))&&"gzip"===t,a=1048576;o||(a=n);var s=this;s.setDataGetter((function(r){var t=r*a,o=(r+1)*a-1;if(o=Math.min(o,n-1),void 0===s.chunks[r]&&(s.chunks[r]=function(r,t){if(r>t)throw new Error("invalid range ("+r+", "+t+") or no bytes requested!");if(t>n-1)throw new Error("only "+n+" bytes available! programmer error!");var o=new XMLHttpRequest;if(o.open("GET",e,!1),n!==a&&o.setRequestHeader("Range","bytes="+r+"-"+t),"undefined"!=typeof Uint8Array&&(o.responseType="arraybuffer"),o.overrideMimeType&&o.overrideMimeType("text/plain; charset=x-user-defined"),o.send(null),!(o.status>=200&&o.status<300||304===o.status))throw new Error("Couldn't load "+e+". Status: "+o.status);return void 0!==o.response?new Uint8Array(o.response||[]):kr(o.responseText||"",!0)}(t,o)),void 0===s.chunks[r])throw new Error("doXHR failed!");return s.chunks[r]})),!i&&n||(a=n=1,n=this.getter(0).length,a=n,c("LazyFiles on gzip forces download of the whole file when length is accessed")),this._length=n,this._chunkSize=a,this.lengthKnown=!0},"undefined"!=typeof XMLHttpRequest)throw"Cannot do synchronous binary XHRs outside webworkers in modern browsers. 
Use --embed-file or --preload-file in emcc";var a={isDevice:!1,url:e},s=sr.createFile(r,t,a,n,o);a.contents?s.contents=a.contents:a.url&&(s.contents=null,s.url=a.url),Object.defineProperties(s,{usedBytes:{get:function(){return this.contents.length}}});var u={};return Object.keys(s.stream_ops).forEach((function(r){var t=s.stream_ops[r];u[r]=function(){if(!sr.forceLoadFile(s))throw new sr.ErrnoError(29);return t.apply(null,arguments)}})),u.read=function(r,t,e,n,o){if(!sr.forceLoadFile(s))throw new sr.ErrnoError(29);var i=r.node.contents;if(o>=i.length)return 0;var a=Math.min(i.length-o,n);if(i.slice)for(var u=0;u>2]=n.dev,b[e+4>>2]=0,b[e+8>>2]=n.ino,b[e+12>>2]=n.mode,b[e+16>>2]=n.nlink,b[e+20>>2]=n.uid,b[e+24>>2]=n.gid,b[e+28>>2]=n.rdev,b[e+32>>2]=0,Z=[n.size>>>0,(G=n.size,+N(G)>=1?G>0?(0|H(+I(G/4294967296),4294967295))>>>0:~~+L((G-+(~~G>>>0))/4294967296)>>>0:0)],b[e+40>>2]=Z[0],b[e+44>>2]=Z[1],b[e+48>>2]=4096,b[e+52>>2]=n.blocks,b[e+56>>2]=n.atime.getTime()/1e3|0,b[e+60>>2]=0,b[e+64>>2]=n.mtime.getTime()/1e3|0,b[e+68>>2]=0,b[e+72>>2]=n.ctime.getTime()/1e3|0,b[e+76>>2]=0,Z=[n.ino>>>0,(G=n.ino,+N(G)>=1?G>0?(0|H(+I(G/4294967296),4294967295))>>>0:~~+L((G-+(~~G>>>0))/4294967296)>>>0:0)],b[e+80>>2]=Z[0],b[e+84>>2]=Z[1],0},doMsync:function(r,t,e,n,o){var i=E.slice(r,r+e);sr.msync(t,i,o,e,n)},doMkdir:function(r,t){return"/"===(r=nr.normalize(r))[r.length-1]&&(r=r.substr(0,r.length-1)),sr.mkdir(r,t,0),0},doMknod:function(r,t,e){switch(61440&t){case 32768:case 8192:case 24576:case 4096:case 49152:break;default:return-28}return sr.mknod(r,t,e),0},doReadlink:function(r,t,e){if(e<=0)return-28;var n=sr.readlink(r),o=Math.min(e,A(n)),i=g[t+o];return x(n,E,t,e+1),g[t+o]=i,o},doAccess:function(r,t){if(-8&t)return-28;var e;if(!(e=sr.lookupPath(r,{follow:!0}).node))return-44;var n="";return 4&t&&(n+="r"),2&t&&(n+="w"),1&t&&(n+="x"),n&&sr.nodePermissions(e,n)?-2:0},doDup:function(r,t,e){var n=sr.getStream(e);return n&&sr.close(n),sr.open(r,t,0,e,e).fd},doReadv:function(r,t,e,n){for(var 
o=0,i=0;i>2],s=b[t+(8*i+4)>>2],u=sr.read(r,g,a,s,n);if(u<0)return-1;if(o+=u,u>2],s=b[t+(8*i+4)>>2],u=sr.write(r,g,a,s,n);if(u<0)return-1;o+=u}return o},varargs:0,get:function(r){return ur.varargs+=4,b[ur.varargs-4>>2]},getStr:function(){return P(ur.get())},getStreamFromFD:function(r){void 0===r&&(r=ur.get());var t=sr.getStream(r);if(!t)throw new sr.ErrnoError(8);return t},get64:function(){var r=ur.get();return ur.get(),r},getZero:function(){ur.get()}};function cr(r,t,e){t|=0;var n,o=0,i=0,a=0;if(n=(r|=0)+(e|=0)|0,t&=255,(0|e)>=67){for(;0!=(3&r);)g[r>>0]=t,r=r+1|0;for(a=t|t<<8|t<<16|t<<24,i=(o=-4&n|0)-64|0;(0|r)<=(0|i);)b[r>>2]=a,b[r+4>>2]=a,b[r+8>>2]=a,b[r+12>>2]=a,b[r+16>>2]=a,b[r+20>>2]=a,b[r+24>>2]=a,b[r+28>>2]=a,b[r+32>>2]=a,b[r+36>>2]=a,b[r+40>>2]=a,b[r+44>>2]=a,b[r+48>>2]=a,b[r+52>>2]=a,b[r+56>>2]=a,b[r+60>>2]=a,r=r+64|0;for(;(0|r)<(0|o);)b[r>>2]=a,r=r+4|0}for(;(0|r)<(0|n);)g[r>>0]=t,r=r+1|0;return n-e|0}var fr=42,lr=0;function dr(r){try{return p.grow(r-w.byteLength+65535>>16),M(p.buffer),1}catch(r){}}var pr={};function mr(){if(!mr.strings){var r={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:a||"./this.program"};for(var t in pr)r[t]=pr[t];var e=[];for(var t in r)e.push(t+"="+r[t]);mr.strings=e}return mr.strings}function hr(r){return r%4==0&&(r%100!=0||r%400==0)}function yr(r,t){for(var e=0,n=0;n<=t;e+=r[n++]);return e}var vr=[31,29,31,30,31,30,31,31,30,31,30,31],_r=[31,28,31,30,31,30,31,31,30,31,30,31];function wr(r,t){for(var e=new Date(r.getTime());t>0;){var n=hr(e.getFullYear()),o=e.getMonth(),i=(n?vr:_r)[o];if(!(t>i-e.getDate()))return e.setDate(e.getDate()+t),e;t-=i-e.getDate()+1,e.setDate(1),o<11?e.setMonth(o+1):(e.setMonth(0),e.setFullYear(e.getFullYear()+1))}return e}function gr(r,t,e,n){var 
o=b[n+40>>2],i={tm_sec:b[n>>2],tm_min:b[n+4>>2],tm_hour:b[n+8>>2],tm_mday:b[n+12>>2],tm_mon:b[n+16>>2],tm_year:b[n+20>>2],tm_wday:b[n+24>>2],tm_yday:b[n+28>>2],tm_isdst:b[n+32>>2],tm_gmtoff:b[n+36>>2],tm_zone:o?P(o):""},a=P(e),s={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"};for(var u in s)a=a.replace(new RegExp(u,"g"),s[u]);var c=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],f=["January","February","March","April","May","June","July","August","September","October","November","December"];function l(r,t,e){for(var n="number"==typeof r?r.toString():r||"";n.length0?1:0}var n;return 0===(n=e(r.getFullYear()-t.getFullYear()))&&0===(n=e(r.getMonth()-t.getMonth()))&&(n=e(r.getDate()-t.getDate())),n}function m(r){switch(r.getDay()){case 0:return new Date(r.getFullYear()-1,11,29);case 1:return r;case 2:return new Date(r.getFullYear(),0,3);case 3:return new Date(r.getFullYear(),0,2);case 4:return new Date(r.getFullYear(),0,1);case 5:return new Date(r.getFullYear()-1,11,31);case 6:return new Date(r.getFullYear()-1,11,30)}}function h(r){var t=wr(new Date(r.tm_year+1900,0,1),r.tm_yday),e=new Date(t.getFullYear(),0,4),n=new Date(t.getFullYear()+1,0,4),o=m(e),i=m(n);return p(o,t)<=0?p(i,t)<=0?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var y={"%a":function(r){return c[r.tm_wday].substring(0,3)},"%A":function(r){return c[r.tm_wday]},"%b":function(r){return f[r.tm_mon].substring(0,3)},"%B":function(r){return f[r.tm_mon]},"%C":function(r){return d((r.tm_year+1900)/100|0,2)},"%d":function(r){return d(r.tm_mday,2)},"%e":function(r){return l(r.tm_mday,2," ")},"%g":function(r){return 
h(r).toString().substring(2)},"%G":function(r){return h(r)},"%H":function(r){return d(r.tm_hour,2)},"%I":function(r){var t=r.tm_hour;return 0==t?t=12:t>12&&(t-=12),d(t,2)},"%j":function(r){return d(r.tm_mday+yr(hr(r.tm_year+1900)?vr:_r,r.tm_mon-1),3)},"%m":function(r){return d(r.tm_mon+1,2)},"%M":function(r){return d(r.tm_min,2)},"%n":function(){return"\n"},"%p":function(r){return r.tm_hour>=0&&r.tm_hour<12?"AM":"PM"},"%S":function(r){return d(r.tm_sec,2)},"%t":function(){return"\t"},"%u":function(r){return r.tm_wday||7},"%U":function(r){var t=new Date(r.tm_year+1900,0,1),e=0===t.getDay()?t:wr(t,7-t.getDay()),n=new Date(r.tm_year+1900,r.tm_mon,r.tm_mday);if(p(e,n)<0){var o=yr(hr(n.getFullYear())?vr:_r,n.getMonth()-1)-31,i=31-e.getDate()+o+n.getDate();return d(Math.ceil(i/7),2)}return 0===p(e,t)?"01":"00"},"%V":function(r){var t,e=new Date(r.tm_year+1900,0,4),n=new Date(r.tm_year+1901,0,4),o=m(e),i=m(n),a=wr(new Date(r.tm_year+1900,0,1),r.tm_yday);return p(a,o)<0?"53":p(i,a)<=0?"01":(t=o.getFullYear()=0;return t=(t=Math.abs(t)/60)/60*100+t%60,(e?"+":"-")+String("0000"+t).slice(-4)},"%Z":function(r){return r.tm_zone},"%%":function(){return"%"}};for(var u in y)a.indexOf(u)>=0&&(a=a.replace(new RegExp(u,"g"),y[u](i)));var v=kr(a,!1);return v.length>t?0:(function(r,t){g.set(r,t)}(v,r),v.length-1)}function Er(r,t){Er.array||(Er.array=[]);var e,n=Er.array;for(n.length=0;e=E[r++];)100===e||102===e?(t=t+7&-8,n.push(D[t>>3]),t+=8):(t=t+3&-4,n.push(b[t>>2]),t+=4);return n}function kr(r,t,e){var n=e>0?e:A(r)+1,o=new Array(n),i=x(r,o,0,o.length);return t&&(o.length=i),o}tr=function(){return performance.now()},sr.staticInit();var br={M:function(r,t){return function(r,t){var e;if(0===r)e=Date.now();else{if(1!==r&&4!==r)return er(28),-1;e=tr()}return b[t>>2]=e/1e3|0,b[t+4>>2]=e%1e3*1e3*1e3|0,0}(r,t)},k:function(r){return Rr(r)},j:function(r,t,e){throw"uncaught_exception"in zr?zr.uncaught_exceptions++:zr.uncaught_exceptions=1,r},q:function(){},U:function(r,t){return 
er(63),-1},P:function(r,t){ur.varargs=t;try{var e=ur.getStr();return sr.unlink(e),0}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},R:function(r,t){ur.varargs=t;try{return function(r,t,e,n,o,i){var a;i<<=12;var s=!1;if(0!=(16&n)&&r%16384!=0)return-28;if(0!=(32&n)){if(!(a=Br(16384,t)))return-48;cr(a,0,t),s=!0}else{var u=sr.getStream(o);if(!u)return-8;var c=sr.mmap(u,E,r,t,i,e,n);a=c.ptr,s=c.allocated}return ur.mappings[a]={malloc:a,len:t,allocated:s,fd:o,flags:n,offset:i},a}(ur.get(),ur.get(),ur.get(),ur.get(),ur.get(),ur.get())}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},S:function(r,t){ur.varargs=t;try{var e=ur.getStr(),n=ur.get();return ur.doStat(sr.stat,e,n)}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},T:function(r,t){ur.varargs=t;try{var e=ur.getStreamFromFD(),n=ur.get();return ur.doStat(sr.stat,e.path,n)}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},L:function(r,t){ur.varargs=t;try{return fr}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},y:function(r,t){ur.varargs=t;try{var e=ur.getStreamFromFD();switch(ur.get()){case 0:return(n=ur.get())<0?-28:sr.open(e.path,e.flags,0,n).fd;case 1:case 2:return 0;case 3:return e.flags;case 4:var n=ur.get();return e.flags|=n,0;case 12:return n=ur.get(),k[n+0>>1]=2,0;case 13:case 14:return 0;case 16:case 8:return-28;case 9:return er(28),-1;default:return-28}}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},K:function(r,t){ur.varargs=t;try{var e=ur.getStr(),n=ur.get();return ur.doAccess(e,n)}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},z:function(r,t){ur.varargs=t;try{var e=ur.getStr(),n=ur.get(),o=ur.get();return sr.open(e,n,o).fd}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},O:function(r,t){ur.varargs=t;try{var e=ur.getStreamFromFD(),n=ur.get();switch(n){case 21509:case 21505:return e.tty?0:-59;case 21510:case 
21511:case 21512:case 21506:case 21507:case 21508:return e.tty?0:-59;case 21519:if(!e.tty)return-59;var o=ur.get();return b[o>>2]=0,0;case 21520:return e.tty?-28:-59;case 21531:return o=ur.get(),sr.ioctl(e,n,o);case 21523:case 21524:return e.tty?0:-59;default:J("bad ioctl syscall "+n)}}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},Q:function(r,t){ur.varargs=t;try{return function(r,t){if(-1===r||0===t)return-28;var e=ur.mappings[r];if(!e)return 0;if(t===e.len){var n=sr.getStream(e.fd);ur.doMsync(r,n,t,e.flags,e.offset),sr.munmap(n),ur.mappings[r]=null,e.allocated&&Or(e.malloc)}return 0}(ur.get(),ur.get())}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),-r.errno}},m:function(){},w:function(){J()},C:function(r,t,e){var n=Er(t,e);return Q[r].apply(null,n)},d:function(r,t){!function(r,t){throw Tr(r,t||1),"longjmp"}(r,t)},E:function(r,t,e){E.set(E.subarray(t,t+e),r)},F:function(r){var t=E.length;if(r>2147418112)return!1;for(var e,n,o=1;o<=4;o*=2){var i=t*(1+.2/o);if(i=Math.min(i,r+100663296),dr(Math.min(2147418112,((e=Math.max(16777216,r,i))%(n=65536)>0&&(e+=n-e%n),e))))return!0}return!1},I:function(r,t){var e=mr(),n=0;return e.forEach((function(e,o){var i=t+n;b[r+4*o>>2]=i,function(r,t,e){for(var n=0;n>0]=r.charCodeAt(n);e||(g[t>>0]=0)}(e,i),n+=e.length+1})),0},J:function(r,t){var e=mr();b[r>>2]=e.length;var n=0;return e.forEach((function(r){n+=r.length+1})),b[t>>2]=n,0},l:function(r){!function(r,t){t&&d&&0===r||(d||(v=!0,n.onExit&&n.onExit(r)),s(r,new rt(r)))}(r)},p:function(r){try{var t=ur.getStreamFromFD(r);return sr.close(t),0}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),r.errno}},H:function(r,t){try{var e=ur.getStreamFromFD(r),n=e.tty?2:sr.isDir(e.mode)?3:sr.isLink(e.mode)?7:4;return g[t>>0]=n,0}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),r.errno}},N:function(r,t,e,n){try{var o=ur.getStreamFromFD(r),i=ur.doReadv(o,t,e);return b[n>>2]=i,0}catch(r){return void 0!==sr&&r instanceof 
sr.ErrnoError||J(r),r.errno}},D:function(r,t,e,n,o){try{var i=ur.getStreamFromFD(r),a=4294967296*e+(t>>>0);return a<=-9007199254740992||a>=9007199254740992?-61:(sr.llseek(i,a,n),Z=[i.position>>>0,(G=i.position,+N(G)>=1?G>0?(0|H(+I(G/4294967296),4294967295))>>>0:~~+L((G-+(~~G>>>0))/4294967296)>>>0:0)],b[o>>2]=Z[0],b[o+4>>2]=Z[1],i.getdents&&0===a&&0===n&&(i.getdents=null),0)}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),r.errno}},x:function(r,t,e,n){try{var o=ur.getStreamFromFD(r),i=ur.doWritev(o,t,e);return b[n>>2]=i,0}catch(r){return void 0!==sr&&r instanceof sr.ErrnoError||J(r),r.errno}},a:function(){return 0|m},X:function(r){var t=Date.now();return b[r>>2]=t/1e3|0,b[r+4>>2]=t%1e3*1e3|0,0},Y:function(r){var t=$r();try{return Zr(r)}catch(r){if(Qr(t),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},V:function(r,t){var e=$r();try{return Kr(r,t)}catch(r){if(Qr(e),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},v:function(r){var t=$r();try{return qr(r)}catch(r){if(Qr(t),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},f:function(r,t){var e=$r();try{return Yr(r,t)}catch(r){if(Qr(e),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},e:function(r,t,e){var n=$r();try{return Jr(r,t,e)}catch(r){if(Qr(n),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},g:function(r,t,e,n){var o=$r();try{return Vr(r,t,e,n)}catch(r){if(Qr(o),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},n:function(r,t,e,n,o){var i=$r();try{return Xr(r,t,e,n,o)}catch(r){if(Qr(i),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},W:function(r,t,e,n,o,i,a){var s=$r();try{return Gr(r,t,e,n,o,i,a)}catch(r){if(Qr(s),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},s:function(r){var t=$r();try{Nr(r)}catch(r){if(Qr(t),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},h:function(r,t){var e=$r();try{Lr(r,t)}catch(r){if(Qr(e),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},o:function(r,t,e){var n=$r();try{Ir(r,t,e)}catch(r){if(Qr(n),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},u:function(r,t,e,n){var o=$r();try{Hr(r,t,e,n)}catch(r){if(Qr(o),r!==r+0&&"longjmp"!==r)throw 
r;Tr(1,0)}},t:function(r,t,e,n,o){var i=$r();try{Ur(r,t,e,n,o)}catch(r){if(Qr(i),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},r:function(r,t,e,n,o,i){var a=$r();try{Wr(r,t,e,n,o,i)}catch(r){if(Qr(a),r!==r+0&&"longjmp"!==r)throw r;Tr(1,0)}},memory:p,i:function r(t,e,n,o){e|=0,n|=0,o|=0;var i=0;for(lr=lr+1|0,b[(t|=0)>>2]=lr;(0|i)<(0|o);){if(0==(0|b[n+(i<<3)>>2]))return b[n+(i<<3)>>2]=lr,b[n+(4+(i<<3))>>2]=e,b[n+(8+(i<<3))>>2]=0,h(0|o),0|n;i=i+1|0}return n=0|r(0|t,0|e,0|(n=0|jr(0|n,8*(1+(o=2*o|0)|0)|0)),0|o),h(0|o),0|n},b:function(r){h(0|r)},G:function(r,t,e,n){return gr(r,t,e,n)},table:y,c:function(r,t,e){r|=0,t|=0,e|=0;for(var n=0,o=0;(0|n)<(0|e)&&0!=(0|(o=0|b[t+(n<<3)>>2]));){if((0|o)==(0|r))return 0|b[t+(4+(n<<3))>>2];n=n+1|0}return 0},A:function(r){var t=Date.now()/1e3|0;return r&&(b[r>>2]=t),t},B:function(r){return 0!==r&&cr(r,0,16),0}},Dr=function(){var r={a:br};function t(r,t){var e=r.exports;n.asm=e,Y()}function e(r){t(r.instance)}function o(t){return(l||"function"!=typeof fetch?new Promise((function(r,t){r($())})):fetch(K,{credentials:"same-origin"}).then((function(r){if(!r.ok)throw"failed to load wasm binary file at '"+K+"'";return r.arrayBuffer()})).catch((function(){return $()}))).then((function(t){return WebAssembly.instantiate(t,r)})).then(t,(function(r){f("failed to asynchronously prepare wasm: "+r),J(r)}))}if(q(),n.instantiateWasm)try{return n.instantiateWasm(r,t)}catch(r){return f("Module.instantiateWasm callback failed with error: "+r),!1}return function(){if(l||"function"!=typeof WebAssembly.instantiateStreaming||V(K)||"function"!=typeof fetch)return o(e);fetch(K,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,r).then(e,(function(r){f("wasm streaming compile failed: "+r),f("falling back to ArrayBuffer instantiation"),o(e)}))}))}(),{}}();n.asm=Dr;var 
Sr,Fr=n.___wasm_call_ctors=function(){return(Fr=n.___wasm_call_ctors=n.asm.Z).apply(null,arguments)},Pr=n._emscripten_bind_VoidPtr___destroy___0=function(){return(Pr=n._emscripten_bind_VoidPtr___destroy___0=n.asm._).apply(null,arguments)},xr=n._emscripten_bind_Main_layout_3=function(){return(xr=n._emscripten_bind_Main_layout_3=n.asm.$).apply(null,arguments)},Ar=n._emscripten_bind_Main_lastError_0=function(){return(Ar=n._emscripten_bind_Main_lastError_0=n.asm.aa).apply(null,arguments)},Mr=n._emscripten_bind_Main_createFile_2=function(){return(Mr=n._emscripten_bind_Main_createFile_2=n.asm.ba).apply(null,arguments)},Cr=n._emscripten_bind_Main___destroy___0=function(){return(Cr=n._emscripten_bind_Main___destroy___0=n.asm.ca).apply(null,arguments)},Rr=n._malloc=function(){return(Rr=n._malloc=n.asm.da).apply(null,arguments)},Or=n._free=function(){return(Or=n._free=n.asm.ea).apply(null,arguments)},jr=n._realloc=function(){return(jr=n._realloc=n.asm.fa).apply(null,arguments)},Tr=(n.___errno_location=function(){return(n.___errno_location=n.asm.ga).apply(null,arguments)},n._setThrew=function(){return(Tr=n._setThrew=n.asm.ha).apply(null,arguments)}),zr=n.__ZSt18uncaught_exceptionv=function(){return(zr=n.__ZSt18uncaught_exceptionv=n.asm.ia).apply(null,arguments)},Br=n._memalign=function(){return(Br=n._memalign=n.asm.ja).apply(null,arguments)},Nr=n.dynCall_v=function(){return(Nr=n.dynCall_v=n.asm.ka).apply(null,arguments)},Lr=n.dynCall_vi=function(){return(Lr=n.dynCall_vi=n.asm.la).apply(null,arguments)},Ir=n.dynCall_vii=function(){return(Ir=n.dynCall_vii=n.asm.ma).apply(null,arguments)},Hr=n.dynCall_viii=function(){return(Hr=n.dynCall_viii=n.asm.na).apply(null,arguments)},Ur=n.dynCall_viiii=function(){return(Ur=n.dynCall_viiii=n.asm.oa).apply(null,arguments)},Wr=n.dynCall_viiiii=function(){return(Wr=n.dynCall_viiiii=n.asm.pa).apply(null,arguments)},qr=n.dynCall_i=function(){return(qr=n.dynCall_i=n.asm.qa).apply(null,arguments)},Yr=n.dynCall_ii=function(){return(Yr=n.dynCall_ii=
n.asm.ra).apply(null,arguments)},Jr=n.dynCall_iii=function(){return(Jr=n.dynCall_iii=n.asm.sa).apply(null,arguments)},Vr=n.dynCall_iiii=function(){return(Vr=n.dynCall_iiii=n.asm.ta).apply(null,arguments)},Xr=n.dynCall_iiiii=function(){return(Xr=n.dynCall_iiiii=n.asm.ua).apply(null,arguments)},Gr=n.dynCall_iiiiiii=function(){return(Gr=n.dynCall_iiiiiii=n.asm.va).apply(null,arguments)},Zr=n.dynCall_d=function(){return(Zr=n.dynCall_d=n.asm.wa).apply(null,arguments)},Kr=n.dynCall_di=function(){return(Kr=n.dynCall_di=n.asm.xa).apply(null,arguments)},$r=n.stackSave=function(){return($r=n.stackSave=n.asm.ya).apply(null,arguments)},Qr=(n.stackAlloc=function(){return(n.stackAlloc=n.asm.za).apply(null,arguments)},n.stackRestore=function(){return(Qr=n.stackRestore=n.asm.Aa).apply(null,arguments)});function rt(r){this.name="ExitStatus",this.message="Program terminated with exit("+r+")",this.status=r}function tt(r){function t(){Sr||(Sr=!0,v||(B=!0,n.noFSInit||sr.init.initialized||sr.init(),ir.init(),R(j),sr.ignorePermissions=!1,R(T),n.onRuntimeInitialized&&n.onRuntimeInitialized(),function(){if(n.postRun)for("function"==typeof n.postRun&&(n.postRun=[n.postRun]);n.postRun.length;)r=n.postRun.shift(),z.unshift(r);var r;R(z)}()))}U>0||(function(){if(n.preRun)for("function"==typeof n.preRun&&(n.preRun=[n.preRun]);n.preRun.length;)r=n.preRun.shift(),O.unshift(r);var r;R(O)}(),U>0||(n.setStatus?(n.setStatus("Running..."),setTimeout((function(){setTimeout((function(){n.setStatus("")}),1),t()}),1)):t()))}if(n.asm=Dr,n.then=function(r){if(Sr)r(n);else{var t=n.onRuntimeInitialized;n.onRuntimeInitialized=function(){t&&t(),r(n)}}return n},W=function r(){Sr||tt(),Sr||(W=r)},n.run=tt,n.preInit)for("function"==typeof n.preInit&&(n.preInit=[n.preInit]);n.preInit.length>0;)n.preInit.pop()();function et(){}function nt(r){return(r||et).__cache__}function ot(r,t){var e=nt(t),n=e[r];return 
n||((n=Object.create((t||et).prototype)).ptr=r,e[r]=n)}d=!0,tt(),et.prototype=Object.create(et.prototype),et.prototype.constructor=et,et.prototype.__class__=et,et.__cache__={},n.WrapperObject=et,n.getCache=nt,n.wrapPointer=ot,n.castObject=function(r,t){return ot(r.ptr,t)},n.NULL=ot(0),n.destroy=function(r){if(!r.__destroy__)throw"Error: Cannot destroy object. (Did you create it yourself?)";r.__destroy__(),delete nt(r.__class__)[r.ptr]},n.compare=function(r,t){return r.ptr===t.ptr},n.getPointer=function(r){return r.ptr},n.getClass=function(r){return r.__class__};var it,at={buffer:0,size:0,pos:0,temps:[],needed:0,prepare:function(){if(at.needed){for(var r=0;r=at.size?(_(i>0),at.needed+=i,e=n._malloc(i),at.temps.push(e)):(e=at.buffer+at.pos,at.pos+=i),e},copy:function(r,t,e){var n=e;switch(t.BYTES_PER_ELEMENT){case 2:n>>=1;break;case 4:n>>=2;break;case 8:n>>=3}for(var o=0;o\n\n'}}function h(r,t){var e,n=d({images:[],files:[]},t);p(n.files,(e=n.images,e.map(m))).forEach((function(t){return r.Main.prototype.createFile(t.path,t.data)}))}var y={layout:function(r,t,e,n){return void 0===t&&(t="svg"),void 0===e&&(e="dot"),r?a(l).then((function(o){h(o,n);var i=o.Main.prototype.layout(r,t,e);if(!i)throw new Error(o.Main.prototype.lastError());return i})):Promise.resolve("")},circo:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"circo",e)},dot:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"dot",e)},fdp:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"fdp",e)},neato:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"neato",e)},osage:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"osage",e)},patchwork:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"patchwork",e)},twopi:function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"twopi",e)}},v=function(){function r(r){this._wasm=r}return r.prototype.layout=function(r,t,e,n){if(void 0===t&&(t="svg"),void 
0===e&&(e="dot"),!r)return"";h(this._wasm,n);var o=this._wasm.Main.prototype.layout(r,t,e);if(!o)throw new Error(this._wasm.Main.prototype.lastError());return o},r.prototype.circo=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"circo",e)},r.prototype.dot=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"dot",e)},r.prototype.fdp=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"fdp",e)},r.prototype.neato=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"neato",e)},r.prototype.osage=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"osage",e)},r.prototype.patchwork=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"patchwork",e)},r.prototype.twopi=function(r,t,e){return void 0===t&&(t="svg"),this.layout(r,t,"twopi",e)},r}();r.StackParser=u,r.graphviz=y,r.graphvizSync=function(){return a(l).then((function(r){return new v(r)}))},r.parse=c,r.wasmFolder=i,Object.defineProperty(r,"__esModule",{value:!0})})); \ No newline at end of file diff --git a/blueprint/web/js/jquery.min.js b/blueprint/web/js/jquery.min.js deleted file mode 100644 index ebbcf4191..000000000 --- a/blueprint/web/js/jquery.min.js +++ /dev/null @@ -1,5 +0,0 @@ -/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. 
| jquery.org/license */ -!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new 
Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return 
e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new 
RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var 
c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="
",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return 
a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return 
a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var 
j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return 
a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++db;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var 
j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof 
a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return 
c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return 
this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var 
b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return 
e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var 
a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h; -if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete 
g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.lengthh;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="
a",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 
0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return 
h]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/\s*$/g,rb={option:[1,""],legend:[1,"
","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:k.htmlSerialize?[0,"",""]:[1,"X
","
"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var 
d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?""!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 
0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return 
this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("