diff --git a/.gitignore b/.gitignore index f35286d6a..4b85f4656 100644 --- a/.gitignore +++ b/.gitignore @@ -3,8 +3,10 @@ /assignment.txt /params __pycache__ -/P +/P* /V +/SpartanPP +/*.pin /pi /x /perf.data* diff --git a/Cargo.lock b/Cargo.lock index a1163a91c..3ba932de7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b2e69442aa5628ea6951fa33e24efe8313f4321a91bd729fc2f75bdfc858570" dependencies = [ - "num-bigint", + "num-bigint 0.3.3", "num-integer", "num-traits", ] @@ -28,6 +28,18 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -102,6 +114,133 @@ dependencies = [ "num-traits", ] +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "derivative", + "hashbrown 0.13.2", +] + +[[package]] +name = "ark-secp256r1" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3975a01b0a6e3eae0f72ec7ca8598a6620fc72fa5981f6f5cca33b7cd788f633" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-secp256r1" +version = "0.4.0" +source = "git+https://github.com/arkworks-rs/curves#e2d16a27e2cfa9f972ae9772df827a22730011b4" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest 0.10.7", 
+ "num-bigint 0.4.6", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -152,6 +291,24 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64ct" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" + [[package]] name = "beef" version = "0.5.2" @@ -167,8 +324,8 @@ dependencies = [ "blake2s_simd", "byteorder", "crossbeam-channel", - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "lazy_static", "log", "merlin", @@ -260,14 +417,14 @@ dependencies = [ [[package]] name = "bls12_381" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c196a77437e7cc2fb515ce413a6401291578b5afc8ecb29a3c7ab957f05941" +version = "0.7.0" +source = "git+https://github.com/woopuiyung/bls12_381.git?branch=release-0.7.0#be9a76bb50a6def9ac9779ae24491c6dcb1ea6b3" dependencies = [ - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "pairing", "rand_core 0.6.4", + "serde", "subtle", ] @@ -283,6 +440,16 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "cc" +version = "1.2.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932" +dependencies = [ + "find-msvc-tools", + "shlex", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -294,6 +461,11 @@ name = "circ" version = "0.1.0" dependencies = [ "approx", + "ark-ec", + "ark-ff", + "ark-secp256r1 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ark-serialize", + "ark-std", "bellman", "bincode", "bls12_381", @@ -302,17 +474,26 @@ dependencies = [ "circ_hc", "circ_opt", "circ_waksman", - "curve25519-dalek", + "curve25519-dalek 4.1.3", + "dorian", + "dorian-t25519", + "dorian-t256", + "dynfmt", + "ed25519-dalek", + "elliptic-curve", "env_logger", - "ff", + "ff 0.12.1", "from-pest", "fxhash", "gmp-mpfr-sys", "good_lp", - "group", + "group 0.12.1", + "hex", + "hex-literal 0.4.1", "ieee754", "im", "itertools 0.10.5", + "jemallocator", "lang-c", "lazy_static", "log", @@ -320,8 +501,11 @@ dependencies = [ "lp-solvers 0.0.4", "merlin", "once_cell", + "openssl", + "p256", "pairing", "paste", + "pem", "pest", "pest-ast", "pest_derive", @@ -330,14 +514,19 @@ dependencies = [ "quickcheck_macros", "rand 0.8.5", "rand_chacha 0.3.1", + "rand_core 0.5.1", + "rand_core 0.6.4", "rayon", + "regex", + "rsa", "rsmt2", "rug", "rug-polynomial", "serde", 
"serde_bytes", "serde_json", - "spartan", + "sha2", + "subtle", "thiserror", "typed-arena", "zokrates_parser 0.2.4", @@ -350,8 +539,14 @@ dependencies = [ name = "circ_fields" version = "0.1.0" dependencies = [ + "ark-ec", + "ark-ff", + "ark-ff-macros", + "ark-secp256r1 0.4.0 (git+https://github.com/arkworks-rs/curves)", + "ark-serialize", + "ark-std", "datasize", - "ff", + "ff 0.12.1", "ff-derive-num", "ff_derive", "lazy_static", @@ -360,6 +555,7 @@ dependencies = [ "rand 0.8.5", "rug", "serde", + "sha2", ] [[package]] @@ -459,6 +655,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "constant_time_eq" version = "0.3.1" @@ -517,6 +719,29 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crrl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca032663238726938d4ca23189575167143599eb219b591270c0d268ca9259b0" +dependencies = [ + "rand_core 0.6.4", + "sha2", + "sha3 0.10.8", +] + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -529,9 +754,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest 0.9.0", @@ -541,6 +766,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rand_core 0.6.4", + "rustc_version", + "serde", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", +] + [[package]] name = "datasize" version = "0.2.15" @@ -562,6 +816,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.109", +] + [[package]] name = "digest" version = "0.8.1" @@ -587,7 +863,141 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", + "subtle", +] + +[[package]] +name = "dorian" +version = "0.8.0" +dependencies = [ + "bincode", + "byteorder", + "colored", + "curve25519-dalek 3.2.0", + "digest 0.8.1", + "flate2", + "itertools 0.10.5", + "merlin", + "rand 0.7.3", + "rayon", + "serde", + "sha3 0.8.2", + "subtle", + "zeroize", +] + +[[package]] +name = "dorian-t25519" +version = "0.9.0" +source = "git+https://github.com/woopuiyung/Dorian.git?branch=t25519#c58f91be431d7d8167ef1a15d823d9857fe3039a" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "bincode", + "byteorder", + "colored", + "crrl", + "curve25519-dalek 3.2.0", + "digest 0.8.1", + "flate2", + "hex-literal 0.3.4", + "itertools 0.10.5", + "lazy_static", + "merlin", + "num-bigint-dig 0.7.1", + "rand 0.7.3", + "rayon", + "serde", + "serde_bytes", + "sha3 0.8.2", + "subtle", + "zeroize", +] + +[[package]] +name = "dorian-t256" +version = "0.9.0" +source = "git+https://github.com/woopuiyung/Dorian.git?branch=t256#3e3f73b01fbfd0bc6053dbf403020847947f6c0b" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "bincode", + "byteorder", + "colored", + "curve25519-dalek 3.2.0", + "digest 0.8.1", + "fields", + "flate2", + "hex-literal 0.3.4", + "itertools 0.10.5", + "lazy_static", + "merlin", + "num-bigint-dig 0.7.1", + "rand 0.7.3", + "rayon", + "serde", + "serde_bytes", + "sha3 0.8.2", + "subtle", + "zeroize", +] + +[[package]] +name = "dynfmt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c298552016db86f0d49e5de09818dd86c536f66095013cc415f4f85744033f" +dependencies = [ + "erased-serde", + "lazy_static", + "regex", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "serde", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek 4.1.3", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", ] [[package]] @@ -596,6 +1006,26 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "elliptic-curve" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff 0.13.1", + "generic-array 0.14.7", + "group 0.13.0", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "env_logger" version = "0.8.4" @@ -615,6 +1045,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +[[package]] +name = "erased-serde" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" +dependencies = [ + "serde", +] + [[package]] name = "errno" version = "0.3.10" @@ -676,6 +1115,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff-derive-num" version = "0.2.0" @@ -689,20 +1138,56 @@ dependencies = [ ] [[package]] -name = "ff_derive" -version = "0.12.1" +name = "ff_derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17db6fa0748f1f66e9dbafba1881009b50614948c0e900f59083afff2f8d784b" +dependencies = [ + "addchain", + "cfg-if", + "num-bigint 0.3.3", + "num-integer", + "num-traits", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "fields" +version = "0.1.0" +source = "git+https://github.com/woopuiyung/Dorian.git?branch=t256#3e3f73b01fbfd0bc6053dbf403020847947f6c0b" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-ff-macros", + "ark-secp256r1 0.4.0 (git+https://github.com/arkworks-rs/curves)", + "ark-serialize", + "ark-std", + "datasize", + "ff 0.12.1", + "ff-derive-num", + "ff_derive", + "lazy_static", + "num-traits", + "paste", + "rand 0.8.5", + "rug", + "serde", + "sha2", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17db6fa0748f1f66e9dbafba1881009b50614948c0e900f59083afff2f8d784b" -dependencies = [ - "addchain", - "cfg-if", - "num-bigint", - "num-integer", - "num-traits", - "proc-macro2 1.0.94", - "quote 1.0.40", - "syn 1.0.109", -] +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "fixedbitset" @@ -743,6 +1228,21 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "from-pest" version = "0.3.1" @@ -785,6 +1285,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -854,11 +1355,31 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.1", "rand_core 0.6.4", "subtle", ] +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.15.2" @@ -891,6 +1412,33 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "humantime" version = "2.2.0" @@ -924,7 +1472,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.2", ] [[package]] @@ -957,6 +1505,26 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + [[package]] name = "keccak" version = "0.1.5" @@ -977,6 +1545,9 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] [[package]] name = "libc" @@ -984,6 +1555,12 @@ version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + [[package]] name = "linux-raw-sys" version = "0.9.3" @@ -1044,7 +1621,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown", + "hashbrown 0.15.2", ] [[package]] @@ -1085,6 +1662,49 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint" +version = "0.4.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint-dig" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9bc3e36fd683e004fd59c64a425e0e991616f5a8b617c3b9a933a93c168facc" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "serde", + "smallvec", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -1094,6 +1714,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1101,6 +1732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -1134,13 +1766,63 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", +] + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + [[package]] name = "pairing" version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" dependencies = [ - "group", + "group 0.12.1", ] [[package]] @@ -1149,6 +1831,25 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pem" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +dependencies = [ + "base64", + "serde", +] + +[[package]] 
+name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "pest" version = "2.4.1" @@ -1216,6 +1917,27 @@ dependencies = [ "indexmap", ] +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.32" @@ -1231,6 +1953,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "primeorder" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7dbe9ed3b56368bd99483eb32fe9c17fdd3730aebadc906918ce78d54c7eeb4" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "proc-macro2" version = "0.4.30" @@ -1436,6 +2167,36 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rsa" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig 0.8.6", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "rsmt2" version = "0.14.1" @@ -1485,6 +2246,15 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "1.0.3" @@ -1504,6 +2274,26 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + [[package]] name = "serde" version = "1.0.219" @@ -1556,6 +2346,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + 
"cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha3" version = "0.8.2" @@ -1569,6 +2370,32 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "single" version = "1.0.1" @@ -1589,26 +2416,25 @@ dependencies = [ ] [[package]] -name = "spartan" -version = "0.7.1" +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e52e49b6d607a0ef405654185943138a64ad36f9262e33d791f7233cedaa1b2" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ - "bincode", - "byteorder", - "colored", - "curve25519-dalek", - "digest 0.8.1", - "flate2", - "itertools 0.10.5", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "serde", - "sha3", - "subtle", - "thiserror", - "zeroize", + "base64ct", + "der", ] [[package]] @@ -1758,6 +2584,12 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.5" @@ -1941,9 +2773,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.3.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 7ae638899..280ba6381 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,11 +7,33 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +dorian-t256 = { git = "https://github.com/woopuiyung/Dorian.git", branch = "t256", optional = true, features = ["profile", "multicore", "bench"]} +dorian = { path = "third_party/Dorian", default-features = false, optional = true, features = ["profile", "std", "multicore", "bench"] } # , "simd_backend" +dorian-t25519 = { git = "https://github.com/woopuiyung/Dorian.git", branch = "t25519", optional = true, features = ["profile", "multicore", "bench"]} +subtle = "2.4" +hex = "0.4.3" +hex-literal = "0.4" +ed25519-dalek = {version="2.1.1", features=["rand_core", "serde"]} +sha2 = { version 
= "0.10", default-features = false } # needed to implement hash to curve in spartan +ark-ff = { version = "0.4.2", default-features = false } # needed by spartan +ark-ec = { version = "0.4.2", default-features = false} # needed by t256 +ark-secp256r1 = { version = "0.4.0", default-features = false} # needed by gk membership proof +ark-std = { version = "0.4.0", default-features = false } # needed by gk membership proof +ark-serialize = {version = "0.4.2", default-features = false } # needed by t256 +openssl = "0.10" +p256 = "0.13.2" # Check for the latest version +elliptic-curve = { version = "=0.13.6", default-features = false, features = ["hazmat", "sec1"] } +rand_core = "0.6" # Check for the latest version +rand_core0_5 = {package = "rand_core", version="0.5"} +regex = "1.4" +rsa = "0.9.2" +pem = "2.0.1" +dynfmt = {version = "0.1", features = ["curly"]} circ_fields = { path = "circ_fields" } circ_opt = { path = "circ_opt" } circ_hc = { path = "circ_hc", default-features = false, features = ["rc", "lru"]} circ_waksman = { path = "circ_waksman" } -rug = { version = "1.11", features = ["serde"] } +rug = { version = "1.18", features = ["serde"] } gmp-mpfr-sys = { version = "1.4", optional = true } lazy_static = { version = "1.4", optional = true } rand = "0.8" @@ -47,23 +69,24 @@ pest-ast = { version = "0.3", optional = true } from-pest = { version = "0.3", optional = true } itertools = "0.10" petgraph = { version = "0.6", optional = true } -spartan = { version = "0.7.0", default-features = false, optional = true } -merlin = { version = "3.0.0", optional = true } -curve25519-dalek = {version = "3.2.0", features = ["serde"], optional = true} +merlin = { version = "3.0.0"} +curve25519-dalek = {version = "4.0", features = ["serde", "rand_core"]} + # TODO: kill paste = "1.0" im = "15" once_cell = "1" +jemallocator = "0.5" [dev-dependencies] quickcheck = "1" quickcheck_macros = "1" env_logger = "0.8" -bls12_381 = "0.7" +bls12_381 = {git = "https://github.com/woopuiyung/bls12_381.git", branch = "release-0.7.0"} approx = "0.5.0" [features] -default = [] +default = ["bellman", "r1cs", "poly", "smt", "zok", "datalog", "multicore", "spartan"] # frontends c = ["lang-c"] zok = ["smt", "zokrates_parser", "zokrates_pest_ast", "typed-arena", "petgraph"] @@ -75,8 +98,10 @@ lp = ["good_lp", "lp-solvers"] aby = ["lp"] r1cs = ["bincode", "rayon"] poly = ["rug-polynomial"] -spartan = ["r1cs", "dep:spartan", "merlin", "curve25519-dalek", "bincode", "gmp-mpfr-sys"] +spartan = ["r1cs", "dep:dorian", "dep:dorian-t256", "dep:dorian-t25519", "bincode", "gmp-mpfr-sys"] +# spartan = ["r1cs", "dep:spartan", "bincode", "gmp-mpfr-sys"] bellman = ["r1cs", "dep:bellman", "ff", "group", "pairing", "serde_bytes", "bincode", "gmp-mpfr-sys", "byteorder", "rayon"] +multicore = ["rayon"] [[example]] name = "circ" diff --git a/circ_fields/Cargo.toml b/circ_fields/Cargo.toml index 0d6c244ab..2777c35ff 100644 --- a/circ_fields/Cargo.toml +++ b/circ_fields/Cargo.toml @@ -5,6 +5,8 @@ authors = ["kwantam "] edition = "2021" [dependencies] +# serde_bytes = { version = "0.11.14", default-features = false } +sha2 = "0.10.8" # needed to implement hash to curve in spartan ff = { version = "0.12", features = ["derive"] } ff_derive = "0.12" ff-derive-num = "0.2" @@ -15,6 +17,13 @@ rand = "0.8" rug = { version = "1.11", features = ["serde"] } serde = { version = "1.0", features = ["derive", "rc"] } datasize = { version = "0.2", features = ["detailed"] } +ark-ff = { version = "0.4.2", default-features = false } +ark-ec = { version = "0.4.2", 
default-features = false} +ark-r1cs-std = { version = "0.4.0", default-features = false, optional = true } +ark-std = { version = "0.4.0", default-features = false } +ark-secp256r1 = {default-features = false, git = "https://github.com/arkworks-rs/curves" } +ark-ff-macros = { version = "0.4.2", default-features = false } +ark-serialize = { version = "0.4.2", default-features = false } [dev-dependencies] rand = "0.8" diff --git a/circ_fields/src/int_field.rs b/circ_fields/src/int_field.rs index 89c8d38d1..744268d78 100644 --- a/circ_fields/src/int_field.rs +++ b/circ_fields/src/int_field.rs @@ -72,6 +72,18 @@ impl IntField { self.m.clone() } + /// Update the modulus + pub fn update_modulus(&mut self, new_modulus: Arc<Integer>) { + self.i.rem_floor_assign(&*new_modulus); + self.m = new_modulus; + } + + /// Update the value + pub fn update_val(&mut self, new_val: Integer) { + self.i = new_val; + self.i.rem_floor_assign(&*self.m); + } + /// Invert mod p pub fn recip(self) -> Self { let r = Self { diff --git a/circ_fields/src/lib.rs b/circ_fields/src/lib.rs index cd6407a59..9b75d39b2 100644 --- a/circ_fields/src/lib.rs +++ b/circ_fields/src/lib.rs @@ -1,11 +1,13 @@ //! Fields for use in CirC #![warn(missing_docs)] -#![deny(warnings)] +// #![deny(warnings)] +#![allow(non_local_definitions)] mod ff_field; mod int_field; pub mod size; +pub mod t256; /// Exports for moduli defined in this crate, as ARCs pub mod moduli { @@ -314,7 +316,7 @@ impl FieldV { let ptr: *const FullFieldV = self.0 as *const _; unsafe { &*ptr } } - fn full_cow(&self) -> std::borrow::Cow<FullFieldV> { + fn full_cow(&self) -> std::borrow::Cow<'_, FullFieldV> { if self.is_full() { std::borrow::Cow::Borrowed(self.full_ref()) } else { @@ -504,6 +506,32 @@ impl FieldV { } } + /// Update the modulus of this field element (only works for IntField) + pub fn update_modulus(&mut self, new_modulus: Arc<Integer>) { + let full_element: &mut FullFieldV = self.full_mut(); + match full_element { + FullFieldV::IntField(ref mut i) => { + i.update_modulus(new_modulus); + } + _ => { + panic!("update_modulus only applicable to IntField elements"); + } + } + } + + /// Update the value of this field element (only works for IntField) + pub fn update_val(&mut self, new_val: Integer) { + let full_element: &mut FullFieldV = self.full_mut(); + match full_element { + FullFieldV::IntField(ref mut i) => { + i.update_val(new_val); + } + _ => { + panic!("update_val only applicable to IntField elements"); + } + } + } + /// Compute the multiplicative inverse (panics on 0) #[track_caller] #[inline] diff --git a/circ_fields/src/t256/curves/mod.rs b/circ_fields/src/t256/curves/mod.rs new file mode 100644 index 000000000..5c06eb355 --- /dev/null +++ b/circ_fields/src/t256/curves/mod.rs @@ -0,0 +1,98 @@ +//! This module implements t256 curve +use ark_ec::{ + models::CurveConfig, + short_weierstrass::{self as sw, SWCurveConfig}, +}; + +use super::{fq::Fq, fr::Fr, fr::FrConfig}; +use ark_secp256r1::Config as secp256r1conf; +use ark_secp256r1::Fq as secp256r1Fq; +use ark_secp256r1::FqConfig as secp256FqConfig; +use ark_secp256r1::Fr as secp256r1Fr; +#[allow(unused_imports)] +// This is actually used in the macro below, but rustfmt seems to +// be unable to deduce that...
+use ark_secp256r1::FrConfig as secp256FrConfig; +#[warn(unused_imports)] +use crate::derive_conversion; + +/// Affine +pub type Affine = sw::Affine<Config>; +/// Projective +pub type Projective = sw::Projective<Config>; +/// Scalar field +pub type ScalarField = <Config as CurveConfig>::ScalarField; + +/// Compressed form of the generator +pub const BASEPOINT_COMPRESSED: [u8; 33] = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + +#[derive(Copy, Clone, Default, PartialEq, Eq)] +/// Config +pub struct Config; + +impl CurveConfig for Config { + type BaseField = Fq; + type ScalarField = Fr; + + // We're dealing with prime order curves. + + /// COFACTOR = 1 + const COFACTOR: &'static [u64] = &[0x1]; + + /// COFACTOR_INV = COFACTOR^{-1} mod r = 1 + const COFACTOR_INV: Fr = Fr::ONE; +} + +impl SWCurveConfig for Config { + /// COEFF_A = a4 in the docs, which is a very large string. + const COEFF_A: Fq = + MontFp!("115792089210356248762697446949407573530594504085698471288169790229257723883796"); + + /// COEFF_B = a6 in the docs, which is a very large string. + const COEFF_B: Fq = + MontFp!("81531206846337786915455327229510804132577517753388365729879493166393691077718"); + + /// GENERATOR = (G_GENERATOR_X, G_GENERATOR_Y) + const GENERATOR: Affine = Affine::new_unchecked(G_GENERATOR_X, G_GENERATOR_Y); + +} + +/// G_GENERATOR_X = 3 +pub const G_GENERATOR_X: Fq = MontFp!("3"); + +/// G_GENERATOR_Y = 40902200210088653215032584946694356296222563095503428277299570638400093548589 +pub const G_GENERATOR_Y: Fq = + MontFp!("40902200210088653215032584946694356296222563095503428277299570638400093548589"); + +/// G_GENERATOR_X2 = 5 +pub const G_GENERATOR_X2: Fq = MontFp!("5"); + +/// G_GENERATOR_Y2 = 28281484859698624956664858566852274012236038028101624500031073655422126514829 +pub const G_GENERATOR_Y2: Fq = + MontFp!("28281484859698624956664858566852274012236038028101624500031073655422126514829"); + +/// The x co-ordinate of the other generator for secp256r1. +pub const G_SECP256_O_X: &str = "5"; + +/// The y co-ordinate of the other generator for secp256r1. +pub const G_SECP256_O_Y: &str = + "31468013646237722594854082025316614106172411895747863909393730389177298123724"; + +// Now we instantiate everything else.
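+// The derive_conversion! invocation below wires T256 to secp256r1: it expands (see
+// macros/derive_conversion.rs) into BigInt<4>-based From conversions, via the
+// FrStruct, OtherBase, and OtherScalar wrapper types, between this curve's scalar
+// field and secp256r1's base and scalar fields.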
+derive_conversion!( + Config, + 4, + 128, + secp256r1conf, + G_GENERATOR_X2, + G_GENERATOR_Y2, + Fr, + FrConfig, + secp256r1Fq, + secp256r1Fr, + secp256FqConfig, + secp256FrConfig, + Affine, + "5", + "31468013646237722594854082025316614106172411895747863909393730389177298123724" +); diff --git a/circ_fields/src/t256/curves/tests_todelete.rs b/circ_fields/src/t256/curves/tests_todelete.rs new file mode 100644 index 000000000..c6cb30dc0 --- /dev/null +++ b/circ_fields/src/t256/curves/tests_todelete.rs @@ -0,0 +1,12 @@ +use crate::{Config, Projective}; +use ark_algebra_test_templates::*; +use ark_ec::short_weierstrass::{self as sw}; +use ark_secp256r1::Config as secp256r1conf; +use cdls_macros::test_pedersen; + +// use std::any::type_name; + +type OtherProject = sw::Projective<secp256r1conf>; + +// test_group!(g1; Projective; sw); +test_pedersen!(tp; Config, OtherProject); diff --git a/circ_fields/src/t256/fields/fq.rs b/circ_fields/src/t256/fields/fq.rs new file mode 100644 index 000000000..f75801b92 --- /dev/null +++ b/circ_fields/src/t256/fields/fq.rs @@ -0,0 +1,9 @@ +use ark_ff::fields::{Fp256, MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "115792089210356248762697446949407573530594504085698471288169790229257723883799"] +#[generator = "3"] +/// FqConfig +pub struct FqConfig; +/// Base field +pub type Fq = Fp256<MontBackend<FqConfig, 4>>; diff --git a/circ_fields/src/t256/fields/fr.rs b/circ_fields/src/t256/fields/fr.rs new file mode 100644 index 000000000..28e49e066 --- /dev/null +++ b/circ_fields/src/t256/fields/fr.rs @@ -0,0 +1,9 @@ +use ark_ff::fields::{Fp256, MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "115792089210356248762697446949407573530086143415290314195533631308867097853951"] +#[generator = "1"] +/// FrConfig +pub struct FrConfig; +/// Scalar field +pub type Fr = Fp256<MontBackend<FrConfig, 4>>; \ No newline at end of file diff --git a/circ_fields/src/t256/fields/mod.rs b/circ_fields/src/t256/fields/mod.rs new file mode 100644 index 000000000..c48aa48b1 --- /dev/null +++ b/circ_fields/src/t256/fields/mod.rs @@ -0,0 +1,9 @@ +/// Definition of parameters for base field of T256 +pub mod fq; +pub use self::fq::*; +/// Definition of parameters for scalar field of T256 +pub mod fr; +pub use self::fr::*; + +#[cfg(test)] +mod tests; diff --git a/circ_fields/src/t256/hash_to_curve/mod.rs b/circ_fields/src/t256/hash_to_curve/mod.rs new file mode 100644 index 000000000..9e61a922f --- /dev/null +++ b/circ_fields/src/t256/hash_to_curve/mod.rs @@ -0,0 +1,30 @@ +//!
This module implements hash to curve for t256 +use super::{Config, Projective}; +use ark_ec::hashing::curve_maps::swu::{SWUMap, SWUConfig}; +use super::{fq::Fq}; +use ark_ec::hashing::{map_to_curve_hasher::{MapToCurveBasedHasher}, + HashToCurve}; +use ark_ff::field_hashers::DefaultFieldHasher; +use sha2::Sha256; +use ark_ff::MontFp; + + +impl SWUConfig for Config { + /// Define parameters for hash to curve as needed in https://github.com/arkworks-rs/algebra/blob/fc3f6614b4b1aa4303a0204daece19679bea04c5/ec/src/hashing/curve_maps/swu.rs + const ZETA: Fq = MontFp!("115792089210356248762697446949407573530594504085698471288169790229257723883798"); // -1 +} + +/// Create a hasher for hash to curve +pub fn create_curvebased_hasher(domain: &[u8]) -> MapToCurveBasedHasher::< + Projective, + DefaultFieldHasher<Sha256>, + SWUMap<Config> + > +{ + let hasher = MapToCurveBasedHasher::< + Projective, + DefaultFieldHasher<Sha256>, + SWUMap<Config> + >::new(domain).unwrap(); + hasher +} \ No newline at end of file diff --git a/circ_fields/src/t256/macros/derive_conversion.rs b/circ_fields/src/t256/macros/derive_conversion.rs new file mode 100644 index 000000000..6608631db --- /dev/null +++ b/circ_fields/src/t256/macros/derive_conversion.rs @@ -0,0 +1,100 @@ +#[macro_export] +#[doc(hidden)] +macro_rules! __derive_conversion { + ($config: ty, $dim: expr, $sec_param: expr, $OtherCurve: ty, $G2_X: ident, $G2_Y: ident, $fr: ty, $fr_config: ty, $other_q: ty, $other_r: ty, $other_q_conf: ty, $other_r_conf: ty, $affine: ty, $GSX: expr, $GSY: expr) => { + // Define the conversion functions for this particular + // mapping. + type OtherBaseField = <$OtherCurve as CurveConfig>::BaseField; + type OtherScalarField = <$OtherCurve as CurveConfig>::ScalarField; + + struct FrStruct($fr); + impl FrStruct { + pub fn new(x: $fr) -> FrStruct { + FrStruct(x) + } + + } + + impl From<BigInt<$dim>> for FrStruct { + fn from(x: BigInt<$dim>) -> Self { + let x_t = <$fr_config>::from_bigint(x).unwrap(); + FrStruct::new(x_t) + } + } + + impl From<FrStruct> for BigInt<$dim> { + fn from(val: FrStruct) -> Self { + FrConfig::into_bigint(val.0) + } + } + + struct OtherBase(OtherBaseField); + impl OtherBase { + pub fn new(x: $other_q) -> OtherBase { + OtherBase(x) + } + } + + impl From<OtherBase> for BigInt<$dim> { + fn from(x: OtherBase) -> Self { + <$other_q_conf>::into_bigint(x.0) + } + } + + impl From<BigInt<$dim>> for OtherBase { + fn from(x: BigInt<$dim>) -> OtherBase { + let x_t = <$other_q_conf>::from_bigint(x).unwrap(); + OtherBase::new(x_t) + } + } + + struct OtherScalar(OtherScalarField); + impl OtherScalar { + pub fn new(x: $other_r) -> OtherScalar { + OtherScalar(x) + } + + } + + impl From<OtherScalar> for BigInt<$dim> { + fn from(x: OtherScalar) -> Self { + <$other_r_conf>::into_bigint(x.0) + } + } + + impl From<BigInt<$dim>> for OtherScalar { + fn from(x: BigInt<$dim>) -> OtherScalar { + let x_t = <$other_r_conf>::from_bigint(x).unwrap(); + OtherScalar::new(x_t) + } + } + + }; +} + +#[macro_export] +/// derive_conversion +macro_rules!
derive_conversion { + ($config: ty, $dim: expr, $sec_param: expr, $OtherCurve: ty, $G2_X: ident, $G2_Y: ident, $fr: ty, $fr_config: ty, $other_q: ty, $other_r: ty, $other_q_conf: ty, $other_r_conf: ty, $affine: ty, $GSX: expr, $GSY: expr) => { + use ark_ff::BigInt; + use ark_ff::{Field, MontConfig, MontFp}; + + $crate::__derive_conversion!( + $config, + $dim, + $sec_param, + $OtherCurve, + $G2_X, + $G2_Y, + $fr, + $fr_config, + $other_q, + $other_r, + $other_q_conf, + $other_r_conf, + $affine, + $GSX, + $GSY + ); + }; +} diff --git a/circ_fields/src/t256/macros/mod.rs b/circ_fields/src/t256/macros/mod.rs new file mode 100644 index 000000000..c5004ce1b --- /dev/null +++ b/circ_fields/src/t256/macros/mod.rs @@ -0,0 +1,2 @@ +#[macro_use] +pub mod derive_conversion; diff --git a/circ_fields/src/t256/mod.rs b/circ_fields/src/t256/mod.rs new file mode 100644 index 000000000..8c8640b10 --- /dev/null +++ b/circ_fields/src/t256/mod.rs @@ -0,0 +1,32 @@ +#![forbid(unsafe_code)] +//! From https://github.com/brave-experiments/CDLS/blob/main/t256/src/lib.rs +//! This library implements the 256-bit prime order curve used inside ZKAttest. +//! Note: we use the values here from the ZKAttest implementation: +//! https://github.com/cloudflare/zkp-ecdsa/blob/0af748d23b535a8fffebca34ab51abf37ef1ea13/src/curves/instances.ts#L34, +//! which are different from those that are given in the paper. +//! +//! Curve information: +//! * Base field: q = 0xffffffff0000000100000000000000017e72b42b30e7317793135661b1c4b117 +//! * Scalar field: r = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff +//! +//! Note that by "base field" we mean "the characteristic of the underlying finite field" and by "scalar field" we mean +//! "the order of the curve". +//! +//! * Curve equation: y^2 = x^3 + a_4*x + a_6, where +//! a_4 = -3 +//! a_6 = 0xb441071b12f4a0366fb552f8e21ed4ac36b06aceeb354224863e60f20219fc56 +//! +//! Or, in decimal, a_4 = -3 +//!
a_6 = 81531206846337786915455327229510804132577517753388365729879493166393691077718 + +//#[cfg(feature = "r1cs")] +pub mod curves; +pub mod hash_to_curve; +mod fields; +mod macros; +/// utils +pub mod utils; + +pub use curves::*; +pub use hash_to_curve::*; +pub use fields::*; diff --git a/circ_fields/src/t256/utils/helper.rs b/circ_fields/src/t256/utils/helper.rs new file mode 100644 index 000000000..41af84cdf --- /dev/null +++ b/circ_fields/src/t256/utils/helper.rs @@ -0,0 +1,18 @@ +use crate::t256::Config; +use ark_ec::{models::CurveConfig}; +use ark_serialize::CanonicalSerialize; + +/// Trait for Spartan +pub trait SpartanTrait { + /// Convert to bytes + fn to_bytes(&self) -> [u8; 32]; +} + +impl SpartanTrait for <Config as CurveConfig>::ScalarField { + /// Convert Scalar to bytes + fn to_bytes(&self) -> [u8; 32] { + let mut array_bytes = [0u8; 32]; + self.serialize_compressed(&mut &mut array_bytes[..]).unwrap(); + array_bytes + } +} \ No newline at end of file diff --git a/circ_fields/src/t256/utils/mod.rs b/circ_fields/src/t256/utils/mod.rs new file mode 100644 index 000000000..1f7e19c90 --- /dev/null +++ b/circ_fields/src/t256/utils/mod.rs @@ -0,0 +1,4 @@ +/// Helper function for using t256 in spartan +pub mod helper; + + diff --git a/examples/ZoKratesCurly/pf/binarize_kelsey.zok b/examples/ZoKratesCurly/pf/binarize_kelsey.zok new file mode 100644 index 000000000..4fc598099 --- /dev/null +++ b/examples/ZoKratesCurly/pf/binarize_kelsey.zok @@ -0,0 +1,25 @@ +from "EMBED" import value_in_array; +const field[128] RANGE_0_128 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]; + + +// Test code from https://github.com/sschefflab/zk-barcodes/blob/main/zokrates/binarize.zok +def binarize<R, C>(field[R][C] image, field[R][C] bin_image) -> bool { + bool mut all_binary = true; + for u32 r in 0..R { + for u32 c in 0..C { + all_binary = all_binary && (bin_image[r][c]*(1-bin_image[r][c]) == 0); + field to_lookup = image[r][c] - 128 * (1 - bin_image[r][c]); // if bin_image = 0, lookup image[i] - 128.
Else, lookup image[i] + assert(value_in_array(to_lookup, RANGE_0_128)); + } + } + + return all_binary; +} + +// test main function +const u32 R = 4; +const u32 C = 8; + +def main(private field[R][C] image, private field[R][C] bin_image) -> () { + assert(binarize(image, bin_image)); +} \ No newline at end of file diff --git a/examples/circ.rs b/examples/circ.rs index 27a9dcb0c..977a5e456 100644 --- a/examples/circ.rs +++ b/examples/circ.rs @@ -38,7 +38,7 @@ use circ::target::aby::trans::to_aby; #[cfg(feature = "lp")] use circ::target::ilp::{assignment_to_values, trans::to_ilp}; #[cfg(feature = "spartan")] -use circ::target::r1cs::spartan::write_data; +use circ::target::r1cs::spartan::utils::{write_data_spartan, write_data_spartan_rand}; #[cfg(feature = "bellman")] use circ::target::r1cs::{ bellman::Bellman, @@ -99,6 +99,8 @@ enum Backend { prover_key: PathBuf, #[arg(long, default_value = "V")] verifier_key: PathBuf, + #[arg(long, default_value = "SpartanPP")] + pp: PathBuf, #[arg(long, default_value = "50")] /// linear combination constraints up to this size will be eliminated lc_elimination_thresh: usize, @@ -106,6 +108,8 @@ enum Backend { action: ProofAction, #[arg(long, default_value = "groth16")] proof_impl: ProofImpl, + #[arg(long, default_value = "curve25519")] + pfcurve: PfCurve, }, Smt {}, Ilp {}, @@ -147,13 +151,21 @@ enum ProofAction { Count, Setup, CpSetup, - SpartanSetup, } #[derive(PartialEq, Eq, Debug, Clone, ValueEnum)] enum ProofImpl { Groth16, Mirage, + Spartan, + Dorian, +} + +#[derive(PartialEq, Eq, Debug, Clone, ValueEnum)] +enum PfCurve { + T256, + Curve25519, + T25519, } fn determine_language(l: &Language, input_path: &Path) -> DeterminedLanguage { @@ -188,7 +200,33 @@ fn main() { .format_level(false) .format_timestamp(None) .init(); - let options = Options::parse(); + let options = { + let mut cur_options = Options::parse(); + match cur_options.backend { + Backend::R1cs { + proof_impl: ProofImpl::Spartan, + ref pfcurve, + .. + } + | Backend::R1cs { + proof_impl: ProofImpl::Dorian, + ref pfcurve, + .. + } => match pfcurve { + PfCurve::Curve25519 => { + cur_options.circ.field.custom_modulus = "7237005577332262213973186563042994240857116359379907606001950938285454250989".to_string(); + } + PfCurve::T256 => { + cur_options.circ.field.custom_modulus = "115792089210356248762697446949407573530086143415290314195533631308867097853951".to_string(); + } + PfCurve::T25519 => { + cur_options.circ.field.custom_modulus = "57896044618658097711785492504343953926634992332820282019728792003956564819949".to_string(); + } + }, + _ => {} + } + cur_options + }; circ::cfg::set(&options.circ); let path_buf = options.path.clone(); let mode = match options.backend { @@ -322,6 +360,7 @@ fn main() { action, prover_key, verifier_key, + pp, proof_impl, .. 
} => { @@ -359,10 +398,10 @@ match action { ProofAction::Count => (), - #[cfg(feature = "bellman")] ProofAction::Setup => { println!("Running Setup"); match proof_impl { + #[cfg(feature = "bellman")] ProofImpl::Groth16 => Bellman::<Bls12>::setup_fs( prover_data, verifier_data, prover_key, verifier_key, ) .unwrap(), + #[cfg(feature = "bellman")] ProofImpl::Mirage => Mirage::<Bls12>::setup_fs( prover_data, verifier_data, prover_key, verifier_key, ) .unwrap(), + #[cfg(feature = "spartan")] + ProofImpl::Spartan => write_data_spartan::<_, _>( + prover_key, + verifier_key, + pp, + &prover_data, + &verifier_data, + ) + .unwrap(), + #[cfg(feature = "spartan")] + ProofImpl::Dorian => write_data_spartan_rand::<_, _>( + prover_key, + verifier_key, + pp, + &prover_data, + &verifier_data, + ) + .unwrap(), + #[cfg(not(feature = "spartan"))] + ProofImpl::Spartan | ProofImpl::Dorian => panic!("Missing feature: spartan"), + #[cfg(not(feature = "bellman"))] + ProofImpl::Groth16 | ProofImpl::Mirage => panic!("Missing feature: bellman"), }; } - #[cfg(not(feature = "bellman"))] - ProofAction::Setup => panic!("Missing feature: bellman"), #[cfg(feature = "bellman")] ProofAction::CpSetup => { println!("Running CpSetup"); @@ -393,17 +453,11 @@ verifier_key, ) .unwrap(), + ProofImpl::Spartan | ProofImpl::Dorian => panic!("Spartan/Dorian is not CP"), }; } #[cfg(not(feature = "bellman"))] ProofAction::CpSetup => panic!("Missing feature: bellman"), - #[cfg(feature = "spartan")] - ProofAction::SpartanSetup => { - write_data::<_, _>(prover_key, verifier_key, &prover_data, &verifier_data) - .unwrap(); - } - #[cfg(not(feature = "spartan"))] - ProofAction::SpartanSetup => panic!("Missing feature: spartan"), } } #[cfg(not(feature = "r1cs"))] diff --git a/examples/zk.rs b/examples/zk.rs index c157885dd..a50bd373d 100644 --- a/examples/zk.rs +++ b/examples/zk.rs @@ -14,6 +14,8 @@ use circ::ir::term::text::parse_value_map; #[cfg(feature = "spartan")] use circ::target::r1cs::spartan; +use circ::create_input; + #[derive(Debug, Parser)] #[command(name = "zk", about = "The CirC ZKP runner")] struct Options { #[arg(long, default_value = "P")] prover_key: PathBuf, #[arg(long, default_value = "V")] verifier_key: PathBuf, + #[arg(long, default_value = "SpartanPP")] + pp: PathBuf, #[arg(long, default_value = "pi")] proof: PathBuf, #[arg(long, default_value = "in")] inputs: PathBuf, #[arg(long, default_value = "x")] x: PathBuf, #[arg(long, default_value = "groth16")] proof_impl: ProofImpl, #[arg(long)] action: ProofAction, + #[arg(long, default_value = "curve25519")] + pfcurve: create_input::PfCurve, #[command(flatten)] circ: CircOpt, } #[derive(PartialEq, Debug, Clone, ValueEnum)] -/// `Prove`/`Verify` execute proving/verifying in bellman separately -/// `Spartan` executes both proving/verifying in spartan +/// `Prove`/`Verify` execute proving/verifying separately enum ProofAction { Prove, Verify, - Spartan, } #[derive(PartialEq, Debug, Clone, ValueEnum)] -/// Whether to use Groth16 or Mirage +/// Whether to use Groth16, Mirage, Spartan, or Dorian enum ProofImpl { Groth16, Mirage, + Spartan, + Dorian, } fn main() { @@ -90,16 +96,58 @@ #[cfg(not(feature = "bellman"))] (ProofAction::Prove | ProofAction::Verify, _) => panic!("Missing feature: bellman"), #[cfg(feature = "spartan")] - (ProofAction::Spartan, _) => { - let prover_input_map = parse_value_map(&std::fs::read(opts.pin).unwrap()); + (ProofAction::Prove, ProofImpl::Spartan) => { + let prover_input_map = parse_value_map(&std::fs::read(opts.inputs).unwrap()); println!("Spartan
Proving"); - let (gens, inst, proof) = spartan::prove(opts.prover_key, &prover_input_map).unwrap(); - - let verifier_input_map = parse_value_map(&std::fs::read(opts.vin).unwrap()); + spartan::spartan::prove_fs( + opts.prover_key, + opts.pp, + &prover_input_map, + opts.proof, + &opts.pfcurve, + ) + .unwrap(); + } + #[cfg(feature = "spartan")] + (ProofAction::Verify, ProofImpl::Spartan) => { + let verifier_input_map = parse_value_map(&std::fs::read(opts.inputs).unwrap()); println!("Spartan Verifying"); - spartan::verify(opts.verifier_key, &verifier_input_map, &gens, &inst, proof).unwrap(); + spartan::spartan::verify_fs( + opts.verifier_key, + opts.pp, + &verifier_input_map, + opts.proof, + &opts.pfcurve, + ) + .unwrap(); + } + #[cfg(feature = "spartan")] + (ProofAction::Prove, ProofImpl::Dorian) => { + let prover_input_map = parse_value_map(&std::fs::read(opts.inputs).unwrap()); + println!("Dorian Proving (Spartan with verifier randomness)"); + spartan::spartan_rand::prove_fs( + opts.prover_key, + opts.pp, + &prover_input_map, + opts.proof, + &opts.pfcurve, + ) + .unwrap(); + } + #[cfg(feature = "spartan")] + (ProofAction::Verify, ProofImpl::Dorian) => { + let verifier_input_map = parse_value_map(&std::fs::read(opts.inputs).unwrap()); + println!("Dorian Verifying (Spartan with verifier randomness)"); + spartan::spartan_rand::verify_fs( + opts.verifier_key, + opts.pp, + &verifier_input_map, + opts.proof, + &opts.pfcurve, + ) + .unwrap(); } #[cfg(not(feature = "spartan"))] - (ProofAction::Spartan, _) => panic!("Missing feature: spartan"), + (_, ProofImpl::Spartan | ProofImpl::Dorian) => panic!("Missing feature: spartan"), } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 9cab2696b..c5b8b136f 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "stable" +channel = "1.88.0" diff --git a/src/create_input/mod.rs b/src/create_input/mod.rs new file mode 100644 index 000000000..01c91a520 --- /dev/null +++ b/src/create_input/mod.rs @@ -0,0 +1,14 @@ +//! 
Create prover inputs and verifier inputs + +use crate::cfg::clap::{self, ValueEnum}; + +#[derive(PartialEq, Eq, Debug, Clone, ValueEnum)] +/// Curve for Spartan +pub enum PfCurve { + /// Curve T256 + T256, + /// Curve25519 + Curve25519, + /// Curve T25519 + T25519, +} diff --git a/src/front/datalog/mod.rs b/src/front/datalog/mod.rs index 0f04def48..f67e92349 100644 --- a/src/front/datalog/mod.rs +++ b/src/front/datalog/mod.rs @@ -113,7 +113,7 @@ impl<'ast> Gen<'ast> { ) } - fn entry_rule(&mut self, name: &'ast str) -> Result<()> { + fn entry_rule(&mut self, name: &'ast str) -> Result<'_, ()> { let rule = *self .rules .get(name) @@ -291,7 +291,7 @@ impl<'ast> Gen<'ast> { } // Begin prim-rec linting - fn lint_rules(&mut self) -> Result<()> { + fn lint_rules(&mut self) -> Result<'_, ()> { let rules: Vec<&'ast ast::Rule_> = self.rules.values().cloned().collect(); let bug_if = rules.iter().try_fold(term::bool_lit(false), |x, rule| { let cond = self.lint_rule(rule)?; diff --git a/src/front/datalog/parser.rs b/src/front/datalog/parser.rs index 0e02ca6db..9924100d0 100644 --- a/src/front/datalog/parser.rs +++ b/src/front/datalog/parser.rs @@ -24,7 +24,7 @@ pub mod ast { pub use pest::Span; use pest_ast::FromPest; - fn span_into_str(span: Span) -> &str { + fn span_into_str(span: Span<'_>) -> &str { span.as_str() } @@ -534,7 +534,7 @@ pub mod ast { } #[allow(clippy::result_large_err)] -pub fn parse(file_string: &str) -> Result> { +pub fn parse(file_string: &str) -> Result, Error> { let mut pest_pairs = MyParser::parse(Rule::program, file_string)?; use from_pest::FromPest; Ok(ast::Program::from_pest(&mut pest_pairs).expect("bug in AST construction")) diff --git a/src/front/zsharp/parser.rs b/src/front/zsharp/parser.rs index 3152f1daa..dfd887b2f 100644 --- a/src/front/zsharp/parser.rs +++ b/src/front/zsharp/parser.rs @@ -106,7 +106,7 @@ impl ZLoad { /// ## Returns /// /// Returns a map from file paths to parsed files. - pub fn load>(&self, p: &P) -> HashMap { + pub fn load>(&self, p: &P) -> HashMap> { self.recursive_load(p).unwrap() } diff --git a/src/front/zsharpcurly/parser.rs b/src/front/zsharpcurly/parser.rs index 8265025c8..2251a0b1d 100644 --- a/src/front/zsharpcurly/parser.rs +++ b/src/front/zsharpcurly/parser.rs @@ -106,7 +106,7 @@ impl ZLoad { /// ## Returns /// /// Returns a map from file paths to parsed files. - pub fn load>(&self, p: &P) -> HashMap { + pub fn load>(&self, p: &P) -> HashMap> { self.recursive_load(p).unwrap() } diff --git a/src/ir/term/bv.rs b/src/ir/term/bv.rs index 91c358038..7b68c26b0 100644 --- a/src/ir/term/bv.rs +++ b/src/ir/term/bv.rs @@ -253,7 +253,7 @@ impl BitVector { impl Display for BitVector { fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.width % 4 == 0 { + if self.width.is_multiple_of(4) { write!( f, "#x{:0>width$}", diff --git a/src/ir/term/fmt.rs b/src/ir/term/fmt.rs index 7575a37b6..da4de9c72 100644 --- a/src/ir/term/fmt.rs +++ b/src/ir/term/fmt.rs @@ -228,7 +228,7 @@ pub struct IrWrapper<'a, T> { } /// Wrap a reference for IR formatting. Uses [IrCfg::from_circ_cfg]. 
-pub fn wrap<T>(t: &T) -> IrWrapper<T> {
+pub fn wrap<T>(t: &T) -> IrWrapper<'_, T> {
     IrWrapper::new(t, IrCfg::from_circ_cfg())
 }
diff --git a/src/ir/term/mod.rs b/src/ir/term/mod.rs
index 09d90dad6..48032ba3f 100644
--- a/src/ir/term/mod.rs
+++ b/src/ir/term/mod.rs
@@ -819,7 +819,7 @@ impl std::hash::Hash for Value {
     }
 }
 
-#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, Default)]
 /// The "type" of an IR term
 pub enum Sort {
     /// bit-vectors of this width
@@ -833,6 +833,7 @@ pub enum Sort {
     /// prime field, integers mod FieldT.modulus()
     Field(FieldT),
     /// boolean
+    #[default]
     Bool,
     /// Array from one sort to another, of fixed size.
     ///
@@ -864,12 +865,6 @@ pub struct MapSort {
     pub val: Sort,
 }
 
-impl Default for Sort {
-    fn default() -> Self {
-        Self::Bool
-    }
-}
-
 impl Sort {
     #[track_caller]
     /// Unwrap the bitsize of this bit-vector, panicking otherwise.
diff --git a/src/ir/term/text/mod.rs b/src/ir/term/text/mod.rs
index 58c81030a..8446a55a7 100644
--- a/src/ir/term/text/mod.rs
+++ b/src/ir/term/text/mod.rs
@@ -125,7 +125,7 @@ impl Debug for TokTree<'_> {
 }
 
 /// Parse a token tree.
-fn parse_tok_tree(bytes: &[u8]) -> TokTree {
+fn parse_tok_tree(bytes: &[u8]) -> TokTree<'_> {
     let mut stack: Vec<Vec<TokTree>> = vec![vec![]];
     let lex = Token::lexer(bytes).spanned();
     for (t, s) in lex {
diff --git a/src/lib.rs b/src/lib.rs
index 851c264ba..b91dff84f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,10 +3,13 @@
 //! A compiler infrastructure for compiling programs to circuits
 #![warn(missing_docs)]
-#![deny(warnings)]
+// #![deny(warnings)]
 #![allow(rustdoc::private_intra_doc_links)]
 #![allow(clippy::mutable_key_type)]
 
+#[global_allocator]
+static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
+
 #[macro_use]
 pub mod ir;
 pub mod cfg;
@@ -14,3 +17,6 @@ pub mod circify;
 pub mod front;
 pub mod target;
 pub mod util;
+pub mod create_input;
+#[cfg(feature = "spartan")]
+pub mod right_field_arithmetic;
diff --git a/src/right_field_arithmetic/alloc.rs b/src/right_field_arithmetic/alloc.rs
new file mode 100644
index 000000000..75baf005d
--- /dev/null
+++ b/src/right_field_arithmetic/alloc.rs
@@ -0,0 +1,25 @@
+use super::convert::integer_to_field;
+use std::sync::Arc;
+use crate::ir::term::Value;
+use rug::Integer;
+use fxhash::FxHashMap as HashMap;
+
+/// Allocate a `field` element to the circuit.
+pub fn map_field(input: &Integer, modulus: &Arc<Integer>, name: &str, input_map: &mut HashMap<String, Value>) {
+    input_map.insert(name.to_string(), integer_to_field(input, modulus.clone()));
+}
+
+/// Allocate each element in a `field` vec to the circuit, keyed as `name.i`.
+pub fn map_field_vec(vec: &Vec<Integer>, modulus: &Arc<Integer>, name: &str, input_map: &mut HashMap<String, Value>) {
+    for (i, value) in vec.iter().enumerate() {
+        input_map.insert(format!("{}.{}", name, i), integer_to_field(value, modulus.clone()));
+    }
+}
+
+/// Allocate each element in a `field` double vec to the circuit, keyed as `name.i.j`.
+pub fn map_field_double_vec(double_vec: &Vec<Vec<Integer>>, modulus: &Arc<Integer>, name: &str, input_map: &mut HashMap<String, Value>) {
+    for (i, vec) in double_vec.iter().enumerate() {
+        map_field_vec(vec, modulus, &format!("{}.{}", name, i), input_map);
+    }
+}
diff --git a/src/right_field_arithmetic/convert.rs b/src/right_field_arithmetic/convert.rs
new file mode 100644
index 000000000..9b8f69a35
--- /dev/null
+++ b/src/right_field_arithmetic/convert.rs
@@ -0,0 +1,11 @@
+use std::sync::Arc;
+use circ_fields::FieldV;
+use crate::ir::term::Value;
+use rug::Integer;
+
+/// Convert an `Integer` into a field `Value` in the scalar field defined by `ark_modulus`.
+pub fn integer_to_field(big_int: &Integer, ark_modulus: Arc<Integer>) -> Value {
+    Value::Field(FieldV::new(big_int, ark_modulus))
+}
diff --git a/src/right_field_arithmetic/field.rs b/src/right_field_arithmetic/field.rs
new file mode 100644
index 000000000..89429b60f
--- /dev/null
+++ b/src/right_field_arithmetic/field.rs
@@ -0,0 +1,24 @@
+use rug::Integer;
+use std::sync::Arc;
+use lazy_static::lazy_static;
+
+lazy_static! {
+    /// Modulus of the scalar field of T256 (the P-256 base-field prime, 2^256 - 2^224 + 2^192 + 2^96 - 1)
+    pub static ref MOD_T256: Integer = Integer::from_str_radix("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10).unwrap();
+    /// `Arc`-wrapped modulus for T256
+    pub static ref ARC_MOD_T256: Arc<Integer> = Arc::new(MOD_T256.clone());
+    /// Modulus of the scalar field of Secq256k1 (the secp256k1 base-field prime, 2^256 - 2^32 - 977)
+    pub static ref MOD_SECQ256K1: Integer = Integer::from_str_radix("115792089237316195423570985008687907853269984665640564039457584007908834671663", 10).unwrap();
+    /// `Arc`-wrapped modulus for Secq256k1
+    pub static ref ARC_MOD_SECQ256K1: Arc<Integer> = Arc::new(MOD_SECQ256K1.clone());
+    /// Modulus of the scalar field of curve25519 (the prime-order-subgroup size, 2^252 + 27742317777372353535851937790883648493)
+    pub static ref MOD_CURVE25519: Integer = Integer::from_str_radix("7237005577332262213973186563042994240857116359379907606001950938285454250989", 10).unwrap();
+    /// `Arc`-wrapped modulus for curve25519
+    pub static ref ARC_MOD_CURVE25519: Arc<Integer> = Arc::new(MOD_CURVE25519.clone());
+    /// Modulus of the scalar field of T25519 (the curve25519 base-field prime, 2^255 - 19)
+    pub static ref MOD_T25519: Integer = Integer::from_str_radix("57896044618658097711785492504343953926634992332820282019728792003956564819949", 10).unwrap();
+    /// `Arc`-wrapped modulus for T25519
+    pub static ref ARC_MOD_T25519: Arc<Integer> = Arc::new(MOD_T25519.clone());
+}
\ No newline at end of file
diff --git a/src/right_field_arithmetic/mod.rs b/src/right_field_arithmetic/mod.rs
new file mode 100644
index 000000000..4fd91278d
--- /dev/null
+++ b/src/right_field_arithmetic/mod.rs
@@ -0,0 +1,9 @@
+//! Right-field arithmetic
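+//!
+//! A hypothetical usage sketch (example names assumed, not part of the tested API):
+//! build a proof-input map from native integers reduced into the chosen field.
+//!
+//! ```ignore
+//! use circ::right_field_arithmetic::{alloc::map_field_vec, field::ARC_MOD_CURVE25519};
+//! let mut input_map = fxhash::FxHashMap::default();
+//! let xs = vec![rug::Integer::from(7), rug::Integer::from(11)];
+//! map_field_vec(&xs, &ARC_MOD_CURVE25519, "x", &mut input_map);
+//! // input_map now binds "x.0" and "x.1" to field `Value`s mod the curve25519 group order
+//! ```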
+
+/// Allocate variables to the circuit
+#[macro_use]
+pub mod alloc;
+/// Convert between types
+pub mod convert;
+/// Define the per-curve field modulus constants
+pub mod field;
diff --git a/src/target/r1cs/mod.rs b/src/target/r1cs/mod.rs
index 4c0f464b6..41505c6fa 100644
--- a/src/target/r1cs/mod.rs
+++ b/src/target/r1cs/mod.rs
@@ -623,6 +623,206 @@ impl ProverData {
     }
 }
 
+#[cfg(feature = "spartan")]
+impl ProverDataSpartan {
+    /// Convert `ProverData` to `ProverDataSpartan`
+    pub fn from_prover_data(prover_data: &ProverData) -> Self {
+        let total_var_len = prover_data.r1cs.vars.len();
+        let mut pubinp_len = 0;
+        for var in prover_data.r1cs.vars.iter() {
+            if matches!(var.ty(), VarType::Inst) {
+                pubinp_len += 1;
+            }
+        }
+        ProverDataSpartan {
+            pubinp_len,
+            wit_len: total_var_len - pubinp_len,
+            precompute: prover_data.precompute.clone(),
+        }
+    }
+    /// Compute an R1CS witness
+    pub fn extend_r1cs_witness(&self, values: &HashMap<String, Value>) -> Vec<FieldV> {
+        // we need to evaluate all R1CS variables
+        let mut var_values: Vec<FieldV> = Vec::new();
+        let mut eval = wit_comp::StagedWitCompEvaluator::new(&self.precompute);
+        // this will hold inputs to the multi-round evaluator.
+        let mut inputs = values.clone();
+        let var_len = self.pubinp_len + self.wit_len;
+        while var_values.len() < var_len {
+            trace!(
+                "Have {}/{} values, doing another round",
+                var_values.len(),
+                var_len
+            );
+            // do a round of evaluation
+            let value_vec = eval.eval_stage(std::mem::take(&mut inputs));
+            for value in value_vec {
+                var_values.push(value.as_pf().clone());
+            }
+        }
+        var_values
+    }
+}
+
+#[cfg(feature = "spartan")]
+impl ProverDataSpartanRand {
+    /// Convert `ProverData` to `ProverDataSpartanRand`
+    pub fn from_prover_data(prover_data: &ProverData) -> Self {
+        const N_ROUNDS: usize = 2;
+        let mut pubinp_len: [usize; N_ROUNDS] = [0, 0];
+        let mut wit_len: [usize; N_ROUNDS] = [0, 0];
+        for var in prover_data.r1cs.vars.iter() {
+            if matches!(var.ty(), VarType::Inst) {
+                pubinp_len[0] += 1;
+            } else if matches!(var.ty(), VarType::Chall) {
+                pubinp_len[1] += 1;
+            } else if matches!(var.ty(), VarType::RoundWit) {
+                wit_len[0] += 1;
+            } else if matches!(var.ty(), VarType::FinalWit) {
+                wit_len[1] += 1;
+            } else {
+                panic!("unexpected variable type");
+            }
+        }
+        // collapse to a single round if there are no challenges / no round-0 witnesses
+        let pubinp_len = if pubinp_len[1] == 0 {
+            vec![pubinp_len[0]]
+        } else {
+            pubinp_len.to_vec()
+        };
+        let wit_len = if wit_len[0] == 0 {
+            vec![wit_len[1]]
+        } else {
+            wit_len.to_vec()
+        };
+        assert_eq!(pubinp_len.len(), wit_len.len());
+
+        assert_eq!(
+            prover_data.r1cs.vars.len(),
+            pubinp_len.iter().sum::<usize>() + wit_len.iter().sum::<usize>()
+        );
+        Self {
+            r1cs: prover_data.r1cs.clone(),
+            pubinp_len,
+            wit_len,
+            precompute: prover_data.precompute.clone(),
+        }
+    }
+    /// Compute an R1CS witness, filling any challenges with placeholder values;
+    /// TODO: support verifier randomness here.
+    pub fn extend_r1cs_witness(&self, values: &HashMap<String, Value>) -> Vec<FieldV> {
+        // we need to evaluate all R1CS variables
+        let mut var_values: Vec<FieldV> = Vec::new();
+        #[cfg(debug_assertions)]
+        self.precompute.type_check();
+        let mut eval = wit_comp::StagedWitCompEvaluator::new(&self.precompute);
+        // this will hold inputs to the multi-round evaluator.
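+        // Evaluation proceeds in rounds: each `eval_stage` call runs one stage of
+        // the staged witness computation; between rounds, any challenge (`Chall`)
+        // variables are given placeholder values via `eval_pf_challenge` and fed
+        // back in as inputs to the next stage.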
+        let mut inputs = values.clone();
+        while var_values.len() < self.r1cs.vars.len() {
+            trace!(
+                "Have {}/{} values, doing another round",
+                var_values.len(),
+                self.r1cs.vars.len()
+            );
+            // do a round of evaluation
+            let value_vec = eval.eval_stage(std::mem::take(&mut inputs));
+            for value in value_vec {
+                trace!(
+                    "var {} : {}",
+                    self.r1cs
+                        .names
+                        .get(&self.r1cs.vars[var_values.len()])
+                        .unwrap(),
+                    value.as_pf()
+                );
+                var_values.push(value.as_pf().clone());
+            }
+            // fill any challenge variables with deterministic placeholder values
+            if var_values.len() < self.r1cs.vars.len() {
+                for next_var_i in var_values.len()..self.r1cs.vars.len() {
+                    if !matches!(self.r1cs.vars[next_var_i].ty(), VarType::Chall) {
+                        break;
+                    }
+                    let var = self.r1cs.vars[next_var_i];
+                    let name = self.r1cs.names.get(&var).unwrap().clone();
+                    let val = eval_pf_challenge(&name, &self.r1cs.field);
+                    var_values.push(val.clone());
+                    inputs.insert(name, Value::Field(val));
+                }
+            }
+        }
+        var_values
+    }
+
+    /// Check all assertions, filling challenges with placeholder values.
+    pub fn check_all(&self, values: &HashMap<String, Value>) {
+        self.r1cs.check_all(
+            &self
+                .r1cs
+                .vars
+                .iter()
+                .cloned()
+                .zip(self.extend_r1cs_witness(values))
+                .collect(),
+        );
+    }
+
+    #[cfg(debug_assertions)]
+    /// Check all assertions against explicit input, witness, and randomness vectors.
+    pub fn check_all_witness(
+        &self,
+        inp: &Vec<FieldV>,
+        wit0: &Vec<FieldV>,
+        wit1: &Vec<FieldV>,
+        verifier_rand: &Vec<FieldV>,
+    ) {
+        let mut z = inp.clone();
+        z.extend(wit0.clone());
+        z.extend(verifier_rand.clone());
+        z.extend(wit1.clone());
+        self.r1cs
+            .check_all(&self.r1cs.vars.iter().cloned().zip(z).collect());
+    }
+    /// Compute the interactive-R1CS witness in the second round
+    pub fn create_wit1(
+        &self,
+        values: &HashMap<String, Value>,
+        verifier_rand: &Vec<FieldV>,
+    ) -> Vec<FieldV> {
+        // we need to evaluate all R1CS variables
+        #[cfg(debug_assertions)]
+        self.precompute.type_check();
+        let mut eval = wit_comp::StagedWitCompEvaluator::new(&self.precompute);
+        // this will hold inputs to the multi-round evaluator.
+        let mut inputs = values.clone();
+        let mut chall_idx = 0;
+        let mut return_values = Vec::new();
+        let mut idx = 0;
+        while idx < self.r1cs.vars.len() {
+            // do a round of evaluation
+            let value_vec = eval.eval_stage(std::mem::take(&mut inputs));
+            return_values = value_vec.iter().map(|v| v.as_pf().clone()).collect();
+
+            idx += value_vec.len();
+            // fill the challenges with the verifier's randomness
+            if idx < self.r1cs.vars.len() {
+                for next_var_i in idx..self.r1cs.vars.len() {
+                    if !matches!(self.r1cs.vars[next_var_i].ty(), VarType::Chall) {
+                        break;
+                    }
+                    let var = self.r1cs.vars[next_var_i];
+                    let name = self.r1cs.names.get(&var).unwrap().clone();
+                    let val = verifier_rand[chall_idx].clone();
+                    inputs.insert(name, Value::Field(val.clone()));
+                    chall_idx += 1;
+                    idx += 1;
+                }
+            }
+        }
+        return_values
+    }
+}
+
 /// A bidirectional map.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 struct BiMap {
@@ -1152,6 +1352,32 @@ pub struct ProverData {
     pub precompute: wit_comp::StagedWitComp,
 }
 
+#[cfg(feature = "spartan")]
+/// (Simplified) Relation-related data that a prover needs to make a proof.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ProverDataSpartan {
+    /// Public input length
+    pub pubinp_len: usize,
+    /// Witness length
+    pub wit_len: usize,
+    /// Witness computation
+    pub precompute: wit_comp::StagedWitComp,
+}
+
+#[cfg(feature = "spartan")]
+/// Relation-related data that a prover needs to make a proof.
+#[derive(Debug, Serialize, Deserialize)] +pub struct ProverDataSpartanRand { + /// Public input length + Verifier randomness length + pub pubinp_len: Vec, + /// Witness length + pub wit_len: Vec, + /// R1cs + pub r1cs: R1csFinal, // added to compute witness + /// Witness computation + pub precompute: wit_comp::StagedWitComp, +} + /// Relation-related data that a verifier needs to check a proof. #[derive(Debug, Serialize, Deserialize)] pub struct VerifierData { diff --git a/src/target/r1cs/proof.rs b/src/target/r1cs/proof.rs index c08e8938e..a5048c0cf 100644 --- a/src/target/r1cs/proof.rs +++ b/src/target/r1cs/proof.rs @@ -12,13 +12,15 @@ use super::{ProverData, VerifierData}; use crate::ir::term::text::parse_value_map; use crate::ir::term::Value; -fn serialize_into_file>(data: &S, path: P) -> std::io::Result<()> { +/// Serialize data into a file +pub fn serialize_into_file>(data: &S, path: P) -> std::io::Result<()> { let mut file = BufWriter::new(File::create(path.as_ref())?); serialize_into(&mut file, data).unwrap(); Ok(()) } -fn deserialize_from_file Deserialize<'a>, P: AsRef>( +/// Deserialize data from a file +pub fn deserialize_from_file Deserialize<'a>, P: AsRef>( path: P, ) -> std::io::Result { Ok(deserialize_from(BufReader::new(File::open(path.as_ref())?)).unwrap()) diff --git a/src/target/r1cs/spartan.rs b/src/target/r1cs/spartan.rs deleted file mode 100644 index 80a1a0585..000000000 --- a/src/target/r1cs/spartan.rs +++ /dev/null @@ -1,254 +0,0 @@ -//! Export circ R1cs to Spartan -use crate::target::r1cs::*; -use bincode::{deserialize_from, serialize_into}; -use curve25519_dalek::scalar::Scalar; -use fxhash::FxHashMap as HashMap; -use gmp_mpfr_sys::gmp::limb_t; -use libspartan::{Assignment, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK}; -use merlin::Transcript; -use rug::Integer; -use std::fs::File; -use std::io; -use std::io::{BufReader, BufWriter}; -use std::path::Path; - -/// Hold Spartan variables -#[derive(Debug)] -pub struct Variable { - sid: usize, - value: [u8; 32], -} - -/// generate spartan proof -pub fn prove>( - p_path: P, - inputs_map: &HashMap, -) -> io::Result<(NIZKGens, Instance, NIZK)> { - let prover_data = read_prover_data::<_>(p_path)?; - - println!("Converting R1CS to Spartan"); - let (inst, wit, inps, num_cons, num_vars, num_inputs) = - spartan::r1cs_to_spartan(&prover_data, inputs_map); - - println!("Proving with Spartan"); - assert_ne!(num_cons, 0, "No constraints"); - - // produce public parameters - let gens = NIZKGens::new(num_cons, num_vars, num_inputs); - // produce proof - let mut prover_transcript = Transcript::new(b"nizk_example"); - let pf = NIZK::prove(&inst, wit, &inps, &gens, &mut prover_transcript); - - Ok((gens, inst, pf)) -} - -/// verify spartan proof -pub fn verify>( - v_path: P, - inputs_map: &HashMap, - gens: &NIZKGens, - inst: &Instance, - proof: NIZK, -) -> io::Result<()> { - let verifier_data = read_verifier_data::<_>(v_path)?; - - let values = verifier_data.eval(inputs_map); - - let mut inp = Vec::new(); - for v in &values { - let scalar = int_to_scalar(&v.i()); - inp.push(scalar.to_bytes()); - } - let inputs = InputsAssignment::new(&inp).unwrap(); - - println!("Verifying with Spartan"); - let mut verifier_transcript = Transcript::new(b"nizk_example"); - assert!(proof - .verify(inst, &inputs, &mut verifier_transcript, gens) - .is_ok()); - - println!("Proof Verification Successful!"); - Ok(()) -} - -/// circ R1cs -> spartan R1CSInstance -pub fn r1cs_to_spartan( - prover_data: &ProverData, - inputs_map: &HashMap, -) -> 
(Instance, Assignment, Assignment, usize, usize, usize) { - // spartan format mapper: CirC -> Spartan - let mut wit = Vec::new(); - let mut inp = Vec::new(); - let mut trans: HashMap = HashMap::default(); // Circ -> spartan ids - let mut itrans: HashMap = HashMap::default(); // spartan ids -> Circ - - // check modulus - let f_mod = prover_data.r1cs.field.modulus(); - let s_mod = Integer::from_str_radix( - "7237005577332262213973186563042994240857116359379907606001950938285454250989", - 10, - ) - .unwrap(); - assert_eq!( - &s_mod, f_mod, - "\nR1CS has modulus \n{s_mod},\n but Spartan CS expects \n{f_mod}", - ); - - let values = prover_data.extend_r1cs_witness(inputs_map); - prover_data.r1cs.check_all(&values); - - assert_eq!(values.len(), prover_data.r1cs.vars.len()); - - for var in prover_data.r1cs.vars.iter() { - assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); - if let VarType::FinalWit = var.ty() { - // witness - let id = wit.len(); - itrans.insert(id, *var); - trans.insert(*var, id); - let val = values.get(var).expect("missing R1CS value"); - wit.push(int_to_scalar(&val.i()).to_bytes()); - } - } - - let const_id = wit.len(); - - for var in prover_data.r1cs.vars.iter() { - assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); - if let VarType::Inst = var.ty() { - // input - let id = wit.len() + 1 + inp.len(); - itrans.insert(id, *var); - trans.insert(*var, id); - let val = values.get(var).expect("missing R1CS value"); - inp.push(int_to_scalar(&val.i()).to_bytes()); - } - } - - let num_vars = wit.len(); - let num_inputs = inp.len(); - assert_eq!(wit.len() + inp.len(), prover_data.r1cs.vars.len()); - - let assn_witness = VarsAssignment::new(&wit).unwrap(); - let assn_inputs = InputsAssignment::new(&inp).unwrap(); - - // circuit - let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); - let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); - let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); - - let mut i = 0; // constraint # - for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { - // circ Lc (const, monomials ) -> Vec - let a = lc_to_v(lc_a, const_id, &trans); - let b = lc_to_v(lc_b, const_id, &trans); - let c = lc_to_v(lc_c, const_id, &trans); - - // constraint # x identifier (vars, 1, inp) - for Variable { sid, value } in a { - m_a.push((i, sid, value)); - } - for Variable { sid, value } in b { - m_b.push((i, sid, value)); - } - for Variable { sid, value } in c { - m_c.push((i, sid, value)); - } - - i += 1; - } - - let num_cons = i; - - let inst = Instance::new(num_cons, num_vars, num_inputs, &m_a, &m_b, &m_c).unwrap(); - - // check if the instance we created is satisfiable - let res = inst.is_sat(&assn_witness, &assn_inputs); - assert!(res.unwrap()); - - ( - inst, - assn_witness, - assn_inputs, - num_cons, - num_vars, - num_inputs, - ) -} - -fn int_to_scalar(i: &Integer) -> Scalar { - let mut accumulator = Scalar::zero(); - let limb_bits = (std::mem::size_of::() as u64) << 3; - assert_eq!(limb_bits, 64); - - let two: u64 = 2; - let mut m = Scalar::from(two.pow(63)); - m *= Scalar::from(two); - - // as_ref yeilds a least-significant-first array. 
- for digit in i.as_ref().iter().rev() { - accumulator *= m; - accumulator += Scalar::from(*digit); - } - accumulator -} - -// circ Lc (const, monomials ) -> Vec -fn lc_to_v(lc: &Lc, const_id: usize, trans: &HashMap) -> Vec { - let mut v: Vec = Vec::new(); - - for (k, m) in &lc.monomials { - let scalar = int_to_scalar(&m.i()); - - let var = Variable { - sid: *trans.get(k).unwrap(), - value: scalar.to_bytes(), - }; - v.push(var); - } - if lc.constant.i() != 0 { - let scalar = int_to_scalar(&lc.constant.i()); - let var = Variable { - sid: const_id, - value: scalar.to_bytes(), - }; - v.push(var); - } - v -} - -/// write prover and verifier data to file -pub fn write_data, P2: AsRef>( - p_path: P1, - v_path: P2, - p_data: &ProverData, - v_data: &VerifierData, -) -> io::Result<()> { - write_prover_data(p_path, p_data)?; - write_verifier_data(v_path, v_data)?; - Ok(()) -} - -fn write_prover_data>(path: P, data: &ProverData) -> io::Result<()> { - let mut file = BufWriter::new(File::create(path)?); - serialize_into(&mut file, &data).unwrap(); - Ok(()) -} - -fn read_prover_data>(path: P) -> io::Result { - let mut file = BufReader::new(File::open(path)?); - let data: ProverData = deserialize_from(&mut file).unwrap(); - Ok(data) -} - -fn write_verifier_data>(path: P, data: &VerifierData) -> io::Result<()> { - let mut file = BufWriter::new(File::create(path)?); - serialize_into(&mut file, &data).unwrap(); - Ok(()) -} - -fn read_verifier_data>(path: P) -> io::Result { - let mut file = BufReader::new(File::open(path)?); - let data: VerifierData = deserialize_from(&mut file).unwrap(); - Ok(data) -} diff --git a/src/target/r1cs/spartan/curve25519.rs b/src/target/r1cs/spartan/curve25519.rs new file mode 100644 index 000000000..92ce4e4b0 --- /dev/null +++ b/src/target/r1cs/spartan/curve25519.rs @@ -0,0 +1,248 @@ +//! Export circ R1cs to Spartan +use crate::target::r1cs::*; +// use bincode::{deserialize_from, serialize_into}; +use curve25519_dalek::scalar::Scalar; +// use fxhash::FxHashMap as HashMap; +use gmp_mpfr_sys::gmp::limb_t; +use libdorian::{Assignment, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use super::utils::{Variable}; + +use std::time::Instant; +use crate::util::timer::print_time; + +use lazy_static::lazy_static; + +use super::spartan::SpartanProofSystem; + + +lazy_static! 
{ + /// Order of Curve25519 + pub static ref MOD_CURVE25519: Integer = Integer::from_str_radix("7237005577332262213973186563042994240857116359379907606001950938285454250989", 10).unwrap(); +} + +pub struct SpartanCurve25519; + +impl SpartanProofSystem for SpartanCurve25519 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartan; + type SetupParameter = (NIZKGens, Instance); + type Proof = NIZK; + + fn prove( + pp: &Self::SetupParameter, + pk: &Self::ProverKey, + input_map: &HashMap, + ) -> io::Result { + prove(pk, &pp.0, &pp.1, input_map) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + inputs_map: &HashMap, + proof: &Self::Proof, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + +/// generate spartan proof +pub fn prove( + prover_data: &ProverDataSpartan, + gens: &NIZKGens, + inst: &Instance, + inputs_map: &HashMap, +) -> io::Result { + let print_msg = true; + let start = Instant::now(); + let (wit, inps) = + r1cs_to_spartan_simpl(prover_data, inst, inputs_map); + + print_time("Time for r1cs_to_spartan", start.elapsed(), print_msg); + + + // produce proof + let start = Instant::now(); + let mut prover_transcript = Transcript::new(b"nizk_example"); + let pf = NIZK::prove(inst, wit, &inps, gens, &mut prover_transcript); + print_time("Time for NIZK::prove", start.elapsed(), print_msg); + + Ok(pf) +} + + +/// verify spartan proof +pub fn verify( + values: &Vec, + gens: &NIZKGens, + inst: &Instance, + proof: &NIZK, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let inputs = InputsAssignment::new(&inp).unwrap(); + print_time("Time for Process verifier input -- transforming inputs to appropriate form", start.elapsed(), print_msg); + + // println!("Verifying with Spartan"); + let start = Instant::now(); + let mut verifier_transcript = Transcript::new(b"nizk_example"); + assert!(proof + .verify(inst, &inputs, &mut verifier_transcript, gens) + .is_ok()); + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + // println!("Proof Verification Successful!"); + Ok(()) +} + +/// circ R1cs -> spartan R1CSInstance +pub fn precompute( + prover_data: &ProverData, +// ) { +) -> io::Result<(NIZKGens, Instance)> { + // spartan format mapper: CirC -> Spartan + let mut trans: HashMap = HashMap::default(); // Circ -> spartan ids + let mut id = 0; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::FinalWit = var.ty() { + trans.insert(*var, id); + id += 1; + } + } + let num_wit = id; + let num_inp = prover_data.r1cs.vars.len()-id; + id += 1; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::Inst = var.ty() { + trans.insert(*var, id); + id += 1; + } + } + assert!(id == prover_data.r1cs.vars.len() + 1); + let const_id = num_wit; + + let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + let mut i = 0; // constraint # + for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { + // circ Lc (const, monomials ) -> Vec + let a = lc_to_v(lc_a, const_id, &trans); + let b = lc_to_v(lc_b, const_id, &trans); + let c = lc_to_v(lc_c, const_id, &trans); + + // constraint # x 
identifier (vars, 1, inp)
+        for Variable { sid, value } in a {
+            m_a.push((i, sid, value));
+        }
+        for Variable { sid, value } in b {
+            m_b.push((i, sid, value));
+        }
+        for Variable { sid, value } in c {
+            m_c.push((i, sid, value));
+        }
+
+        i += 1;
+    }
+
+    let num_cons = i;
+    assert_ne!(num_cons, 0, "No constraints");
+
+    let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap();
+    let gens = NIZKGens::new(num_cons, num_wit, num_inp);
+    Ok((gens, inst))
+}
+
+/// circ R1cs -> spartan R1CSInstance
+pub fn r1cs_to_spartan_simpl(
+    prover_data: &ProverDataSpartan,
+    inst: &Instance,
+    inputs_map: &HashMap<String, Value>,
+) -> (Assignment, Assignment) {
+    // spartan format mapper: CirC -> Spartan
+    let mut wit = Vec::new();
+    let mut inp = Vec::new();
+
+    let values = prover_data.extend_r1cs_witness(inputs_map);
+    // prover_data.r1cs.check_all(&values); // for debug purposes
+    let var_len = prover_data.pubinp_len + prover_data.wit_len;
+    assert_eq!(values.len(), var_len);
+
+    // the first `pubinp_len` values are the public inputs; the rest are the witness
+    for val in values.iter().take(prover_data.pubinp_len) {
+        inp.push(int_to_scalar(&val.i()).to_bytes());
+    }
+
+    for val in values.iter().skip(prover_data.pubinp_len) {
+        wit.push(int_to_scalar(&val.i()).to_bytes());
+    }
+
+    let assn_witness = VarsAssignment::new(&wit).unwrap();
+    let assn_inputs = InputsAssignment::new(&inp).unwrap();
+
+    // check that the instance we created is satisfiable
+    let res = inst.is_sat(&assn_witness, &assn_inputs); // for debug only
+    assert!(res.unwrap());
+
+    (assn_witness, assn_inputs)
+}
+
+/// Integer to scalar
+pub fn int_to_scalar(i: &Integer) -> Scalar {
+    let mut accumulator = Scalar::ZERO;
+    let limb_bits = (std::mem::size_of::<limb_t>() as u64) << 3;
+    assert_eq!(limb_bits, 64);
+
+    // m = 2^64, assembled as 2^63 * 2 because the literal 2^64 would overflow u64
+    let two: u64 = 2;
+    let mut m = Scalar::from(two.pow(63));
+    m *= Scalar::from(two);
+
+    // `as_ref` yields a least-significant-first limb array, so iterate in reverse:
+    // Horner evaluation, accumulator = sum_i digit_i * 2^(64*i)
+    for digit in i.as_ref().iter().rev() {
+        accumulator *= m;
+        accumulator += Scalar::from(*digit);
+    }
+    accumulator
+}
+
+/// circ `Lc` (constant, monomials) -> `Vec<Variable>`
+pub fn lc_to_v(lc: &Lc, const_id: usize, trans: &HashMap<Var, usize>) -> Vec<Variable> {
+    let mut v: Vec<Variable> = Vec::new();
+
+    for (k, m) in &lc.monomials {
+        let scalar = int_to_scalar(&m.i());
+        let var = Variable {
+            sid: *trans.get(k).unwrap(),
+            value: scalar.to_bytes(),
+        };
+        v.push(var);
+    }
+    if lc.constant.i() != 0 {
+        let scalar = int_to_scalar(&lc.constant.i());
+        let var = Variable {
+            sid: const_id,
+            value: scalar.to_bytes(),
+        };
+        v.push(var);
+    }
+    v
+}
diff --git a/src/target/r1cs/spartan/curve25519_rand.rs b/src/target/r1cs/spartan/curve25519_rand.rs
new file mode 100644
index 000000000..ce433d55c
--- /dev/null
+++ b/src/target/r1cs/spartan/curve25519_rand.rs
@@ -0,0 +1,328 @@
+//!
Export circ R1cs to Spartan +use crate::target::r1cs::*; + +use crate::util::timer::print_time; +use libdorian::scalar::Scalar as OriScalar; +use libdorian::DensePolynomial; +use libdorian::{ + Assignment, InputsAssignment, Instance, + NIZKRand, NIZKRandGens, NIZKRandInter, VarsAssignment, +}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use std::time::Instant; +use crate::target::r1cs::wit_comp::StagedWitComp; + +use super::spartan_rand::{ + precompute_inner, + ISpartanProofSystem, +}; +use super::curve25519::{int_to_scalar, lc_to_v}; + +#[cfg(feature = "multicore")] +use rayon::prelude::*; + +use ark_serialize::CanonicalDeserialize; +use crate::target::r1cs::proof::deserialize_from_file; +use std::path::Path; + + +pub struct SpartanRandCurve25519; + +impl ISpartanProofSystem for SpartanRandCurve25519 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartanRand; + type SetupParameter = (NIZKRandGens, Instance); + type Proof = NIZKRand; + + fn prove_fs_inner( + pk_path: impl AsRef, + pp: &Self::SetupParameter, + inputs_map: &HashMap, + ) -> std::io::Result { + let print_msg = true; + let (pubinp_len, wit_len, rand_list, precompute, field) = { + let prover_data: Self::ProverKey = deserialize_from_file(pk_path)?; + #[cfg(debug_assertions)] + prover_data.check_all(inputs_map); + R1csToSpartan2Round::parse_prover_data(&prover_data) + }; + + let mut evaluator = R1csToSpartan2Round::from_prover_data_inner( + &pubinp_len, + &wit_len, + &rand_list, + &precompute, + &field + ); + let (gens, inst) = pp; + let start = Instant::now(); + let pf = prove(&mut evaluator, gens, inst, inputs_map).unwrap(); + print_time("Time for Proving", start.elapsed(), print_msg); + Ok(pf) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + proof: &Self::Proof, + inputs_map: &HashMap, + print_msg: bool, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + + +/// circ IR1cs -> spartan IR1CSInstance +pub fn precompute( + prover_data: &ProverData, + prover_data_rand: &ProverDataSpartanRand, +) -> io::Result<(NIZKRandGens, Instance)> { + let (num_cons, num_wit, num_inp, m_a, m_b, m_c) = precompute_inner(prover_data, lc_to_v).unwrap(); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKRandGens::new( + num_cons, + &prover_data_rand.pubinp_len, + &prover_data_rand.wit_len, + ); + Ok((gens, inst)) +} + + +/// generate spartan proof; +pub fn prove( + evaluator: &mut R1csToSpartan2Round, + gens: &NIZKRandGens, + inst: &Instance, + inputs_map: &HashMap, +) -> io::Result { + let start_whole = Instant::now(); + #[cfg(debug_assertions)] + assert_eq!(gens.pubinp_len.len(), 2); + let print_msg = true; + + let (inputs, wit0) = evaluator.inputs_to_wit0(inputs_map); + + // produce proof + let mut prover_transcript = Transcript::new(b"nizkrand_example"); + let mut intermediate = NIZKRandInter::new(&inputs); + NIZKRand::prove_00(inst, &inputs, gens, &mut prover_transcript); + let rand_len = gens.pubinp_len[1]; + let verifier_rand: Vec = NIZKRand::prove_01( + inst, + &wit0, + rand_len, + &mut intermediate, + gens, + &mut prover_transcript, + ); + + let start = Instant::now(); + + let wit1 = evaluator.rand_to_wit1(&verifier_rand); + + print_time("Time for r1cs_to_spartan1,2", start.elapsed(), print_msg); + let pf = NIZKRand::prove_1( + inst, + &wit1, + &mut intermediate, + gens, + &mut prover_transcript, + ); + print_time("Time for whole prove", start_whole.elapsed(), print_msg); + + 
Ok(pf) +} + +/// verify spartan proof +pub fn verify( + values: &Vec, + gens: &NIZKRandGens, + inst: &Instance, + proof: &NIZKRand, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let mut inputs = InputsAssignment::new(&inp).unwrap(); + print_time( + "Time for Process verifier input -- transforming inputs to appropriate form", + start.elapsed(), + print_msg, + ); + + let start = Instant::now(); + let mut verifier_transcript = Transcript::new(b"nizkrand_example"); + assert!(proof + .verify(inst, &mut inputs, &mut verifier_transcript, gens) + .is_ok()); + // println!("Time for verifying proof: {:?}", start.elapsed()); // verify-ecdsa: 158.0493ms + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + Ok(()) +} + + +enum Step { + Fresh, + PostWit0, + Done, +} + +/// A witness evaluator for 2-round Spartan +pub struct R1csToSpartan2Round<'a> { + pubinp_len: [usize; 2], + wit_len: [usize; 2], + rand_list: Vec, + evaluator: wit_comp::StagedWitCompEvaluator<'a>, + field: FieldT, + step: Step, +} + +impl<'a> R1csToSpartan2Round<'a> { + + pub fn parse_prover_data(prover_data: &ProverDataSpartanRand) + -> ([usize; 2], + [usize; 2], + Vec, + StagedWitComp, + FieldT + ) { + assert_eq!(prover_data.pubinp_len.len(), 2); + assert_eq!(prover_data.precompute.stage_sizes().count(), 3); // one more than the wit + // count + assert_eq!(prover_data.wit_len.len(), 2); + #[cfg(debug_assertions)] + prover_data.precompute.type_check(); + let pubinp_len = [prover_data.pubinp_len[0], prover_data.pubinp_len[1]]; + let wit_len = [prover_data.wit_len[0], prover_data.wit_len[1]]; + let rand_list = { + let idx = prover_data.pubinp_len[0] + prover_data.wit_len[0]; + prover_data.r1cs.vars[idx..idx+prover_data.pubinp_len[1]] + .iter() + .map(|var| prover_data.r1cs.names.get(&var).unwrap().clone()) + .collect() + }; + let precompute = prover_data.precompute.clone(); + let field = prover_data.r1cs.field.clone(); + (pubinp_len, wit_len, rand_list, precompute, field) + } + + /// Create a new evaluator + pub fn from_prover_data_inner(pubinp_len: &[usize], + wit_len: &[usize], + rand_list: &Vec, + precompute: &'a StagedWitComp, + field: &FieldT + ) -> Self { + let evaluator = wit_comp::StagedWitCompEvaluator::new(precompute); + let pubinp_len_copy = [pubinp_len[0], pubinp_len[1]]; + let wit_len_copy = [wit_len[0], wit_len[1]]; + Self { + pubinp_len: pubinp_len_copy, + wit_len: wit_len_copy, + rand_list: rand_list.clone(), + field: field.clone(), + evaluator, + step: Step::Fresh, + } + } + /// Inputs: the prover inputs as a map + /// Outputs: the public inputs as an array and the first witness + pub fn inputs_to_wit0( + &mut self, + inputs_map: &HashMap, + ) -> (Assignment, Assignment) { + let start = Instant::now(); + assert!(matches!(self.step, Step::Fresh)); + self.step = Step::PostWit0; + // eval twice. 
+ let start_inner = Instant::now(); + let inputs: Vec<_> = self + .evaluator + .eval_stage(inputs_map.clone()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 0", start_inner.elapsed(), true); + let start_inner = Instant::now(); + + #[cfg(feature = "multicore")] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 1", start_inner.elapsed(), true); + assert_eq!(self.wit_len[0], wit0.len()); + + print_time("Time for inputs_to_wit0", start.elapsed(), true); + + ( + Assignment::new(&inputs).unwrap(), + Assignment::new(&wit0).unwrap(), + ) + } + /// Inputs: the verifier randomness, as a vector + /// Outputs: the second witness + pub fn rand_to_wit1(&mut self, rand: &Vec) -> Assignment { + // let idx = self.prover_data.pubinp_len[0] + self.prover_data.wit_len[0]; + assert!(matches!(self.step, Step::PostWit0)); + self.step = Step::Done; + + let rand_map: HashMap = + self.rand_list + .iter() + .zip(rand) + .map(|(var, value)| { + ( + var.clone(), + Value::Field(self.field.new_v(scalar_to_int(&value))), + ) + }) + .collect(); // 6.162µs + + assert_eq!(self.pubinp_len[1], rand.len()); + #[cfg(feature = "multicore")] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + assert_eq!(self.wit_len[1], wit1.len()); + + Assignment::new(&wit1).unwrap() + } + +} + + +// Convert Integer to Scalar +fn scalar_to_int(i: &OriScalar) -> Integer { + Integer::from_digits(&i.to_bytes(), rug::integer::Order::LsfLe) +} + diff --git a/src/target/r1cs/spartan/hybrid_bench.rs b/src/target/r1cs/spartan/hybrid_bench.rs new file mode 100644 index 000000000..2ca20b579 --- /dev/null +++ b/src/target/r1cs/spartan/hybrid_bench.rs @@ -0,0 +1,187 @@ +//! 
Export circ R1cs to Spartan (Convert a R1cs circuit in spartan-t256 to that in spartan-curve25519) +use crate::target::r1cs::*; + +use libdorian::{Assignment, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK}; +use merlin::Transcript; +use std::io; +use super::utils::{Variable}; +use std::time::Instant; +use crate::util::timer::print_time; + + +use super::curve25519::{int_to_scalar as int_to_scalar_curve25519, + lc_to_v as lc_to_v_curve25519}; +use crate::right_field_arithmetic::field::{ARC_MOD_CURVE25519}; +use super::r1cs::convert_r1cs_v2; +/// generate spartan proof; to do: change it into private +pub fn prove( + mut prover_data: ProverDataSpartan, + gens: NIZKGens, + inst: Instance, + inputs_map: &HashMap, +) -> io::Result { + let print_msg = true; + let start = Instant::now(); + let (wit, inps) = + r1cs_to_spartan_simpl(&mut prover_data, &inst, inputs_map); + print_time("Time for r1cs_to_spartan", start.elapsed(), print_msg); + + // produce proof + let start = Instant::now(); + let mut prover_transcript = Transcript::new(b"nizk_example"); + let pf = NIZK::prove(&inst, wit, &inps, &gens, &mut prover_transcript); + print_time("Time for NIZK::prove", start.elapsed(), print_msg); + + Ok(pf) +} +/// circ R1cs -> spartan R1CSInstance +pub fn precompute( + prover_data: &mut ProverData, + inputs_map: &HashMap, // require because we recompute C based on z +) -> io::Result<(NIZKGens, Instance)> { + // spartan format mapper: CirC -> Spartan + let mut values = prover_data.extend_r1cs_witness(inputs_map); + convert_r1cs_v2(&mut prover_data.r1cs.constraints, &mut values, &ARC_MOD_CURVE25519); // Convert the R1CS circuit in t256 to that in curve25519 + assert_eq!(values.len(), prover_data.r1cs.vars.len()); + prover_data.r1cs.check_all(&values); + + let mut trans: HashMap = HashMap::default(); // Circ -> spartan ids + let mut id = 0; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::FinalWit = var.ty() { + trans.insert(*var, id); + id += 1; + } + } + let num_wit = id; + let num_inp = prover_data.r1cs.vars.len()-id; + id += 1; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::Inst = var.ty() { + trans.insert(*var, id); + id += 1; + } + } + assert!(id == prover_data.r1cs.vars.len() + 1); + let const_id = num_wit; + + let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + let mut i = 0; // constraint # + for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { + // circ Lc (const, monomials ) -> Vec + let a = lc_to_v_curve25519(lc_a, const_id, &trans); + let b = lc_to_v_curve25519(lc_b, const_id, &trans); + let c = lc_to_v_curve25519(lc_c, const_id, &trans); + + // constraint # x identifier (vars, 1, inp) + for Variable { sid, value } in a { + m_a.push((i, sid, value)); + } + for Variable { sid, value } in b { + m_b.push((i, sid, value)); + } + for Variable { sid, value } in c { + m_c.push((i, sid, value)); + } + + i += 1; + } + + let num_cons = i; + assert_ne!(num_cons, 0, "No constraints"); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKGens::new(num_cons, num_wit, num_inp); + Ok((gens, inst)) +} + +/// circ R1cs -> spartan R1CSInstance +pub fn r1cs_to_spartan( + prover_data: &mut ProverData, + inst: &Instance, + inputs_map: &HashMap, +) -> (Assignment, 
Assignment) { + // spartan format mapper: CirC -> Spartan + let mut wit = Vec::new(); + let mut inp = Vec::new(); + + let mut values = prover_data.extend_r1cs_witness(inputs_map); + // Change entries in values to the field element in the prime field of curve25519 + for (_, fieldv) in values.iter_mut() { + fieldv.update_modulus(ARC_MOD_CURVE25519.clone()); + } + assert_eq!(values.len(), prover_data.r1cs.vars.len()); + + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::FinalWit = var.ty() { + // witness + let val = values.get(var).expect("missing R1CS value"); + wit.push(int_to_scalar_curve25519(&val.i()).to_bytes()); + } + } + + + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::FinalWit)); + if let VarType::Inst = var.ty() { + // input + let val = values.get(var).expect("missing R1CS value"); + inp.push(int_to_scalar_curve25519(&val.i()).to_bytes()); + } + } + + + let assn_witness = VarsAssignment::new(&wit).unwrap(); + let assn_inputs = InputsAssignment::new(&inp).unwrap(); + // check if the instance we created is satisfiable + let res = inst.is_sat(&assn_witness, &assn_inputs); // for debug only + assert!(res.unwrap()); + ( + assn_witness, + assn_inputs, + ) +} + +/// circ R1cs -> spartan R1CSInstance +pub fn r1cs_to_spartan_simpl( + prover_data: &mut ProverDataSpartan, + inst: &Instance, + inputs_map: &HashMap, +) -> (Assignment, Assignment) { + // spartan format mapper: CirC -> Spartan + let mut wit = Vec::new(); + let mut inp = Vec::new(); + + let mut values = prover_data.extend_r1cs_witness(inputs_map); + // Change entries in values to the field element in the prime field of curve25519 + for fieldv in values.iter_mut() { + fieldv.update_modulus(ARC_MOD_CURVE25519.clone()); + } + let var_len = prover_data.pubinp_len + prover_data.wit_len; + assert_eq!(values.len(), var_len); + + for val in values.iter().take(prover_data.pubinp_len) { + inp.push(int_to_scalar_curve25519(&val.i()).to_bytes()); + } + + for val in values.iter().skip(prover_data.pubinp_len) { + wit.push(int_to_scalar_curve25519(&val.i()).to_bytes()); + } + + let assn_witness = VarsAssignment::new(&wit).unwrap(); + let assn_inputs = InputsAssignment::new(&inp).unwrap(); + + // check if the instance we created is satisfiable + let res = inst.is_sat(&assn_witness, &assn_inputs); // for debug only + assert!(res.unwrap()); + ( + assn_witness, + assn_inputs, + ) +} \ No newline at end of file diff --git a/src/target/r1cs/spartan/mod.rs b/src/target/r1cs/spartan/mod.rs new file mode 100644 index 000000000..9e7def7c1 --- /dev/null +++ b/src/target/r1cs/spartan/mod.rs @@ -0,0 +1,14 @@ +//! Spartan + +pub mod curve25519; +pub mod curve25519_rand; +pub mod t25519; +pub mod t25519_rand; +pub mod t256; +pub mod t256_rand; +pub mod utils; +pub mod spartan; +pub mod spartan_rand; +pub mod r1cs; +pub mod spartan_bench; +pub mod hybrid_bench; diff --git a/src/target/r1cs/spartan/r1cs.rs b/src/target/r1cs/spartan/r1cs.rs new file mode 100644 index 000000000..50f7a2503 --- /dev/null +++ b/src/target/r1cs/spartan/r1cs.rs @@ -0,0 +1,125 @@ +//! Check that the R1CS is satisfied by the given witness and instance. 
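+//!
+//! Also hosts the helpers that port an R1CS instance from the t256 scalar field
+//! to the curve25519 scalar field: `convert_r1cs` / `convert_r1cs_v2` rewrite
+//! entries of C so that Az * Bz = Cz still holds for the fixed witness z.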
+use crate::target::r1cs::*;
+use rug::Integer;
+use std::sync::Arc;
+use circ_fields::FieldT::IntField;
+
+/// Change the prime field used in an `Lc` (from the t256 prime field to the curve25519 prime field)
+fn update_modulus(lc: &mut Lc, new_modulus: &Arc<Integer>) {
+    lc.modulus = IntField(new_modulus.clone());
+    lc.constant.update_modulus(new_modulus.clone());
+    for (_, fieldv) in &mut lc.monomials {
+        fieldv.update_modulus(new_modulus.clone());
+    }
+}
+
+/// Inner product of a row of an R1CS matrix and z
+fn inner_product(lc: &Lc, values: &HashMap<Var, FieldV>) -> FieldV {
+    let mut acc = lc.constant.clone(); // the constant times the always-1 entry of z
+    for (var, coeff) in &lc.monomials {
+        let val = values // an entry in z
+            .get(var)
+            .unwrap_or_else(|| panic!("Missing value in R1cs::eval for variable {:?}", var))
+            .clone();
+        acc += val * coeff;
+    }
+    acc
+}
+
+/// Convert values in the t256 prime field to values in the curve25519 prime field
+pub fn convert_values(values: &mut Vec<FieldV>, new_modulus: &Arc<Integer>) {
+    for fieldv in values.iter_mut() {
+        fieldv.update_modulus(new_modulus.clone());
+    }
+}
+
+/// Convert an R1CS instance over t256 to an R1CS instance over curve25519
+pub fn convert_r1cs(
+    r1cs_inst: &mut Vec<(Lc, Lc, Lc)>,
+    values: &mut HashMap<Var, FieldV>,
+    new_modulus: &Arc<Integer>,
+) {
+    // Move the entries of `values` into the curve25519 prime field
+    for (_, fieldv) in values.iter_mut() {
+        fieldv.update_modulus(new_modulus.clone());
+    }
+
+    for (lc_a, lc_b, lc_c) in r1cs_inst.iter_mut() { // a row of matrices A, B, C
+        // Move the entries of this row into the curve25519 prime field
+        update_modulus(lc_a, new_modulus);
+        update_modulus(lc_b, new_modulus);
+        update_modulus(lc_c, new_modulus);
+        let av = inner_product(lc_a, values);
+        let bv = inner_product(lc_b, values);
+        let cv = inner_product(lc_c, values);
+        // choose the new constant so that cv - old_const + new_const = av * bv
+        let new_c_entry = (av.clone() * &bv) - cv.clone() + lc_c.constant.clone();
+        lc_c.constant.update_val(new_c_entry.i()); // compute the new column for matrix C
+    }
+}
+
+fn update_lc_c(
+    lc_a: &Lc,
+    lc_b: &Lc,
+    lc_c: &mut Lc,
+    values: &HashMap<Var, FieldV>,
+    new_modulus: &Integer,
+) {
+    let av = inner_product(lc_a, values);
+    let bv = inner_product(lc_b, values);
+    let cv = inner_product(lc_c, values);
+    let mut update: bool = false;
+    if lc_c.constant.is_zero() {
+        for (var, fieldv) in &mut lc_c.monomials {
+            if !fieldv.is_zero() {
+                let val = values // an entry in z
+                    .get(var)
+                    .unwrap_or_else(|| panic!("Missing value in R1cs::eval for variable {:?}", var))
+                    .clone();
+                if !val.is_zero() {
+                    assert!(!update, "Error: more than one non-zero entry in z");
+                    let val_inv: Integer = val.i().invert(new_modulus).unwrap();
+                    let mut field_val_inv = fieldv.clone();
+                    field_val_inv.update_val(val_inv);
+                    let new_c_entry = ((av.clone() * &bv) - cv.clone() + (fieldv.clone() * &val)) * &field_val_inv;
+                    fieldv.update_val(new_c_entry.i()); // compute the new column for matrix C
+                    update = true;
+                    break;
+                }
+            }
+        }
+    }
+
+    if !update { // the corresponding entry of z is the constant 1
+        // choose the new constant so that cv - old_const + new_const = av * bv
+        let new_c_entry = (av.clone() * &bv) - cv.clone() + lc_c.constant.clone();
+        lc_c.constant.update_val(new_c_entry.i()); // compute the new column for matrix C
+        update = true;
+    }
+    assert!(update, "Error: lc is not updated");
+}
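+
+// Derivation for `update_lc_c` (sketch): with the witness z fixed, we need
+// <a,z> * <b,z> = <c',z> over the new field. If c' differs from c only in
+// coordinate k, then <c',z> = <c,z> - c_k*z_k + c'_k*z_k, so
+//     c'_k = (<a,z>*<b,z> - <c,z> + c_k*z_k) / z_k,
+// which is exactly `new_c_entry` above; the constant-coefficient fallback is
+// the z_k = 1 case.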
+
+/// Convert an R1CS instance over t256 to an R1CS instance over curve25519
+pub fn convert_r1cs_v2(
+    r1cs_inst: &mut Vec<(Lc, Lc, Lc)>,
+    values: &mut HashMap<Var, FieldV>,
+    new_modulus: &Arc<Integer>,
+) {
+    // Move the entries of `values` into the curve25519 prime field
+    for (_, fieldv) in values.iter_mut() {
+        fieldv.update_modulus(new_modulus.clone());
+    }
+
+    for (lc_a, lc_b, lc_c) in r1cs_inst.iter_mut() { // a row of matrices A, B, C
+        // Move the entries of this row into the curve25519 prime field
+        update_modulus(lc_a, new_modulus);
+        update_modulus(lc_b, new_modulus);
+        update_modulus(lc_c, new_modulus);
+        update_lc_c(lc_a, lc_b, lc_c, values, new_modulus);
+        // Debug check: the converted row is still satisfied by z
+        let av = inner_product(lc_a, values);
+        let bv = inner_product(lc_b, values);
+        let cv = inner_product(lc_c, values);
+        assert!(av.clone() * &bv == cv);
+    }
+}
\ No newline at end of file
diff --git a/src/target/r1cs/spartan/spartan.rs b/src/target/r1cs/spartan/spartan.rs
new file mode 100644
index 000000000..5151ebf8d
--- /dev/null
+++ b/src/target/r1cs/spartan/spartan.rs
@@ -0,0 +1,169 @@
+//! Spartan
+use std::io;
+use std::path::Path;
+use fxhash::FxHashMap as HashMap;
+use crate::target::r1cs::*;
+
+use super::curve25519::MOD_CURVE25519;
+use super::t256::MOD_T256;
+use super::t25519::MOD_T25519;
+use super::utils::{read_simpl_prover_data, read_verifier_data};
+use crate::create_input::PfCurve;
+
+use std::time::Instant;
+use crate::util::timer::print_time;
+use serde::{Deserialize, Serialize};
+
+use crate::target::r1cs::proof::{serialize_into_file, deserialize_from_file};
+use super::t256::SpartanT256;
+use super::t25519::SpartanT25519;
+use super::curve25519::SpartanCurve25519;
+
+/// A trait for Spartan proof systems
+pub trait SpartanProofSystem {
+    /// A verifying key. Also used for commitments.
+    type VerifierKey: Serialize + for<'a> Deserialize<'a>;
+    /// A proving key
+    type ProverKey: Serialize + for<'a> Deserialize<'a>;
+    /// Precomputed public parameter
+    type SetupParameter: Serialize + for<'a> Deserialize<'a>;
+    /// A proof
+    type Proof: Serialize + for<'a> Deserialize<'a>;
+
+    /// Produce a proof
+    fn prove(
+        pp: &Self::SetupParameter,
+        pk: &Self::ProverKey,
+        input_map: &HashMap<String, Value>,
+    ) -> io::Result<Self::Proof>;
+
+    /// Verify a proof
+    fn verify(
+        pp: &Self::SetupParameter,
+        vk: &Self::VerifierKey,
+        inputs_map: &HashMap<String, Value>,
+        proof: &Self::Proof,
+    ) -> io::Result<()>;
+
+    /// Prove to/from files
+    fn prove_fs(
+        pp_path: impl AsRef<Path>,
+        pk_path: impl AsRef<Path>,
+        input_map: &HashMap<String, Value>,
+        pf_path: impl AsRef<Path>,
+    ) -> std::io::Result<()> {
+        let pp: Self::SetupParameter = deserialize_from_file(pp_path)?;
+        let pk: Self::ProverKey = deserialize_from_file(pk_path)?;
+        let proof = Self::prove(&pp, &pk, input_map)?;
+        serialize_into_file(&proof, pf_path)
+    }
+
+    /// Verify from files
+    fn verify_fs(
+        pp_path: impl AsRef<Path>,
+        vk_path: impl AsRef<Path>,
+        inputs_map: &HashMap<String, Value>,
+        pf_path: impl AsRef<Path>,
+    ) -> io::Result<()> {
+        let pp: Self::SetupParameter = deserialize_from_file(pp_path)?;
+        let vk: Self::VerifierKey = deserialize_from_file(vk_path)?;
+        let proof: Self::Proof = deserialize_from_file(pf_path)?;
+        Self::verify(&pp, &vk, inputs_map, &proof)
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+/// Enum for precomputation
+pub enum SpartanSetup {
+    /// Precomputation for spartan over Curve25519
+    Curve25519(libdorian::NIZKGens, libdorian::Instance),
+    /// Precomputation for spartan over T256
+    T256(libdoriant256::NIZKGens, libdoriant256::Instance),
+}
+
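+// Illustrative dispatch (sketch): a caller holding a deserialized `SpartanSetup`
+// routes to the matching backend by pattern matching, e.g.
+//     match setup {
+//         SpartanSetup::Curve25519(gens, inst) => { /* curve25519 prove/verify */ }
+//         SpartanSetup::T256(gens, inst) => { /* t256 prove/verify */ }
+//     }
+// (`spartan_bench::prove` destructures the Curve25519 arm exactly this way.)
+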
+#[derive(Serialize, Deserialize)]
+/// Enum for the proving result
+pub enum SpartanProveRes {
+    /// Proving result for spartan over Curve25519
+    PfCurve25519(libdorian::NIZK),
+    /// Proving result for spartan over T256
+    PfT256(libdoriant256::NIZK),
+}
+
+/// Prove to/from files
+pub fn prove_fs<P: AsRef<Path>>(
+    p_path: P,
+    pp_path: P,
+    input_map: &HashMap<String, Value>,
+    pf_path: P,
+    curvetype: &PfCurve,
+) -> std::io::Result<()> {
+    match curvetype {
+        PfCurve::Curve25519 => {
+            SpartanCurve25519::prove_fs(pp_path, p_path, input_map, pf_path)
+        }
+        PfCurve::T25519 => {
+            SpartanT25519::prove_fs(pp_path, p_path, input_map, pf_path)
+        }
+        PfCurve::T256 => {
+            SpartanT256::prove_fs(pp_path, p_path, input_map, pf_path)
+        }
+    }
+}
+
+/// Verify a spartan proof from files
+pub fn verify_fs<P: AsRef<Path>>(
+    v_path: P,
+    pp_path: P,
+    inputs_map: &HashMap<String, Value>,
+    pf_path: P,
+    curvetype: &PfCurve,
+) -> io::Result<()> {
+    match curvetype {
+        PfCurve::Curve25519 => {
+            SpartanCurve25519::verify_fs(pp_path, v_path, inputs_map, pf_path)
+        }
+        PfCurve::T25519 => {
+            SpartanT25519::verify_fs(pp_path, v_path, inputs_map, pf_path)
+        }
+        PfCurve::T256 => {
+            SpartanT256::verify_fs(pp_path, v_path, inputs_map, pf_path)
+        }
+    }
+}
+
+/// Precompute the public parameters for spartan, keyed on the circuit's field modulus
+pub fn precompute<P: AsRef<Path>>(pp_path: P, prover_data: &ProverData) -> std::io::Result<()> {
+    let f_mod = prover_data.r1cs.field.modulus();
+
+    if f_mod == (&MOD_CURVE25519 as &Integer) {
+        let (gens, inst) = super::curve25519::precompute(prover_data).unwrap();
+        serialize_into_file(&(gens, inst), pp_path)?;
+    } else if f_mod == (&MOD_T256 as &Integer) {
+        let (gens, inst) = super::t256::precompute(prover_data).unwrap();
+        serialize_into_file(&(gens, inst), pp_path)?;
+    } else if f_mod == (&MOD_T25519 as &Integer) {
+        let (gens, inst) = super::t25519::precompute(prover_data).unwrap();
+        serialize_into_file(&(gens, inst), pp_path)?;
+    } else {
+        panic!("Unsupported Curve");
+    }
+    Ok(())
+}
\ No newline at end of file
diff --git a/src/target/r1cs/spartan/spartan_bench.rs b/src/target/r1cs/spartan/spartan_bench.rs
new file mode 100644
index 000000000..f1c432d2e
--- /dev/null
+++ b/src/target/r1cs/spartan/spartan_bench.rs
@@ -0,0 +1,120 @@
+//! Compare the performance of Spartan-t256 with Spartan-curve25519
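+//!
+//! Flow (benchmark sketch): `precompute` converts a t256 circuit into an
+//! equivalent curve25519 instance once; `prove_fs` / `verify_fs` then run the
+//! stock curve25519 Spartan prover and verifier against that instance.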
+//! Compare the performance of Spartan-t256 with Spartan-curve25519 +use std::io; +use std::path::Path; +use fxhash::FxHashMap as HashMap; +use crate::target::r1cs::*; + +use super::t256::MOD_T256; +use crate::right_field_arithmetic::field::ARC_MOD_CURVE25519; + +use super::utils::{read_prover_data, read_simpl_prover_data, read_verifier_data}; + +use std::time::Instant; +use crate::util::timer::print_time; + +use crate::target::r1cs::proof::{serialize_into_file, deserialize_from_file}; + +use super::spartan::{SpartanProveRes, SpartanSetup}; +use super::r1cs::convert_values; + +/// Prove to/from files +pub fn prove_fs<P: AsRef<Path>>( + p_path: P, + pp_path: P, + input_map: &HashMap<String, Value>, + pf_path: P, +) -> std::io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let pp: SpartanSetup = deserialize_from_file(pp_path)?; + print_time("Time for Deserialize public parameter from file", start.elapsed(), print_msg); + let pf = prove(p_path, pp, input_map, print_msg).unwrap(); + let start = Instant::now(); + serialize_into_file(&pf, pf_path)?; + print_time("Time for Serialize proof into file", start.elapsed(), print_msg); + Ok(()) +} + +/// Verify a Spartan proof from files +pub fn verify_fs<P: AsRef<Path>>( + v_path: P, + pp_path: P, + inputs_map: &HashMap<String, Value>, + pf_path: P, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let pp: SpartanSetup = deserialize_from_file(pp_path)?; + let pf: SpartanProveRes = deserialize_from_file(pf_path)?; + print_time("Time for Deserialize public parameter and proof from file", start.elapsed(), print_msg); + verify(v_path, pp, inputs_map, pf, print_msg) +} + +/// Precompute for spartan (convert an ECDSA circuit over spartan-t256 into an equivalent circuit over spartan-curve25519) +pub fn precompute<P: AsRef<Path>>( + p_path: P, + pp_path: P, + inputs_map: &HashMap<String, Value>, +) -> std::io::Result<()> { + let mut prover_data = read_prover_data::<_>(p_path)?; + let f_mod = prover_data.r1cs.field.modulus(); + + let result = if f_mod == (&MOD_T256 as &Integer) { + let (gens, inst) = super::hybrid_bench::precompute(&mut prover_data, inputs_map).unwrap(); + SpartanSetup::Curve25519(gens, inst) + } else { + panic!("Unsupported curve"); + }; + serialize_into_file(&result, pp_path)?; + Ok(()) +} + +/// Generate a Spartan proof +pub fn prove<P: AsRef<Path>>( + p_path: P, + pp: SpartanSetup, + inputs_map: &HashMap<String, Value>, + print_msg: bool, +) -> io::Result<SpartanProveRes> { + let start = Instant::now(); + let prover_data = read_simpl_prover_data::<_>(p_path)?; + print_time("Time for Read prover key", start.elapsed(), print_msg); + println!("Proving with Spartan"); + println!("Curve: t256 -> Curve: Curve25519"); + let SpartanSetup::Curve25519(gens, inst) = pp else { panic!("Unsupported SpartanSetup") }; + let pf = super::hybrid_bench::prove(prover_data, gens, inst, inputs_map).unwrap(); + Ok(SpartanProveRes::PfCurve25519(pf)) +}
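+ +// A minimal end-to-end sketch of this bench pipeline (illustrative only; +// "P", "PP", "PF", and "V" are hypothetical paths): +// precompute("P", "PP", &inputs)?; // t256 circuit -> curve25519 setup +// prove_fs("P", "PP", &inputs, "PF")?; +// verify_fs("V", "PP", &inputs, "PF")?;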
+ +/// Verify a Spartan proof +pub fn verify<P: AsRef<Path>>( + v_path: P, + pp: SpartanSetup, + inputs_map: &HashMap<String, Value>, + proof_res: SpartanProveRes, + print_msg: bool, +) -> io::Result<()> { + let start = Instant::now(); + let verifier_data = read_verifier_data::<_>(v_path)?; + print_time("Time for Read verifier key", start.elapsed(), print_msg); + + let start = Instant::now(); + let mut values = verifier_data.eval(inputs_map); + print_time("Time for Process verifier input -- eval inputs_map", start.elapsed(), print_msg); + convert_values(&mut values, &ARC_MOD_CURVE25519); + + println!("Verifying with Spartan"); + println!("Curve: t256 -> Curve: Curve25519"); + + let verify_result = match proof_res { + SpartanProveRes::PfCurve25519(nizk) => { + let SpartanSetup::Curve25519(gens, instance) = pp else { panic!("Expected public parameters for spartan-curve25519 only") }; + super::curve25519::verify(&values, &gens, &instance, &nizk) + } + _ => panic!("Expected a proof for spartan-curve25519 only"), + }; + // Only report success once the verification result has been checked + verify_result?; + println!("Proof Verification Successful!"); + Ok(()) +} \ No newline at end of file diff --git a/src/target/r1cs/spartan/spartan_rand.rs b/src/target/r1cs/spartan/spartan_rand.rs new file mode 100644 index 000000000..fe5a9c029 --- /dev/null +++ b/src/target/r1cs/spartan/spartan_rand.rs @@ -0,0 +1,266 @@ +//! Spartan with verifier randomness +use std::io; +use std::path::Path; +use fxhash::FxHashMap as HashMap; +use crate::target::r1cs::*; + +use super::curve25519::MOD_CURVE25519; +use super::t25519::MOD_T25519; +use super::t256::MOD_T256; +use super::utils::{read_verifier_data, Variable}; +use crate::create_input::PfCurve; + +use std::time::Instant; +use crate::util::timer::print_time; +use serde::{Deserialize, Serialize}; + +use crate::target::r1cs::proof::{serialize_into_file, deserialize_from_file}; + +use super::curve25519_rand::SpartanRandCurve25519; +use super::t256_rand::SpartanRandT256; +use super::t25519_rand::SpartanRandT25519; + +/// A trait for Spartan proof systems with verifier randomness +pub trait ISpartanProofSystem { + /// A verifying key. Also used for commitments. + type VerifierKey: Serialize + for<'a> Deserialize<'a>; + /// A proving key + type ProverKey: Serialize + for<'a> Deserialize<'a>; + /// Precomputed public parameter + type SetupParameter: Serialize + for<'a> Deserialize<'a>; + /// A proof + type Proof: Serialize + for<'a> Deserialize<'a>; + + /// Proving + fn prove_fs_inner( + pk_path: impl AsRef<Path>, + pp: &Self::SetupParameter, + input_map: &HashMap<String, Value>, + ) -> std::io::Result<Self::Proof>; + + /// Prove to/from files + fn prove_fs( + pk_path: impl AsRef<Path>, + pp_path: impl AsRef<Path>, + input_map: &HashMap<String, Value>, + pf_path: impl AsRef<Path>, + ) -> std::io::Result<()> { + let pp: Self::SetupParameter = deserialize_from_file(pp_path)?; + let proof = Self::prove_fs_inner(pk_path, &pp, input_map)?; + serialize_into_file(&proof, pf_path) + } + + /// Verifying + fn verify( + pp: &Self::SetupParameter, + verifier_data: &Self::VerifierKey, + proof: &Self::Proof, + inputs_map: &HashMap<String, Value>, + print_msg: bool, + ) -> io::Result<()>; + + /// Verify from files + fn verify_fs<P: AsRef<Path>>( + pp_path: P, + vk_path: P, + pf_path: P, + inputs_map: &HashMap<String, Value>, + ) -> io::Result<()> { + let print_msg = true; + + let pp: Self::SetupParameter = deserialize_from_file(pp_path)?; + let verifier_data: Self::VerifierKey = deserialize_from_file(vk_path)?; + let proof: Self::Proof = deserialize_from_file(pf_path)?; + Self::verify(&pp, &verifier_data, &proof, inputs_map, print_msg) + } +} + + + +#[derive(Serialize, Deserialize)] +/// Enum for precomputation +pub enum SpartanRandSetup { + /// Precomputation for spartan over Curve25519 + Curve25519(libdorian::NIZKRandGens, libdorian::Instance), + /// Precomputation for spartan over T256 + T256(libdoriant256::NIZKRandGens, libdoriant256::Instance), +} + +#[derive(Serialize, Deserialize)] +/// Enum for prove results +pub enum SpartanRandProveRes { + /// Prove result for spartan over Curve25519 + PfCurve25519(libdorian::NIZKRand), + /// Prove result for spartan over T256 + PfT256(libdoriant256::NIZKRand), +}
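+ +// Spartan variable ordering produced by the mapper below (for reference): +// [0, num_round_wit) round witnesses +// [num_round_wit, num_wit) final witnesses +// num_wit the constant-one column (const_id) +// (num_wit, num_wit + num_inp] instance variables, then challenges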
+/// Precompute inner +pub fn precompute_inner( + prover_data: &ProverData, + lc_to_v: fn(&Lc, usize, &HashMap<Var, usize>) -> Vec<Variable>, +) -> io::Result<(usize, usize, usize, Vec<(usize, usize, [u8; 32])>, Vec<(usize, usize, [u8; 32])>, Vec<(usize, usize, [u8; 32])>)> { + // spartan format mapper: CirC -> Spartan + let mut trans: HashMap<Var, usize> = HashMap::default(); // CirC -> spartan ids + let mut id = 0; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::RoundWit => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + #[cfg(debug_assertions)] + let num_round_wit = id; + #[cfg(debug_assertions)] + println!("num round wit: {}", id); + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::FinalWit => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + #[cfg(debug_assertions)] + println!("num final wit: {}", id - num_round_wit); + + let num_wit = id; + let num_inp = prover_data.r1cs.vars.len() - id; + #[cfg(debug_assertions)] + println!("num_inp: {}", num_inp); + id += 1; // skip the constant-one column at index num_wit + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::Inst => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::Chall => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + assert!(id == prover_data.r1cs.vars.len() + 1); + let const_id = num_wit; + + let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + let mut i = 0; // constraint # + for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { + // circ Lc (const, monomials) -> Vec<Variable> + let a = lc_to_v(lc_a, const_id, &trans); + let b = lc_to_v(lc_b, const_id, &trans); + let c = lc_to_v(lc_c, const_id, &trans); + + // constraint # x identifier (vars, 1, inp) + for Variable { sid, value } in a { + m_a.push((i, sid, value)); // i = row; sid = col + } + for Variable { sid, value } in b { + m_b.push((i, sid, value)); + } + for Variable { sid, value } in c { + m_c.push((i, sid, value)); + } + + i += 1; + } + + let num_cons = i; + assert_ne!(num_cons, 0, "No constraints"); + + Ok((num_cons, num_wit, num_inp, m_a, m_b, m_c)) +} + + + +/// Precompute the public parameters for Spartan with verifier randomness (TODO: polynomials for domain separation) +pub fn precompute<P: AsRef<Path>>(pp_path: P, prover_data: &ProverData, prover_data_rand: &ProverDataSpartanRand) -> std::io::Result<()> { + let f_mod = prover_data.r1cs.field.modulus(); + + if f_mod == (&MOD_CURVE25519 as &Integer) { + let (gens, inst) = super::curve25519_rand::precompute(prover_data, prover_data_rand).unwrap(); + serialize_into_file(&(gens, inst), pp_path)?; + } else if f_mod == (&MOD_T256 as &Integer) { + let (gens, inst) = super::t256_rand::precompute(prover_data, prover_data_rand).unwrap(); + serialize_into_file(&(gens, inst), pp_path)?; + } else if f_mod == (&MOD_T25519 as &Integer) { + let (gens, inst) = super::t25519_rand::precompute(prover_data, prover_data_rand).unwrap(); + serialize_into_file(&(gens, inst), pp_path)?; + } else { + panic!("Unsupported modulus"); + } + Ok(()) +}
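+ +// The R1CS field modulus alone selects the backend in `precompute` above; +// the file-based API below instead dispatches on an explicit `PfCurve`, +// e.g. (illustrative): prove_fs("P", "PP", &inputs, "PF", &PfCurve::T256)?;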
+ +/// Prove to/from files +pub fn prove_fs<P: AsRef<Path>>( + p_path: P, + pp_path: P, + input_map: &HashMap<String, Value>, + pf_path: P, + curvetype: &PfCurve, +) -> std::io::Result<()> { + match curvetype { + PfCurve::Curve25519 => { + SpartanRandCurve25519::prove_fs( + p_path, pp_path, input_map, pf_path + ) + } + PfCurve::T256 => { + SpartanRandT256::prove_fs( + p_path, pp_path, input_map, pf_path + ) + } + PfCurve::T25519 => { + SpartanRandT25519::prove_fs( + p_path, pp_path, input_map, pf_path + ) + } + } +} + +/// Verify a Spartan proof from files +pub fn verify_fs<P: AsRef<Path>>( + v_path: P, + pp_path: P, + inputs_map: &HashMap<String, Value>, + pf_path: P, + curvetype: &PfCurve, +) -> io::Result<()> { + match curvetype { + PfCurve::Curve25519 => { + SpartanRandCurve25519::verify_fs( + pp_path, v_path, pf_path, inputs_map, + ) + } + PfCurve::T256 => { + SpartanRandT256::verify_fs( + pp_path, v_path, pf_path, inputs_map, + ) + } + PfCurve::T25519 => { + SpartanRandT25519::verify_fs( + pp_path, v_path, pf_path, inputs_map, + ) + } + } +} + diff --git a/src/target/r1cs/spartan/t25519.rs b/src/target/r1cs/spartan/t25519.rs new file mode 100644 index 000000000..977f89de2 --- /dev/null +++ b/src/target/r1cs/spartan/t25519.rs @@ -0,0 +1,256 @@ +//! Export circ R1cs to Spartan +use crate::target::r1cs::*; + +use super::utils::Variable; +use crate::util::timer::print_time; +use libdoriant25519::{ + scalar::Scalar, Assignment, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK, +}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use std::time::Instant; + +use lazy_static::lazy_static; + +use super::spartan::SpartanProofSystem; + + +lazy_static! { + /// Order of T25519 (equal to the Curve25519 base-field prime 2^255 - 19) + pub static ref MOD_T25519: Integer = Integer::from_str_radix("57896044618658097711785492504343953926634992332820282019728792003956564819949", 10).unwrap(); +} + +/// Number of bytes of the modulus +pub const NUM_MODULUS_BYTE: usize = 32; + +/// Spartan proof system over the T25519 curve +pub struct SpartanT25519; + +impl SpartanProofSystem for SpartanT25519 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartan; + type SetupParameter = (NIZKGens, Instance); + type Proof = NIZK; + + fn prove( + pp: &Self::SetupParameter, + pk: &Self::ProverKey, + input_map: &HashMap<String, Value>, + ) -> io::Result<Self::Proof> { + prove(pk, &pp.0, &pp.1, input_map) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + inputs_map: &HashMap<String, Value>, + proof: &Self::Proof, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + +/// Generate a Spartan proof (TODO: make this private) +pub fn prove( + prover_data: &ProverDataSpartan, + gens: &NIZKGens, + inst: &Instance, + inputs_map: &HashMap<String, Value>, +) -> io::Result<NIZK> { + let print_msg = true; + let start = Instant::now(); + let (wit, inps) = r1cs_to_spartan_simpl(prover_data, inst, inputs_map); + print_time("Time for r1cs_to_spartan", start.elapsed(), print_msg); + + // produce proof + let start = Instant::now(); + let mut prover_transcript = Transcript::new(b"nizk_example"); + let pf = NIZK::prove(inst, wit, &inps, gens, &mut prover_transcript); + print_time("Time for NIZK::prove", start.elapsed(), print_msg); + + Ok(pf) +} + +/// Verify a Spartan proof +pub fn verify( + values: &Vec<FieldV>, + gens: &NIZKGens, + inst: &Instance, + proof: &NIZK, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let inputs = InputsAssignment::new(&inp).unwrap(); + print_time( + "Time for Process verifier input -- transforming inputs to appropriate form", + start.elapsed(), + print_msg, + ); + + let start = Instant::now(); + let mut
verifier_transcript = Transcript::new(b"nizk_example"); + assert!(proof + .verify(inst, &inputs, &mut verifier_transcript, gens) + .is_ok()); + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + Ok(()) +} + +/// Precompute inner +pub fn precompute_inner( + prover_data: &ProverData, +) -> io::Result<( + usize, + usize, + usize, + Vec<(usize, usize, [u8; 32])>, + Vec<(usize, usize, [u8; 32])>, + Vec<(usize, usize, [u8; 32])>, +)> { + // spartan format mapper: CirC -> Spartan + let mut trans: HashMap = HashMap::default(); // Circ -> spartan ids + let mut id = 0; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!( + var.ty(), + VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit + )); + match var.ty() { + VarType::FinalWit | VarType::RoundWit => { + trans.insert(*var, id); + id += 1; + } + _ => {} + } + } + let num_wit = id; + let num_inp = prover_data.r1cs.vars.len() - id; + id += 1; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!( + var.ty(), + VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit + )); + match var.ty() { + VarType::Inst | VarType::Chall => { + trans.insert(*var, id); + id += 1; + } + _ => {} + } + } + assert!(id == prover_data.r1cs.vars.len() + 1); + let const_id = num_wit; + + let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + let mut i = 0; // constraint # + for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { + // circ Lc (const, monomials ) -> Vec + let a = lc_to_v(lc_a, const_id, &trans); + let b = lc_to_v(lc_b, const_id, &trans); + let c = lc_to_v(lc_c, const_id, &trans); + + // constraint # x identifier (vars, 1, inp) + for Variable { sid, value } in a { + m_a.push((i, sid, value)); + } + for Variable { sid, value } in b { + m_b.push((i, sid, value)); + } + for Variable { sid, value } in c { + m_c.push((i, sid, value)); + } + + i += 1; + } + + let num_cons = i; + assert_ne!(num_cons, 0, "No constraints"); + + Ok((num_cons, num_wit, num_inp, m_a, m_b, m_c)) +} + +/// circ R1cs -> spartan R1CSInstance +pub fn precompute(prover_data: &ProverData) -> io::Result<(NIZKGens, Instance)> { + let (num_cons, num_wit, num_inp, m_a, m_b, m_c) = precompute_inner(prover_data).unwrap(); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKGens::new(num_cons, num_wit, num_inp); + Ok((gens, inst)) +} + +/// circ R1cs -> spartan R1CSInstance; needed in prove +pub fn r1cs_to_spartan_simpl( + prover_data: &ProverDataSpartan, + inst: &Instance, + inputs_map: &HashMap, +) -> (Assignment, Assignment) { + // spartan format mapper: CirC -> Spartan + let mut wit = Vec::new(); + let mut inp = Vec::new(); + let values = prover_data.extend_r1cs_witness(inputs_map); + + let var_len = prover_data.pubinp_len + prover_data.wit_len; + assert_eq!(values.len(), var_len); + + for val in values.iter().take(prover_data.pubinp_len) { + inp.push(int_to_scalar(&val.i()).to_bytes()); + } + + for val in values.iter().skip(prover_data.pubinp_len) { + wit.push(int_to_scalar(&val.i()).to_bytes()); + } + + let assn_witness = VarsAssignment::new(&wit).unwrap(); + let assn_inputs = InputsAssignment::new(&inp).unwrap(); + + // check if the instance we created is satisfiable + let res = inst.is_sat(&assn_witness, &assn_inputs); + assert!(res.unwrap()); + + (assn_witness, assn_inputs) +} + +/// Convert Integer to Scalar +pub fn int_to_scalar(i: &Integer) 
-> Scalar { + let digits: Vec = i.to_digits(rug::integer::Order::LsfLe); + let mut repr: [u8; NUM_MODULUS_BYTE] = [0; NUM_MODULUS_BYTE]; + + repr.as_mut()[..digits.len()].copy_from_slice(&digits); + + Scalar::from_bytes_le(&repr) +} + +/// circ Lc (const, monomials ) -> Vec +pub fn lc_to_v(lc: &Lc, const_id: usize, trans: &HashMap) -> Vec { + let mut v: Vec = Vec::new(); + + for (k, m) in &lc.monomials { + let scalar = int_to_scalar(&m.i()); + + let var = Variable { + sid: *trans.get(k).unwrap(), + value: scalar.to_bytes(), + }; + v.push(var); + } + if lc.constant.i() != 0 { + let scalar = int_to_scalar(&lc.constant.i()); + let var = Variable { + sid: const_id, + value: scalar.to_bytes(), + }; + v.push(var); + } + v +} diff --git a/src/target/r1cs/spartan/t25519_rand.rs b/src/target/r1cs/spartan/t25519_rand.rs new file mode 100644 index 000000000..b9b1bac99 --- /dev/null +++ b/src/target/r1cs/spartan/t25519_rand.rs @@ -0,0 +1,322 @@ +//! Export circ R1cs to Spartan +use crate::target::r1cs::*; +use crate::util::timer::print_time; +use circ_fields::t256::utils::helper::SpartanTrait; +use libdoriant25519::scalar::Scalar as OriScalar; +use libdoriant25519::DensePolynomial; +use libdoriant25519::{ + Assignment, InputsAssignment, Instance, NIZKRand, NIZKRandGens, NIZKRandInter, VarsAssignment, +}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use std::time::Instant; + +use std::path::Path; +use crate::target::r1cs::proof::deserialize_from_file; + +use super::t25519::{NUM_MODULUS_BYTE, int_to_scalar, lc_to_v}; +use crate::target::r1cs::wit_comp::StagedWitComp; +use ark_serialize::CanonicalDeserialize; + +use super::spartan_rand::{ + precompute_inner, + ISpartanProofSystem, +}; + +pub struct SpartanRandT25519; + +impl ISpartanProofSystem for SpartanRandT25519 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartanRand; + type SetupParameter = (NIZKRandGens, Instance); + type Proof = NIZKRand; + + fn prove_fs_inner( + pk_path: impl AsRef, + pp: &Self::SetupParameter, + inputs_map: &HashMap, + ) -> std::io::Result { + let print_msg = true; + let (pubinp_len, wit_len, rand_list, precompute, field) = { + let prover_data: Self::ProverKey = deserialize_from_file(pk_path)?; + #[cfg(debug_assertions)] + prover_data.check_all(inputs_map); + R1csToSpartan2Round::parse_prover_data(&prover_data) + }; + + let mut evaluator = R1csToSpartan2Round::from_prover_data_inner( + &pubinp_len, + &wit_len, + &rand_list, + &precompute, + &field + ); + let (gens, inst) = pp; + let start = Instant::now(); + let pf = prove(&mut evaluator, gens, inst, inputs_map).unwrap(); + print_time("Time for Proving", start.elapsed(), print_msg); + Ok(pf) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + proof: &Self::Proof, + inputs_map: &HashMap, + print_msg: bool, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + +/// circ IR1cs -> spartan IR1CSInstance +pub fn precompute( + prover_data: &ProverData, + prover_data_rand: &ProverDataSpartanRand, +) -> io::Result<(NIZKRandGens, Instance)> { + let (num_cons, num_wit, num_inp, m_a, m_b, m_c) = precompute_inner(prover_data, lc_to_v).unwrap(); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKRandGens::new( + num_cons, + &prover_data_rand.pubinp_len, + &prover_data_rand.wit_len, + ); + Ok((gens, inst)) +} + +/// generate spartan proof; to do: change it into private +pub fn prove( + evaluator: &mut R1csToSpartan2Round, + gens: 
&NIZKRandGens, + inst: &Instance, + inputs_map: &HashMap, +) -> io::Result { + let start_whole = Instant::now(); + #[cfg(debug_assertions)] + assert_eq!(gens.pubinp_len.len(), 2); + let print_msg = true; + + let (inputs, wit0) = evaluator.inputs_to_wit0(inputs_map); + + // produce proof + let mut prover_transcript = Transcript::new(b"nizkrand_example"); + let mut intermediate = NIZKRandInter::new(&inputs); + NIZKRand::prove_00(inst, &inputs, gens, &mut prover_transcript); + let rand_len = gens.pubinp_len[1]; + // ignoring the real randomness for now. + let verifier_rand: Vec = NIZKRand::prove_01( + inst, + &wit0, + rand_len, + &mut intermediate, + gens, + &mut prover_transcript, + ); + + let start = Instant::now(); + + let wit1 = evaluator.rand_to_wit1(&verifier_rand); + + print_time("Time for r1cs_to_spartan1,2", start.elapsed(), print_msg); + let pf = NIZKRand::prove_1( + inst, + &wit1, + &mut intermediate, + gens, + &mut prover_transcript, + ); + print_time("Time for whole prove", start_whole.elapsed(), print_msg); + + Ok(pf) +} + +/// verify spartan proof +pub fn verify( + values: &[FieldV], + gens: &NIZKRandGens, + inst: &Instance, + proof: &NIZKRand, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let mut inputs = InputsAssignment::new(&inp).unwrap(); + // println!("Time for transforming inputs to appropriate form: {:?}", start.elapsed()); // verify-ecdsa: 3.868µs + print_time( + "Time for Process verifier input -- transforming inputs to appropriate form", + start.elapsed(), + print_msg, + ); + + let start = Instant::now(); + let mut verifier_transcript = Transcript::new(b"nizkrand_example"); + assert!(proof + .verify(inst, &mut inputs, &mut verifier_transcript, gens) + .is_ok()); + // println!("Time for verifying proof: {:?}", start.elapsed()); // verify-ecdsa: 158.0493ms + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + Ok(()) +} + + +enum Step { + Fresh, + PostWit0, + Done, +} + +/// A witness evaluator for 2-round Spartan +pub struct R1csToSpartan2Round<'a> { + pubinp_len: [usize; 2], + wit_len: [usize; 2], + rand_list: Vec, + evaluator: wit_comp::StagedWitCompEvaluator<'a>, + field: FieldT, + step: Step, +} + +impl<'a> R1csToSpartan2Round<'a> { + pub fn parse_prover_data(prover_data: &ProverDataSpartanRand) + -> ([usize; 2], + [usize; 2], + Vec, + StagedWitComp, + FieldT + ) { + assert_eq!(prover_data.pubinp_len.len(), 2); + assert_eq!(prover_data.precompute.stage_sizes().count(), 3); // one more than the wit + // count + assert_eq!(prover_data.wit_len.len(), 2); + #[cfg(debug_assertions)] + prover_data.precompute.type_check(); + let pubinp_len = [prover_data.pubinp_len[0], prover_data.pubinp_len[1]]; + let wit_len = [prover_data.wit_len[0], prover_data.wit_len[1]]; + let rand_list = { + let idx = prover_data.pubinp_len[0] + prover_data.wit_len[0]; + prover_data.r1cs.vars[idx..idx+prover_data.pubinp_len[1]] + .iter() + .map(|var| prover_data.r1cs.names.get(&var).unwrap().clone()) + .collect() + }; + let precompute = prover_data.precompute.clone(); + let field = prover_data.r1cs.field.clone(); + (pubinp_len, wit_len, rand_list, precompute, field) + } + + /// Create a new evaluator + pub fn from_prover_data_inner(pubinp_len: &[usize], + wit_len: &[usize], + rand_list: &Vec, + precompute: &'a StagedWitComp, + field: &FieldT + ) -> Self { + let evaluator = wit_comp::StagedWitCompEvaluator::new(precompute); + let 
pubinp_len_copy = [pubinp_len[0], pubinp_len[1]]; + let wit_len_copy = [wit_len[0], wit_len[1]]; + Self { + pubinp_len: pubinp_len_copy, + wit_len: wit_len_copy, + rand_list: rand_list.clone(), + field: field.clone(), + evaluator, + step: Step::Fresh, + } + } + /// Inputs: the prover inputs as a map + /// Outputs: the public inputs as an array and the first witness + pub fn inputs_to_wit0( + &mut self, + inputs_map: &HashMap<String, Value>, + ) -> (Assignment, Assignment) { + let start = Instant::now(); + assert!(matches!(self.step, Step::Fresh)); + self.step = Step::PostWit0; + // Evaluate in two stages: the public inputs first, then the first witness. + let start_inner = Instant::now(); + let inputs: Vec<_> = self + .evaluator + .eval_stage(inputs_map.clone()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 0", start_inner.elapsed(), true); + let start_inner = Instant::now(); + + #[cfg(feature = "multicore")] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 1", start_inner.elapsed(), true); + assert_eq!(self.wit_len[0], wit0.len()); + + print_time("Time for inputs_to_wit0", start.elapsed(), true); + + ( + Assignment::new(&inputs).unwrap(), + Assignment::new(&wit0).unwrap(), + ) + } + /// Inputs: the verifier randomness, as a vector + /// Outputs: the second witness + pub fn rand_to_wit1(&mut self, rand: &Vec<OriScalar>) -> Assignment { + assert!(matches!(self.step, Step::PostWit0)); + self.step = Step::Done; + + let rand_map: HashMap<String, Value> = + self.rand_list + .iter() + .zip(rand) + .map(|(var, value)| { + ( + var.clone(), + Value::Field(self.field.new_v(scalar_to_int(&value))), + ) + }) + .collect(); + + assert_eq!(self.pubinp_len[1], rand.len()); + #[cfg(feature = "multicore")] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + assert_eq!(self.wit_len[1], wit1.len()); + + Assignment::new(&wit1).unwrap() + } + +} + + +// Convert a Scalar back to an Integer +fn scalar_to_int(i: &OriScalar) -> Integer { + Integer::from_digits(&i.to_bytes(), rug::integer::Order::LsfLe) +} + diff --git a/src/target/r1cs/spartan/t256.rs b/src/target/r1cs/spartan/t256.rs new file mode 100644 index 000000000..1b91ed493 --- /dev/null +++ b/src/target/r1cs/spartan/t256.rs @@ -0,0 +1,250 @@ +//! Export circ R1cs to Spartan +use crate::target::r1cs::*; +use circ_fields::t256::ScalarField as Scalar; + +use libdoriant256::{Assignment, InputsAssignment, Instance, NIZKGens, VarsAssignment, NIZK}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use super::utils::Variable; +use circ_fields::t256::utils::helper::SpartanTrait; +use std::time::Instant; +use crate::util::timer::print_time; + +use lazy_static::lazy_static; + +use ark_ff::PrimeField; +use ark_serialize::CanonicalDeserialize; + +use super::spartan::SpartanProofSystem;
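+ +// NOTE: MOD_T256 below equals the NIST P-256 (secp256r1) base-field prime +// 2^256 - 2^224 + 2^192 + 2^96 - 1, so P-256 ECDSA field arithmetic can be +// expressed natively over this proof system's scalar field.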
+lazy_static! { + /// Order of T256 + pub static ref MOD_T256: Integer = Integer::from_str_radix("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10).unwrap(); +} + +/// Number of bytes of the modulus +pub const NUM_MODULUS_BYTE: usize = ((Scalar::MODULUS_BIT_SIZE + 7) / 8) as usize; + +/// Spartan proof system over the T256 curve +pub struct SpartanT256; + +impl SpartanProofSystem for SpartanT256 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartan; + type SetupParameter = (NIZKGens, Instance); + type Proof = NIZK; + + fn prove( + pp: &Self::SetupParameter, + pk: &Self::ProverKey, + input_map: &HashMap<String, Value>, + ) -> io::Result<Self::Proof> { + prove(pk, &pp.0, &pp.1, input_map) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + inputs_map: &HashMap<String, Value>, + proof: &Self::Proof, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + +/// Generate a Spartan proof (TODO: make this private) +pub fn prove( + prover_data: &ProverDataSpartan, + gens: &NIZKGens, + inst: &Instance, + inputs_map: &HashMap<String, Value>, +) -> io::Result<NIZK> { + let print_msg = true; + let start = Instant::now(); + let (wit, inps) = r1cs_to_spartan_simpl(prover_data, inst, inputs_map); + print_time("Time for r1cs_to_spartan", start.elapsed(), print_msg); + + // produce proof + let start = Instant::now(); + let mut prover_transcript = Transcript::new(b"nizk_example"); + let pf = NIZK::prove(inst, wit, &inps, gens, &mut prover_transcript); + print_time("Time for NIZK::prove", start.elapsed(), print_msg); + + Ok(pf) +} + + +/// Verify a Spartan proof +pub fn verify( + values: &Vec<FieldV>, + gens: &NIZKGens, + inst: &Instance, + proof: &NIZK, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let inputs = InputsAssignment::new(&inp).unwrap(); + print_time("Time for Process verifier input -- transforming inputs to appropriate form", start.elapsed(), print_msg); + + let start = Instant::now(); + let mut verifier_transcript = Transcript::new(b"nizk_example"); + assert!(proof + .verify(inst, &inputs, &mut verifier_transcript, gens) + .is_ok()); + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + Ok(()) +} + +/// Precompute inner +pub fn precompute_inner( + prover_data: &ProverData, +) -> io::Result<(usize, usize, usize, Vec<(usize, usize, [u8; 32])>, Vec<(usize, usize, [u8; 32])>, Vec<(usize, usize, [u8; 32])>)> { + // spartan format mapper: CirC -> Spartan + let mut trans: HashMap<Var, usize> = HashMap::default(); // CirC -> spartan ids + let mut id = 0; + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::FinalWit | VarType::RoundWit => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + let num_wit = id; + let num_inp = prover_data.r1cs.vars.len() - id; + id += 1; // skip the constant-one column at index num_wit + for var in prover_data.r1cs.vars.iter() { + assert!(matches!(var.ty(), VarType::Inst | VarType::Chall | VarType::FinalWit | VarType::RoundWit)); + match var.ty() { + VarType::Inst | VarType::Chall => { + trans.insert(*var, id); + id += 1; + }, + _ => {} + } + } + assert!(id == prover_data.r1cs.vars.len() + 1); + let const_id = num_wit; + + let mut m_a: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut m_b: Vec<(usize, usize, [u8; 32])> = Vec::new(); +
let mut m_c: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + let mut i = 0; // constraint # + for (lc_a, lc_b, lc_c) in prover_data.r1cs.constraints.iter() { + // circ Lc (const, monomials ) -> Vec + let a = lc_to_v(lc_a, const_id, &trans); + let b = lc_to_v(lc_b, const_id, &trans); + let c = lc_to_v(lc_c, const_id, &trans); + + // constraint # x identifier (vars, 1, inp) + for Variable { sid, value } in a { + m_a.push((i, sid, value)); + } + for Variable { sid, value } in b { + m_b.push((i, sid, value)); + } + for Variable { sid, value } in c { + m_c.push((i, sid, value)); + } + + i += 1; + } + + let num_cons = i; + assert_ne!(num_cons, 0, "No constraints"); + + Ok((num_cons, num_wit, num_inp, m_a, m_b, m_c)) +} + +/// circ R1cs -> spartan R1CSInstance +pub fn precompute( + prover_data: &ProverData, +) -> io::Result<(NIZKGens, Instance)> { + let (num_cons, num_wit, num_inp, m_a, m_b, m_c) = precompute_inner(prover_data).unwrap(); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKGens::new(num_cons, num_wit, num_inp); + Ok((gens, inst)) +} + +/// circ R1cs -> spartan R1CSInstance; needed in prove +pub fn r1cs_to_spartan_simpl( + prover_data: &ProverDataSpartan, + inst: &Instance, + inputs_map: &HashMap, +) -> (Assignment, Assignment) { + // spartan format mapper: CirC -> Spartan + let mut wit = Vec::new(); + let mut inp = Vec::new(); + let values = prover_data.extend_r1cs_witness(inputs_map); + + // prover_data.r1cs.check_all(&values); // for debug purpose; not working now since prover_data.r1cs is not available + let var_len = prover_data.pubinp_len + prover_data.wit_len; + assert_eq!(values.len(), var_len); + + for val in values.iter().take(prover_data.pubinp_len) { + inp.push(int_to_scalar(&val.i()).to_bytes()); + } + + for val in values.iter().skip(prover_data.pubinp_len) { + wit.push(int_to_scalar(&val.i()).to_bytes()); + } + + let assn_witness = VarsAssignment::new(&wit).unwrap(); + let assn_inputs = InputsAssignment::new(&inp).unwrap(); + + + // check if the instance we created is satisfiable + let res = inst.is_sat(&assn_witness, &assn_inputs); + assert!(res.unwrap()); + + ( + assn_witness, + assn_inputs, + ) +} + +// Convert Integer to Scalar +pub fn int_to_scalar(i: &Integer) -> Scalar { + let digits: Vec = i.to_digits(rug::integer::Order::LsfLe); + let mut repr: [u8; NUM_MODULUS_BYTE] = [0; NUM_MODULUS_BYTE]; + + repr.as_mut()[..digits.len()].copy_from_slice(&digits); + +// Scalar::from_be_bytes_mod_order(&repr) + Scalar::deserialize_compressed(&repr[..]).unwrap() +} +// circ Lc (const, monomials ) -> Vec +pub fn lc_to_v(lc: &Lc, const_id: usize, trans: &HashMap) -> Vec { + let mut v: Vec = Vec::new(); + + for (k, m) in &lc.monomials { + let scalar = int_to_scalar(&m.i()); + + let var = Variable { + sid: *trans.get(k).unwrap(), + value: scalar.to_bytes(), + }; + v.push(var); + } + if lc.constant.i() != 0 { + let scalar = int_to_scalar(&lc.constant.i()); + let var = Variable { + sid: const_id, + value: scalar.to_bytes(), + }; + v.push(var); + } + v +} diff --git a/src/target/r1cs/spartan/t256_rand.rs b/src/target/r1cs/spartan/t256_rand.rs new file mode 100644 index 000000000..776860d5f --- /dev/null +++ b/src/target/r1cs/spartan/t256_rand.rs @@ -0,0 +1,324 @@ +//! 
Export circ R1cs to Spartan +use crate::target::r1cs::*; +use circ_fields::t256::ScalarField as Scalar; //Config, + +use crate::util::timer::print_time; +use libdoriant256::scalar::Scalar as OriScalar; +use libdoriant256::DensePolynomial; +use libdoriant256::{ + Assignment, InputsAssignment, Instance, NIZKRand, NIZKRandGens, NIZKRandInter, VarsAssignment, +}; +use merlin::Transcript; +use rug::Integer; +use std::io; +use std::time::Instant; + +use std::path::Path; +use crate::target::r1cs::proof::deserialize_from_file; + +use super::t256::{NUM_MODULUS_BYTE, int_to_scalar, lc_to_v}; +use crate::target::r1cs::wit_comp::StagedWitComp; +use ark_serialize::CanonicalDeserialize; + +#[cfg(feature = "spartan")] +use circ_fields::t256::utils::helper::SpartanTrait; + +use super::spartan_rand::{ + precompute_inner, + ISpartanProofSystem, +}; + +pub struct SpartanRandT256; + +impl ISpartanProofSystem for SpartanRandT256 { + type VerifierKey = VerifierData; + type ProverKey = ProverDataSpartanRand; + type SetupParameter = (NIZKRandGens, Instance); + type Proof = NIZKRand; + + fn prove_fs_inner( + pk_path: impl AsRef, + pp: &Self::SetupParameter, + inputs_map: &HashMap, + ) -> std::io::Result { + let print_msg = true; + let (pubinp_len, wit_len, rand_list, precompute, field) = { + let prover_data: Self::ProverKey = deserialize_from_file(pk_path)?; + #[cfg(debug_assertions)] + prover_data.check_all(inputs_map); + R1csToSpartan2Round::parse_prover_data(&prover_data) + }; + + let mut evaluator = R1csToSpartan2Round::from_prover_data_inner( + &pubinp_len, + &wit_len, + &rand_list, + &precompute, + &field + ); + let (gens, inst) = pp; + let start = Instant::now(); + let pf = prove(&mut evaluator, gens, inst, inputs_map).unwrap(); + print_time("Time for Proving", start.elapsed(), print_msg); + Ok(pf) + } + + fn verify( + pp: &Self::SetupParameter, + vk: &Self::VerifierKey, + proof: &Self::Proof, + inputs_map: &HashMap, + print_msg: bool, + ) -> io::Result<()> { + let values = vk.eval(inputs_map); + verify(&values, &pp.0, &pp.1, proof) + } +} + +/// circ IR1cs -> spartan IR1CSInstance +pub fn precompute( + prover_data: &ProverData, + prover_data_rand: &ProverDataSpartanRand, +) -> io::Result<(NIZKRandGens, Instance)> { + let (num_cons, num_wit, num_inp, m_a, m_b, m_c) = precompute_inner(prover_data, lc_to_v).unwrap(); + + let inst = Instance::new(num_cons, num_wit, num_inp, &m_a, &m_b, &m_c).unwrap(); + let gens = NIZKRandGens::new( + num_cons, + &prover_data_rand.pubinp_len, + &prover_data_rand.wit_len, + ); + Ok((gens, inst)) +} + + +/// generate spartan proof; to do: change it into private +pub fn prove( + evaluator: &mut R1csToSpartan2Round, + gens: &NIZKRandGens, + inst: &Instance, + inputs_map: &HashMap, +) -> io::Result { + let start_whole = Instant::now(); + #[cfg(debug_assertions)] + assert_eq!(gens.pubinp_len.len(), 2); + let print_msg = true; + + let (inputs, wit0) = evaluator.inputs_to_wit0(inputs_map); + + // produce proof + let mut prover_transcript = Transcript::new(b"nizkrand_example"); + let mut intermediate = NIZKRandInter::new(&inputs); + NIZKRand::prove_00(inst, &inputs, gens, &mut prover_transcript); + let rand_len = gens.pubinp_len[1]; + let verifier_rand: Vec = NIZKRand::prove_01( + inst, + &wit0, + rand_len, + &mut intermediate, + gens, + &mut prover_transcript, + ); + + let start = Instant::now(); + + let wit1 = evaluator.rand_to_wit1(&verifier_rand); + + print_time("Time for r1cs_to_spartan1,2", start.elapsed(), print_msg); + let pf = NIZKRand::prove_1( + inst, + &wit1, + &mut 
intermediate, + gens, + &mut prover_transcript, + ); + print_time("Time for whole prove", start_whole.elapsed(), print_msg); + + Ok(pf) +} + +/// verify spartan proof +pub fn verify( + values: &[FieldV], + gens: &NIZKRandGens, + inst: &Instance, + proof: &NIZKRand, +) -> io::Result<()> { + let print_msg = true; + let start = Instant::now(); + let mut inp = Vec::new(); + for v in values { + let scalar = int_to_scalar(&v.i()); + inp.push(scalar.to_bytes()); + } + let mut inputs = InputsAssignment::new(&inp).unwrap(); + print_time( + "Time for Process verifier input -- transforming inputs to appropriate form", + start.elapsed(), + print_msg, + ); + + let start = Instant::now(); + let mut verifier_transcript = Transcript::new(b"nizkrand_example"); + assert!(proof + .verify(inst, &mut inputs, &mut verifier_transcript, gens) + .is_ok()); + print_time("Time for NIZK::verify", start.elapsed(), print_msg); + + Ok(()) +} + + +enum Step { + Fresh, + PostWit0, + Done, +} + +/// A witness evaluator for 2-round Spartan +pub struct R1csToSpartan2Round<'a> { + pubinp_len: [usize; 2], + wit_len: [usize; 2], + rand_list: Vec, + evaluator: wit_comp::StagedWitCompEvaluator<'a>, + field: FieldT, + step: Step, +} + +impl<'a> R1csToSpartan2Round<'a> { + pub fn parse_prover_data(prover_data: &ProverDataSpartanRand) + -> ([usize; 2], + [usize; 2], + Vec, + StagedWitComp, + FieldT + ) { + assert_eq!(prover_data.pubinp_len.len(), 2); + assert_eq!(prover_data.precompute.stage_sizes().count(), 3); // one more than the wit + // count + assert_eq!(prover_data.wit_len.len(), 2); + #[cfg(debug_assertions)] + prover_data.precompute.type_check(); + let pubinp_len = [prover_data.pubinp_len[0], prover_data.pubinp_len[1]]; + let wit_len = [prover_data.wit_len[0], prover_data.wit_len[1]]; + let rand_list = { + let idx = prover_data.pubinp_len[0] + prover_data.wit_len[0]; + prover_data.r1cs.vars[idx..idx+prover_data.pubinp_len[1]] + .iter() + .map(|var| prover_data.r1cs.names.get(&var).unwrap().clone()) + .collect() + }; + let precompute = prover_data.precompute.clone(); + let field = prover_data.r1cs.field.clone(); + (pubinp_len, wit_len, rand_list, precompute, field) + } + + /// Create a new evaluator + pub fn from_prover_data_inner(pubinp_len: &[usize], + wit_len: &[usize], + rand_list: &Vec, + precompute: &'a StagedWitComp, + field: &FieldT + ) -> Self { + let evaluator = wit_comp::StagedWitCompEvaluator::new(precompute); + let pubinp_len_copy = [pubinp_len[0], pubinp_len[1]]; + let wit_len_copy = [wit_len[0], wit_len[1]]; + Self { + pubinp_len: pubinp_len_copy, + wit_len: wit_len_copy, + rand_list: rand_list.clone(), + field: field.clone(), + evaluator, + step: Step::Fresh, + } + } + /// Inputs: the prover inputs as a map + /// Outputs: the public inputs as an array and the first witness + pub fn inputs_to_wit0( + &mut self, + inputs_map: &HashMap, + ) -> (Assignment, Assignment) { + let start = Instant::now(); + assert!(matches!(self.step, Step::Fresh)); + self.step = Step::PostWit0; + // eval twice. 
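+ // Stage 0 consumes the prover's input map and yields the public inputs; + // stage 1 takes no further inputs and yields the first-round witness wit0. + // The second-round witness is produced later in `rand_to_wit1`, once the + // verifier randomness is known.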
+ let start_inner = Instant::now(); + let inputs: Vec<_> = self + .evaluator + .eval_stage(inputs_map.clone()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 0", start_inner.elapsed(), true); + let start_inner = Instant::now(); + + #[cfg(feature = "multicore")] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit0: Vec<_> = self + .evaluator + .eval_stage(Default::default()) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + print_time("Time for inputs_to_wit0 inner 1", start_inner.elapsed(), true); + assert_eq!(self.wit_len[0], wit0.len()); + + print_time("Time for inputs_to_wit0", start.elapsed(), true); + + ( + Assignment::new(&inputs).unwrap(), + Assignment::new(&wit0).unwrap(), + ) + } + /// Inputs: the verifier randomness, as a vector + /// Outputs: the second witness + pub fn rand_to_wit1(&mut self, rand: &Vec<OriScalar>) -> Assignment { + assert!(matches!(self.step, Step::PostWit0)); + self.step = Step::Done; + + let rand_map: HashMap<String, Value> = + self.rand_list + .iter() + .zip(rand) + .map(|(var, value)| { + ( + var.clone(), + Value::Field(self.field.new_v(scalar_to_int(&value))), + ) + }) + .collect(); + + assert_eq!(self.pubinp_len[1], rand.len()); + #[cfg(feature = "multicore")] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_par_iter() // Using parallel iterator + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + #[cfg(not(feature = "multicore"))] + let wit1: Vec<_> = self + .evaluator + .eval_stage(rand_map) + .into_iter() + .map(|v| int_to_scalar(&v.as_pf().i()).to_bytes()) + .collect(); + assert_eq!(self.wit_len[1], wit1.len()); + + Assignment::new(&wit1).unwrap() + } + +} + + +// Convert a Scalar back to an Integer +fn scalar_to_int(i: &OriScalar) -> Integer { + Integer::from_digits(&i.to_bytes(), rug::integer::Order::LsfLe) +}
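+ +// NOTE: this is the byte-level inverse of `int_to_scalar` above: both use +// little-endian digit order (rug's Order::LsfLe), so canonical values +// round-trip between the Integer and scalar byte representations.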
 + diff --git a/src/target/r1cs/spartan/utils.rs b/src/target/r1cs/spartan/utils.rs new file mode 100644 index 000000000..dfa95799d --- /dev/null +++ b/src/target/r1cs/spartan/utils.rs @@ -0,0 +1,117 @@ +//! Utility functions for Spartan +use crate::target::r1cs::*; +use bincode::{deserialize_from, serialize_into}; +use std::fs::File; +use std::io; +use std::io::{BufReader, BufWriter}; +use std::path::Path; +use super::spartan; +use super::spartan_rand; +use crate::target::r1cs::proof::serialize_into_file; + +/// Enum for proof curve type +#[derive(Debug, Clone, Copy)] +pub enum PfCurve { + /// Curve T256 + T256, + /// Curve25519 + Curve25519, + /// Curve T25519 + T25519, +} + +/// Hold Spartan variables +#[derive(Debug)] +pub struct Variable { + /// sid + pub sid: usize, + /// value + pub value: [u8; 32], +} + +/// Write prover and verifier data to file +pub fn write_data<P1: AsRef<Path>, P2: AsRef<Path>>( + p_path: P1, + v_path: P2, + pp_path: P1, + p_data: &ProverData, + v_data: &VerifierData, +) -> io::Result<()> { + write_prover_data(p_path, p_data)?; + spartan::precompute(pp_path, p_data)?; + write_verifier_data(v_path, v_data)?; + Ok(()) +} + +#[cfg(feature = "spartan")] +/// Write prover and verifier data to file +pub fn write_data_spartan<P1: AsRef<Path>, P2: AsRef<Path>>( + p_path: P1, + v_path: P2, + pp_path: P1, + p_data: &ProverData, + v_data: &VerifierData, +) -> io::Result<()> { + write_prover_data("P_long", p_data)?; // needed by our hybrid circuit for benchmarking; TODO: remove + let simpl_p_data = ProverDataSpartan::from_prover_data(p_data); + write_simpl_prover_data(p_path, &simpl_p_data)?; + spartan::precompute(pp_path, p_data)?; + write_verifier_data(v_path, v_data)?; + Ok(()) +} + +#[cfg(feature = "spartan")] +/// Write prover and verifier data (with verifier randomness) to file +pub fn write_data_spartan_rand<P1: AsRef<Path>, P2: AsRef<Path>>( + p_path: P1, + v_path: P2, + pp_path: P1, + p_data: &ProverData, + v_data: &VerifierData, +) -> io::Result<()> { + write_prover_data("P_long", p_data)?; // needed by our hybrid circuit for benchmarking; TODO: remove + let simpl_p_data = ProverDataSpartanRand::from_prover_data(p_data); + serialize_into_file(&simpl_p_data, p_path)?; + spartan_rand::precompute(pp_path, p_data, &simpl_p_data)?; + write_verifier_data(v_path, v_data)?; + Ok(()) +} + +fn write_prover_data<P: AsRef<Path>>(path: P, data: &ProverData) -> io::Result<()> { + let mut file = BufWriter::new(File::create(path)?); + serialize_into(&mut file, &data).unwrap(); + Ok(()) +} + +/// Read prover data +pub fn read_prover_data<P: AsRef<Path>>(path: P) -> io::Result<ProverData> { + let mut file = BufReader::new(File::open(path)?); + let data: ProverData = deserialize_from(&mut file).unwrap(); + Ok(data) +} + +fn write_simpl_prover_data<P: AsRef<Path>>(path: P, data: &ProverDataSpartan) -> io::Result<()> { + let mut file = BufWriter::new(File::create(path)?); + serialize_into(&mut file, &data).unwrap(); + Ok(()) +} + +/// Read simplified prover data +pub fn read_simpl_prover_data<P: AsRef<Path>>(path: P) -> io::Result<ProverDataSpartan> { + let mut file = BufReader::new(File::open(path)?); + let data: ProverDataSpartan = deserialize_from(&mut file).unwrap(); + Ok(data) +} + +fn write_verifier_data<P: AsRef<Path>>(path: P, data: &VerifierData) -> io::Result<()> { + let mut file = BufWriter::new(File::create(path)?); + serialize_into(&mut file, &data).unwrap(); + Ok(()) +} + +/// Read verifier data +pub fn read_verifier_data<P: AsRef<Path>>(path: P) -> io::Result<VerifierData> { + let mut file = BufReader::new(File::open(path)?); + let data: VerifierData = deserialize_from(&mut file).unwrap(); + Ok(data) +}
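+ +// Minimal usage sketch (illustrative; "P", "V", "SpartanPP" are hypothetical +// paths): persist the prover/verifier keys plus the Spartan precomputation, +// then read the prover key back: +// write_data("P", "V", "SpartanPP", &p_data, &v_data)?; +// let p_data_again = read_prover_data("P")?;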
 \ No newline at end of file diff --git a/src/target/r1cs/wit_comp.rs b/src/target/r1cs/wit_comp.rs index 5e57b462c..30e17be5f 100644 --- a/src/target/r1cs/wit_comp.rs +++ b/src/target/r1cs/wit_comp.rs @@ -14,7 +14,7 @@ use std::time::Duration; /// In each stage: /// * it takes a partial assignment /// * it returns a vector of field values -#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct StagedWitComp { vars: HashSet, stages: Vec, @@ -28,7 +28,7 @@ pub struct StagedWitComp { } /// Specifies a stage. -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Stage { inputs: HashMap, num_outputs: usize, @@ -90,6 +90,14 @@ impl StagedWitComp { pub fn num_step_args(&self) -> usize { self.step_args.len() } + + /// Type-check the witness computation (for debugging) + #[cfg(debug_assertions)] + pub fn type_check(&self) { + // Basic sanity check: verify that stages and outputs are consistent + let total_outputs: usize = self.stages.iter().map(|s| s.num_outputs).sum(); + assert!(total_outputs <= self.output_steps.len() || self.output_steps.is_empty()); + } } /// Evaluator interface diff --git a/src/util/mod.rs b/src/util/mod.rs index 4a74968da..74125e622 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -3,6 +3,7 @@ pub mod hc; pub mod ns; pub mod once; +pub mod timer; #[cfg(test)] mod hash_test { diff --git a/src/util/timer.rs b/src/util/timer.rs new file mode 100644 index 000000000..ca211832f --- /dev/null +++ b/src/util/timer.rs @@ -0,0 +1,11 @@ +//! Timer for benchmark +use std::time::Duration; + +/// Print the elapsed time, prefixed with `message` when `print_msg` is set +pub fn print_time(message: &str, duration: Duration, print_msg: bool) { + if print_msg { + println!("{}: {:?}", message, duration); + } else { + println!("{:?}", duration); + } +} diff --git a/third_party/Dorian/.cargo/config b/third_party/Dorian/.cargo/config new file mode 100644 index 000000000..3a420e914 --- /dev/null +++ b/third_party/Dorian/.cargo/config @@ -0,0 +1,4 @@ +[build] +rustflags = [ + "-C", "target-cpu=native", +] \ No newline at end of file diff --git a/third_party/Dorian/.github/workflows/rust.yml b/third_party/Dorian/.github/workflows/rust.yml new file mode 100644 index 000000000..abda4fd62 --- /dev/null +++ b/third_party/Dorian/.github/workflows/rust.yml @@ -0,0 +1,66 @@ +name: Build and Test Spartan + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build_nightly: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Install + run: rustup default nightly + - name: Install rustfmt Components + run: rustup component add rustfmt + - name: Install clippy + run: rustup component add clippy + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose + - name: Build examples + run: cargo build --examples --verbose + - name: Check Rustfmt Code Style + run: cargo fmt --all -- --check + - name: Check clippy warnings + run: cargo clippy --all-targets --all-features -- -D warnings + + build_nightly_wasm: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Install + run: rustup default nightly + + - name: Build without std + run: cargo build --no-default-features --verbose + + - name: Run tests without std + run: cargo test --no-default-features --verbose + + - name: Build examples without std + run: cargo build --examples --no-default-features --verbose + + - name: Install wasm32-wasi target + run: rustup target add wasm32-wasi + + - name: Install wasm32-unknown-unknown target + run: rustup target add wasm32-unknown-unknown + + - name: Build for target wasm-wasi + run: RUSTFLAGS="" cargo build --target=wasm32-wasi --no-default-features --verbose + + - name: Patch Cargo.toml for wasm-bindgen + run: | + echo "[dependencies.getrandom]" >> 
Cargo.toml + echo "version = \"0.1\"" >> Cargo.toml + echo "default-features = false" >> Cargo.toml + echo "features = [\"wasm-bindgen\"]" >> Cargo.toml + + - name: Build for target wasm32-unknown-unknown + run: RUSTFLAGS="" cargo build --target=wasm32-unknown-unknown --no-default-features --verbose + diff --git a/third_party/Dorian/.gitignore b/third_party/Dorian/.gitignore new file mode 100644 index 000000000..b72b4510c --- /dev/null +++ b/third_party/Dorian/.gitignore @@ -0,0 +1,12 @@ +# Generated by Cargo +# will have compiled files and executables +/target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +*.txt diff --git a/third_party/Dorian/CODE_OF_CONDUCT.md b/third_party/Dorian/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c72a5749c --- /dev/null +++ b/third_party/Dorian/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/third_party/Dorian/CONTRIBUTING.md b/third_party/Dorian/CONTRIBUTING.md new file mode 100644 index 000000000..981b4e147 --- /dev/null +++ b/third_party/Dorian/CONTRIBUTING.md @@ -0,0 +1,12 @@ +This project welcomes contributions and suggestions. Most contributions require you to +agree to a Contributor License Agreement (CLA) declaring that you have the right to, +and actually do, grant us the rights to use your contribution. For details, visit +https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need +to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the +instructions provided by the bot. You will only need to do this once across all repositories using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
\ No newline at end of file diff --git a/third_party/Dorian/Cargo.toml b/third_party/Dorian/Cargo.toml new file mode 100644 index 000000000..01bfcbe1b --- /dev/null +++ b/third_party/Dorian/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "dorian" +version = "0.8.0" +authors = ["Srinath Setty "] +edition = "2021" +description = "High-speed zkSNARKs without trusted setup" +documentation = "https://docs.rs/spartan/" +readme = "README.md" +repository = "https://github.com/microsoft/Spartan" +license-file = "LICENSE" +keywords = ["zkSNARKs", "cryptography", "proofs"] + +[dependencies] +curve25519-dalek = { version = "3.2.0", features = [ + "serde", + "u64_backend", + "alloc", +], default-features = false } +merlin = { version = "3.0.0", default-features = false } +rand = { version = "0.7.3", features = ["getrandom"], default-features = false } +digest = { version = "0.8.1", default-features = false } +sha3 = { version = "0.8.2", default-features = false } +byteorder = { version = "1.3.4", default-features = false } +rayon = { version = "1.3.0", optional = true } +serde = { version = "1.0.106", features = ["derive"], default-features = false } +bincode = { version = "1.3.3", default-features = false } +subtle = { version = "2.4", features = ["i128"], default-features = false } +zeroize = { version = "1.5", default-features = false } +itertools = { version = "0.10.0", default-features = false } +colored = { version = "2.0.0", default-features = false, optional = true } +flate2 = { version = "1.0.14" } + +[dev-dependencies] +criterion = "0.3.1" + +[lib] +name = "libdorian" +path = "src/lib.rs" + +[[bin]] +name = "snark" +path = "profiler/snark.rs" +required-features = ["std"] + +[[bin]] +name = "nizk" +path = "profiler/nizk.rs" +required-features = ["std"] + +[[bench]] +name = "snark" +harness = false +required-features = ["std"] + +[[bench]] +name = "nizk" +harness = false +required-features = ["std"] + +[features] +default = ["std"] #, "simd_backend"] +std = [ + "curve25519-dalek/std", + "digest/std", + "merlin/std", + "rand/std", + "sha3/std", + "byteorder/std", + "serde/std", + "subtle/std", + "zeroize/std", + "itertools/use_std", + "flate2/rust_backend", +] +simd_backend = ["curve25519-dalek/simd_backend"] +multicore = ["rayon"] +profile = ["colored"] +bench = [] diff --git a/third_party/Dorian/LICENSE b/third_party/Dorian/LICENSE new file mode 100644 index 000000000..3d8b93bc7 --- /dev/null +++ b/third_party/Dorian/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/third_party/Dorian/NOTICE.md b/third_party/Dorian/NOTICE.md new file mode 100644 index 000000000..3eb646796 --- /dev/null +++ b/third_party/Dorian/NOTICE.md @@ -0,0 +1,73 @@ +This repository includes the following third-party open-source code. + +* The code in `src/scalar/ristretto255.rs` is derived from [bls12-381](https://github.com/zkcrypto/bls12_381). +Specifically, from [src/bls12_381/scalar.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/scalar.rs) and [src/bls12_381/util.rs](https://github.com/zkcrypto/bls12_381/blob/master/src/util.rs), which has the following copyright and license. + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + +* The `invert` and `batch_invert` methods in src/scalar/ristretto255.rs is from [curve25519-dalek](https://github.com/dalek-cryptography/curve25519-dalek), which has the following license. + + Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ======================================================================== + + Portions of curve25519-dalek were originally derived from Adam Langley's Go ed25519 implementation, found at , under the following licence: + + ======================================================================== + + Copyright (c) 2012 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* The module `src/nizk/bullet.rs` is derived from [bulletproofs](https://github.com/dalek-cryptography/bulletproofs/), which has the following license: + + MIT License + + Copyright (c) 2018 Chain, Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file
diff --git a/third_party/Dorian/README.md b/third_party/Dorian/README.md
new file mode 100644
index 000000000..14ba60213
--- /dev/null
+++ b/third_party/Dorian/README.md
@@ -0,0 +1,429 @@
+# Spartan: High-speed zkSNARKs without trusted setup
+
+![Rust](https://github.com/microsoft/Spartan/workflows/Rust/badge.svg)
+[![](https://img.shields.io/crates/v/spartan.svg)](https://crates.io/crates/spartan)
+
+Spartan is a high-speed zero-knowledge proof system, a cryptographic primitive that enables a prover to prove a mathematical statement to a verifier without revealing anything besides the validity of the statement. This repository provides `libdorian`, a Rust library that implements a zero-knowledge succinct non-interactive argument of knowledge (zkSNARK), which is a type of zero-knowledge proof system with short proofs and fast verification times. The details of the Spartan proof system are described in our [paper](https://eprint.iacr.org/2019/550) published at [CRYPTO 2020](https://crypto.iacr.org/2020/). The security of the Spartan variant implemented in this library is based on the discrete logarithm problem in the random oracle model.
+
+A simple example application is proving the knowledge of a secret s such that H(s) == d for a public d, where H is a cryptographic hash function (e.g., SHA-256, Keccak). A more complex application is a database-backed cloud service that produces proofs of correct state machine transitions for auditability. See this [paper](https://eprint.iacr.org/2020/758.pdf) for an overview and this [paper](https://eprint.iacr.org/2018/907.pdf) for details.
+
+Note that this library has _not_ received a security review or audit.
+
+## Highlights
+
+We now highlight Spartan's distinctive features.
+
+- **No "toxic" waste:** Spartan is a _transparent_ zkSNARK and does not require a trusted setup. So, it does not involve any trapdoors that must be kept secret or require a multi-party ceremony to produce public parameters.
+
+- **General-purpose:** Spartan produces proofs for arbitrary NP statements. `libdorian` supports NP statements expressed as rank-1 constraint satisfiability (R1CS) instances, a popular language for which there exist efficient transformations and compiler toolchains from high-level programs of interest (see the sketch after this list).
+
+- **Sub-linear verification costs:** Spartan is the first transparent proof system with sub-linear verification costs for arbitrary NP statements (e.g., R1CS).
+
+- **Standardized security:** Spartan's security relies on the hardness of computing discrete logarithms (a standard cryptographic assumption) in the random oracle model. `libdorian` uses `ristretto255`, a prime-order group abstraction atop `curve25519` (a high-speed elliptic curve). We use [`curve25519-dalek`](https://docs.rs/curve25519-dalek) for arithmetic over `ristretto255`.
+
+- **State-of-the-art performance:**
+  Among transparent SNARKs, Spartan offers the fastest prover with speedups of 36–152× depending on the baseline, produces proofs that are shorter by 1.2–416×, and incurs the lowest verification times with speedups of 3.6–1326×. The only exception is proof sizes under Bulletproofs, but Bulletproofs incurs slower verification both asymptotically and concretely. When compared to the state-of-the-art zkSNARK with trusted setup, Spartan’s prover is 2× faster for arbitrary R1CS instances and 16× faster for data-parallel workloads.
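+
+To make the R1CS bullet above concrete, here is a minimal, self-contained sketch of the satisfiability relation that the examples below construct instances of: Az ∘ Bz = Cz, where ∘ denotes the entrywise product and z = (vars, 1, inputs). This sketch is not part of `libdorian`; it uses `u64` arithmetic modulo a small toy prime rather than the `ristretto255` scalar field, and all names in it are illustrative.
+
+```rust
+// Toy R1CS satisfiability check over Z_P; the library itself works over the
+// scalar field of ristretto255.
+const P: u64 = 97;
+
+// sparse matrix as (row, col, value) triples, mirroring the
+// (usize, usize, [u8; 32]) encoding used in the examples below
+type SparseMatrix = Vec<(usize, usize, u64)>;
+
+fn mat_vec(m: &SparseMatrix, z: &[u64], num_rows: usize) -> Vec<u64> {
+  let mut out = vec![0u64; num_rows];
+  for &(r, c, v) in m {
+    out[r] = (out[r] + v * z[c]) % P;
+  }
+  out
+}
+
+fn is_sat(a: &SparseMatrix, b: &SparseMatrix, c: &SparseMatrix,
+          vars: &[u64], inputs: &[u64], num_cons: usize) -> bool {
+  // z = (vars, 1, inputs)
+  let mut z = vars.to_vec();
+  z.push(1);
+  z.extend_from_slice(inputs);
+  let az = mat_vec(a, &z, num_cons);
+  let bz = mat_vec(b, &z, num_cons);
+  let cz = mat_vec(c, &z, num_cons);
+  (0..num_cons).all(|i| az[i] * bz[i] % P == cz[i])
+}
+
+fn main() {
+  // one constraint: vars[0] * vars[1] = inputs[0] (column 2 holds the constant 1)
+  let (a, b, c) = (vec![(0, 0, 1)], vec![(0, 1, 1)], vec![(0, 3, 1)]);
+  assert!(is_sat(&a, &b, &c, &[6, 7], &[42], 1));
+}
+```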
+
+### Implementation details
+
+`libdorian` uses [`merlin`](https://docs.rs/merlin/) to automate the Fiat-Shamir transform. We also introduce a new type called `RandomTape` that extends a `Transcript` in `merlin` to allow the prover's internal methods to produce private randomness using its private transcript without having to create `OsRng` objects throughout the code. An object of type `RandomTape` is initialized with a new random seed from `OsRng` for each proof produced by the library.
+
+## Examples
+
+To import `libdorian` into your Rust project, add the following dependency to `Cargo.toml`:
+
+```text
+spartan = "0.8.0"
+```
+
+The following example shows how to use `libdorian` to create and verify a SNARK proof.
+The style of some of our public APIs is inspired by the underlying crates we use.
+
+```rust
+extern crate libdorian;
+extern crate merlin;
+use libdorian::{Instance, SNARKGens, SNARK};
+use merlin::Transcript;
+fn main() {
+  // specify the size of an R1CS instance
+  let num_vars = 1024;
+  let num_cons = 1024;
+  let num_inputs = 10;
+  let num_non_zero_entries = 1024;
+
+  // produce public parameters
+  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
+
+  // ask the library to produce a synthetic R1CS instance
+  let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+
+  // create a commitment to the R1CS instance
+  let (comm, decomm) = SNARK::encode(&inst, &gens);
+
+  // produce a proof of satisfiability
+  let mut prover_transcript = Transcript::new(b"snark_example");
+  let proof = SNARK::prove(&inst, &comm, &decomm, vars, &inputs, &gens, &mut prover_transcript);
+
+  // verify the proof of satisfiability
+  let mut verifier_transcript = Transcript::new(b"snark_example");
+  assert!(proof
+    .verify(&comm, &inputs, &mut verifier_transcript, &gens)
+    .is_ok());
+  println!("proof verification successful!");
+}
+```
+
+Here is another example that uses the NIZK variant of the Spartan proof system:
+
+```rust
+extern crate libdorian;
+extern crate merlin;
+use libdorian::{Instance, NIZKGens, NIZK};
+use merlin::Transcript;
+fn main() {
+  // specify the size of an R1CS instance
+  let num_vars = 1024;
+  let num_cons = 1024;
+  let num_inputs = 10;
+
+  // produce public parameters
+  let gens = NIZKGens::new(num_cons, num_vars, num_inputs);
+
+  // ask the library to produce a synthetic R1CS instance
+  let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+
+  // produce a proof of satisfiability
+  let mut prover_transcript = Transcript::new(b"nizk_example");
+  let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript);
+
+  // verify the proof of satisfiability
+  let mut verifier_transcript = Transcript::new(b"nizk_example");
+  assert!(proof
+    .verify(&inst, &inputs, &mut verifier_transcript, &gens)
+    .is_ok());
+  println!("proof verification successful!");
+}
+```
+
+Finally, we provide an example that specifies a custom R1CS instance instead of using a synthetic instance.
+
+```rust
+#![allow(non_snake_case)]
+extern crate curve25519_dalek;
+extern crate libdorian;
+extern crate merlin;
+use curve25519_dalek::scalar::Scalar;
+use libdorian::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
+use merlin::Transcript;
+use rand::rngs::OsRng;
+
+fn main() {
+  // produce a tiny instance
+  let (
+    num_cons,
+    num_vars,
+    num_inputs,
+    num_non_zero_entries,
+    inst,
+    assignment_vars,
+    assignment_inputs,
+  ) = produce_tiny_r1cs();
+
+  // produce public parameters
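+  // (SNARKGens is sized by the number of constraints, variables, and inputs, plus an
+  // upper bound on the number of non-zero entries in any one of the matrices A, B, C)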
+  let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries);
+
+  // create a commitment to the R1CS instance
+  let (comm, decomm) = SNARK::encode(&inst, &gens);
+
+  // produce a proof of satisfiability
+  let mut prover_transcript = Transcript::new(b"snark_example");
+  let proof = SNARK::prove(
+    &inst,
+    &comm,
+    &decomm,
+    assignment_vars,
+    &assignment_inputs,
+    &gens,
+    &mut prover_transcript,
+  );
+
+  // verify the proof of satisfiability
+  let mut verifier_transcript = Transcript::new(b"snark_example");
+  assert!(proof
+    .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens)
+    .is_ok());
+  println!("proof verification successful!");
+}
+
+fn produce_tiny_r1cs() -> (
+  usize,
+  usize,
+  usize,
+  usize,
+  Instance,
+  VarsAssignment,
+  InputsAssignment,
+) {
+  // We will use the following example, but one could construct any R1CS instance.
+  // Our R1CS instance is three constraints over five variables and two public inputs
+  // (Z0 + Z1) * I0 - Z2 = 0
+  // (Z0 + I1) * Z2 - Z3 = 0
+  // Z4 * 1 - 0 = 0
+
+  // parameters of the R1CS instance rounded to the nearest power of two
+  let num_cons = 4;
+  let num_vars = 5;
+  let num_inputs = 2;
+  let num_non_zero_entries = 5;
+
+  // We will encode the above constraints into three matrices, where
+  // the coefficients in the matrix are in the little-endian byte order
+  let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
+
+  // The constraint system is defined over a finite field, which in our case is
+  // the scalar field of ristretto255/curve25519, i.e., p = 2^{252}+27742317777372353535851937790883648493
+  // To construct these matrices, we will use `curve25519-dalek`, but one can use any other method.
+
+  // a variable that holds a byte representation of 1
+  let one = Scalar::one().to_bytes();
+
+  // R1CS is a set of three sparse matrices A, B, C, with a row for every
+  // constraint and a column for every entry in z = (vars, 1, inputs)
+  // An R1CS instance is satisfiable iff:
+  // Az \circ Bz = Cz, where z = (vars, 1, inputs)
+
+  // constraint 0 entries in (A,B,C)
+  // constraint 0 is (Z0 + Z1) * I0 - Z2 = 0.
+  // We set 1 in matrix A for the columns that correspond to Z0 and Z1
+  // We set 1 in matrix B for the column that corresponds to I0
+  // We set 1 in matrix C for the column that corresponds to Z2
+  A.push((0, 0, one));
+  A.push((0, 1, one));
+  B.push((0, num_vars + 1, one));
+  C.push((0, 2, one));
+
+  // constraint 1 entries in (A,B,C)
+  A.push((1, 0, one));
+  A.push((1, num_vars + 2, one));
+  B.push((1, 2, one));
+  C.push((1, 3, one));
+
+  // constraint 2 entries in (A,B,C)
+  // (no C entry: the product must equal 0)
+  A.push((2, 4, one));
+  B.push((2, num_vars, one));
+
+  let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap();
+
+  // compute a satisfying assignment
+  let mut csprng: OsRng = OsRng;
+  let i0 = Scalar::random(&mut csprng);
+  let i1 = Scalar::random(&mut csprng);
+  let z0 = Scalar::random(&mut csprng);
+  let z1 = Scalar::random(&mut csprng);
+  let z2 = (z0 + z1) * i0; // constraint 0
+  let z3 = (z0 + i1) * z2; // constraint 1
+  let z4 = Scalar::zero(); // constraint 2
+
+  // create a VarsAssignment
+  let mut vars = vec![Scalar::zero().to_bytes(); num_vars];
+  vars[0] = z0.to_bytes();
+  vars[1] = z1.to_bytes();
+  vars[2] = z2.to_bytes();
+  vars[3] = z3.to_bytes();
+  vars[4] = z4.to_bytes();
+  let assignment_vars = VarsAssignment::new(&vars).unwrap();
+
+  // create an InputsAssignment
+  let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs];
+  inputs[0] = i0.to_bytes();
+  inputs[1] = i1.to_bytes();
+  let assignment_inputs = InputsAssignment::new(&inputs).unwrap();
+
+  // check if the instance we created is satisfiable
+  let res = inst.is_sat(&assignment_vars, &assignment_inputs);
+  assert_eq!(res.unwrap(), true);
+
+  (
+    num_cons,
+    num_vars,
+    num_inputs,
+    num_non_zero_entries,
+    inst,
+    assignment_vars,
+    assignment_inputs,
+  )
+}
+```
+
+For more examples, see the [`examples/`](examples) directory in this repo.
+
+## Building `libdorian`
+
+Install [`rustup`](https://rustup.rs/).
+
+Switch to nightly Rust using `rustup`:
+
+```text
+rustup default nightly
+```
+
+Clone the repository:
+
+```text
+git clone https://github.com/Microsoft/Spartan
+cd Spartan
+```
+
+To build docs for public APIs of `libdorian`:
+
+```text
+cargo doc
+```
+
+To run tests:
+
+```text
+RUSTFLAGS="-C target_cpu=native" cargo test
+```
+
+To build `libdorian`:
+
+```text
+RUSTFLAGS="-C target_cpu=native" cargo build --release
+```
+
+> NOTE: We enable SIMD instructions in `curve25519-dalek` by default, so if it fails to build, remove the "simd_backend" feature argument in `Cargo.toml`.
+
+### Supported features
+
+- `std`: enables std features (enabled by default)
+- `simd_backend`: enables `curve25519-dalek`'s simd feature (enabled by default)
+- `profile`: enables fine-grained profiling information (see below for its use)
+
+### WASM Support
+
+`libdorian` depends on `rand::OsRng` (which internally uses the `getrandom` crate), so it has out-of-the-box support for `wasm32-wasi`.
+
+For the target `wasm32-unknown-unknown`, disable default features for spartan
+and add a direct dependency on `getrandom` with the `wasm-bindgen` feature enabled.
+
+```toml
+[dependencies]
+spartan = { version = "0.7", default-features = false }
+# since spartan uses getrandom (via rand's OsRng), we need to enable the 'wasm-bindgen'
+# feature so that it can draw its random seed from the js/nodejs environment
+# https://docs.rs/getrandom/0.1.16/getrandom/index.html#support-for-webassembly-and-asmjs
+getrandom = { version = "0.1", features = ["wasm-bindgen"] }
+```
+
+## Performance
+
+### End-to-end benchmarks
+
+`libdorian` includes two benches: `benches/nizk.rs` and `benches/snark.rs`.
If you report the performance of Spartan in a research paper, we recommend using these benches for higher accuracy instead of fine-grained profiling (listed below). + +To run end-to-end benchmarks: + +```text +RUSTFLAGS="-C target_cpu=native" cargo bench +``` + +### Fine-grained profiling + +Build `libdorian` with `profile` feature enabled. It creates two profilers: `./target/release/snark` and `./target/release/nizk`. + +These profilers report performance as depicted below (for varying R1CS instance sizes). The reported +performance is from running the profilers on a Microsoft Surface Laptop 3 on a single CPU core of Intel Core i7-1065G7 running Ubuntu 20.04 (atop WSL2 on Windows 10). +See Section 9 in our [paper](https://eprint.iacr.org/2019/550) to see how this compares with other zkSNARKs in the literature. + +```text +$ ./target/release/snark +Profiler:: SNARK + * number_of_constraints 1048576 + * number_of_variables 1048576 + * number_of_inputs 10 + * number_non-zero_entries_A 1048576 + * number_non-zero_entries_B 1048576 + * number_non-zero_entries_C 1048576 + * SNARK::encode + * SNARK::encode 14.2644201s + * SNARK::prove + * R1CSProof::prove + * polycommit + * polycommit 2.7175848s + * prove_sc_phase_one + * prove_sc_phase_one 683.7481ms + * prove_sc_phase_two + * prove_sc_phase_two 846.1056ms + * polyeval + * polyeval 193.4216ms + * R1CSProof::prove 4.4416193s + * len_r1cs_sat_proof 47024 + * eval_sparse_polys + * eval_sparse_polys 377.357ms + * R1CSEvalProof::prove + * commit_nondet_witness + * commit_nondet_witness 14.4507331s + * build_layered_network + * build_layered_network 3.4360521s + * evalproof_layered_network + * len_product_layer_proof 64712 + * evalproof_layered_network 15.5708066s + * R1CSEvalProof::prove 34.2930559s + * len_r1cs_eval_proof 133720 + * SNARK::prove 39.1297568s + * SNARK::proof_compressed_len 141768 + * SNARK::verify + * verify_sat_proof + * verify_sat_proof 20.0828ms + * verify_eval_proof + * verify_polyeval_proof + * verify_prod_proof + * verify_prod_proof 1.1847ms + * verify_hash_proof + * verify_hash_proof 81.06ms + * verify_polyeval_proof 82.3583ms + * verify_eval_proof 82.8937ms + * SNARK::verify 103.0536ms +``` + +```text +$ ./target/release/nizk +Profiler:: NIZK + * number_of_constraints 1048576 + * number_of_variables 1048576 + * number_of_inputs 10 + * number_non-zero_entries_A 1048576 + * number_non-zero_entries_B 1048576 + * number_non-zero_entries_C 1048576 + * NIZK::prove + * R1CSProof::prove + * polycommit + * polycommit 2.7220635s + * prove_sc_phase_one + * prove_sc_phase_one 722.5487ms + * prove_sc_phase_two + * prove_sc_phase_two 862.6796ms + * polyeval + * polyeval 190.2233ms + * R1CSProof::prove 4.4982305s + * len_r1cs_sat_proof 47024 + * NIZK::prove 4.5139888s + * NIZK::proof_compressed_len 48134 + * NIZK::verify + * eval_sparse_polys + * eval_sparse_polys 395.0847ms + * verify_sat_proof + * verify_sat_proof 19.286ms + * NIZK::verify 414.5102ms +``` + +## LICENSE + +See [LICENSE](./LICENSE) + +## Contributing + +See [CONTRIBUTING](./CONTRIBUTING.md) diff --git a/third_party/Dorian/SECURITY.md b/third_party/Dorian/SECURITY.md new file mode 100644 index 000000000..7ab49eb82 --- /dev/null +++ b/third_party/Dorian/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), 
[DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
+ + diff --git a/third_party/Dorian/benches/nizk.rs b/third_party/Dorian/benches/nizk.rs new file mode 100644 index 000000000..cfb8ace33 --- /dev/null +++ b/third_party/Dorian/benches/nizk.rs @@ -0,0 +1,92 @@ +#![allow(clippy::assertions_on_result_states)] +extern crate byteorder; +extern crate core; +extern crate criterion; +extern crate digest; +extern crate libdorian; +extern crate merlin; +extern crate rand; +extern crate sha3; + +use libdorian::{Instance, NIZKGens, NIZK}; +use merlin::Transcript; + +use criterion::*; + +fn nizk_prove_benchmark(c: &mut Criterion) { + for &s in [10, 12, 16].iter() { + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group("NIZK_prove_benchmark"); + group.plot_config(plot_config); + + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + let gens = NIZKGens::new(num_cons, num_vars, num_inputs); + + let name = format!("NIZK_prove_{num_vars}"); + group.bench_function(&name, move |b| { + b.iter(|| { + let mut prover_transcript = Transcript::new(b"example"); + NIZK::prove( + black_box(&inst), + black_box(vars.clone()), + black_box(&inputs), + black_box(&gens), + black_box(&mut prover_transcript), + ); + }); + }); + group.finish(); + } +} + +fn nizk_verify_benchmark(c: &mut Criterion) { + for &s in [10, 12, 16].iter() { + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group("NIZK_verify_benchmark"); + group.plot_config(plot_config); + + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + let gens = NIZKGens::new(num_cons, num_vars, num_inputs); + + // produce a proof of satisfiability + let mut prover_transcript = Transcript::new(b"example"); + let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); + + let name = format!("NIZK_verify_{num_cons}"); + group.bench_function(&name, move |b| { + b.iter(|| { + let mut verifier_transcript = Transcript::new(b"example"); + assert!(proof + .verify( + black_box(&inst), + black_box(&inputs), + black_box(&mut verifier_transcript), + black_box(&gens) + ) + .is_ok()); + }); + }); + group.finish(); + } +} + +fn set_duration() -> Criterion { + Criterion::default().sample_size(10) +} + +criterion_group! 
{ +name = benches_nizk; +config = set_duration(); +targets = nizk_prove_benchmark, nizk_verify_benchmark +} + +criterion_main!(benches_nizk); diff --git a/third_party/Dorian/benches/snark.rs b/third_party/Dorian/benches/snark.rs new file mode 100644 index 000000000..cba60f71f --- /dev/null +++ b/third_party/Dorian/benches/snark.rs @@ -0,0 +1,131 @@ +#![allow(clippy::assertions_on_result_states)] +extern crate libdorian; +extern crate merlin; + +use libdorian::{Instance, SNARKGens, SNARK}; +use merlin::Transcript; + +use criterion::*; + +fn snark_encode_benchmark(c: &mut Criterion) { + for &s in [10, 12, 16].iter() { + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group("SNARK_encode_benchmark"); + group.plot_config(plot_config); + + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + let (inst, _vars, _inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public parameters + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); + + // produce a commitment to R1CS instance + let name = format!("SNARK_encode_{num_cons}"); + group.bench_function(&name, move |b| { + b.iter(|| { + SNARK::encode(black_box(&inst), black_box(&gens)); + }); + }); + group.finish(); + } +} + +fn snark_prove_benchmark(c: &mut Criterion) { + for &s in [10, 12, 16].iter() { + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group("SNARK_prove_benchmark"); + group.plot_config(plot_config); + + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public parameters + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); + + // produce a commitment to R1CS instance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a proof + let name = format!("SNARK_prove_{num_cons}"); + group.bench_function(&name, move |b| { + b.iter(|| { + let mut prover_transcript = Transcript::new(b"example"); + SNARK::prove( + black_box(&inst), + black_box(&comm), + black_box(&decomm), + black_box(vars.clone()), + black_box(&inputs), + black_box(&gens), + black_box(&mut prover_transcript), + ); + }); + }); + group.finish(); + } +} + +fn snark_verify_benchmark(c: &mut Criterion) { + for &s in [10, 12, 16].iter() { + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group("SNARK_verify_benchmark"); + group.plot_config(plot_config); + + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public parameters + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); + + // produce a commitment to R1CS instance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a proof of satisfiability + let mut prover_transcript = Transcript::new(b"example"); + let proof = SNARK::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut prover_transcript, + ); + + // verify the proof + let name = format!("SNARK_verify_{num_cons}"); + group.bench_function(&name, move |b| { + b.iter(|| { + let mut verifier_transcript = Transcript::new(b"example"); + assert!(proof + .verify( + black_box(&comm), + black_box(&inputs), + 
black_box(&mut verifier_transcript),
+            black_box(&gens)
+          )
+          .is_ok());
+      });
+    });
+    group.finish();
+  }
+}
+
+fn set_duration() -> Criterion {
+  Criterion::default().sample_size(10)
+}
+
+criterion_group! {
+name = benches_snark;
+config = set_duration();
+targets = snark_encode_benchmark, snark_prove_benchmark, snark_verify_benchmark
+}
+
+criterion_main!(benches_snark);
diff --git a/third_party/Dorian/examples/cubic.rs b/third_party/Dorian/examples/cubic.rs
new file mode 100644
index 000000000..e0d1d40b9
--- /dev/null
+++ b/third_party/Dorian/examples/cubic.rs
@@ -0,0 +1,146 @@
+//! Demonstrates how to produce a proof for the canonical cubic equation: `x^3 + x + 5 = y`.
+//! The example is described in detail [here].
+//!
+//! The R1CS for this problem consists of the following 4 constraints:
+//! `Z0 * Z0 - Z1 = 0`
+//! `Z1 * Z0 - Z2 = 0`
+//! `(Z2 + Z0) * 1 - Z3 = 0`
+//! `(Z3 + 5) * 1 - I0 = 0`
+//!
+//! [here]: https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649
+#![allow(clippy::assertions_on_result_states)]
+use curve25519_dalek::scalar::Scalar;
+use libdorian::{InputsAssignment, Instance, SNARKGens, VarsAssignment, SNARK};
+use merlin::Transcript;
+use rand::rngs::OsRng;
+
+#[allow(non_snake_case)]
+fn produce_r1cs() -> (
+  usize,
+  usize,
+  usize,
+  usize,
+  Instance,
+  VarsAssignment,
+  InputsAssignment,
+) {
+  // parameters of the R1CS instance
+  let num_cons = 4;
+  let num_vars = 4;
+  let num_inputs = 1;
+  let num_non_zero_entries = 8;
+
+  // We will encode the above constraints into three matrices, where
+  // the coefficients in the matrix are in the little-endian byte order
+  let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new();
+  let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new();
+
+  let one = Scalar::one().to_bytes();
+
+  // R1CS is a set of three sparse matrices A, B, C, with a row for every
+  // constraint and a column for every entry in z = (vars, 1, inputs)
+  // An R1CS instance is satisfiable iff:
+  // Az \circ Bz = Cz, where z = (vars, 1, inputs)
+
+  // constraint 0 entries in (A,B,C)
+  // constraint 0 is Z0 * Z0 - Z1 = 0.
+  A.push((0, 0, one));
+  B.push((0, 0, one));
+  C.push((0, 1, one));
+
+  // constraint 1 entries in (A,B,C)
+  // constraint 1 is Z1 * Z0 - Z2 = 0.
+  A.push((1, 1, one));
+  B.push((1, 0, one));
+  C.push((1, 2, one));
+
+  // constraint 2 entries in (A,B,C)
+  // constraint 2 is (Z2 + Z0) * 1 - Z3 = 0.
+  A.push((2, 2, one));
+  A.push((2, 0, one));
+  B.push((2, num_vars, one));
+  C.push((2, 3, one));
+
+  // constraint 3 entries in (A,B,C)
+  // constraint 3 is (Z3 + 5) * 1 - I0 = 0.
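+  // We set 1 in A for the Z3 column and 5 for the constant column (encoding Z3 + 5),
+  // 1 in B for the constant column, and 1 in C for the I0 column.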
+ A.push((3, 3, one)); + A.push((3, num_vars, Scalar::from(5u32).to_bytes())); + B.push((3, num_vars, one)); + C.push((3, num_vars + 1, one)); + + let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); + + // compute a satisfying assignment + let mut csprng: OsRng = OsRng; + let z0 = Scalar::random(&mut csprng); + let z1 = z0 * z0; // constraint 0 + let z2 = z1 * z0; // constraint 1 + let z3 = z2 + z0; // constraint 2 + let i0 = z3 + Scalar::from(5u32); // constraint 3 + + // create a VarsAssignment + let mut vars = vec![Scalar::zero().to_bytes(); num_vars]; + vars[0] = z0.to_bytes(); + vars[1] = z1.to_bytes(); + vars[2] = z2.to_bytes(); + vars[3] = z3.to_bytes(); + let assignment_vars = VarsAssignment::new(&vars).unwrap(); + + // create an InputsAssignment + let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs]; + inputs[0] = i0.to_bytes(); + let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); + + // check if the instance we created is satisfiable + let res = inst.is_sat(&assignment_vars, &assignment_inputs); + assert!(res.unwrap(), "should be satisfied"); + + ( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + inst, + assignment_vars, + assignment_inputs, + ) +} + +fn main() { + // produce an R1CS instance + let ( + num_cons, + num_vars, + num_inputs, + num_non_zero_entries, + inst, + assignment_vars, + assignment_inputs, + ) = produce_r1cs(); + + // produce public parameters + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); + + // create a commitment to the R1CS instance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a proof of satisfiability + let mut prover_transcript = Transcript::new(b"snark_example"); + let proof = SNARK::prove( + &inst, + &comm, + &decomm, + assignment_vars, + &assignment_inputs, + &gens, + &mut prover_transcript, + ); + + // verify the proof of satisfiability + let mut verifier_transcript = Transcript::new(b"snark_example"); + assert!(proof + .verify(&comm, &assignment_inputs, &mut verifier_transcript, &gens) + .is_ok()); + println!("proof verification successful!"); +} diff --git a/third_party/Dorian/profiler/nizk.rs b/third_party/Dorian/profiler/nizk.rs new file mode 100644 index 000000000..706c082c0 --- /dev/null +++ b/third_party/Dorian/profiler/nizk.rs @@ -0,0 +1,52 @@ +#![allow(non_snake_case)] +#![allow(clippy::assertions_on_result_states)] + +extern crate flate2; +extern crate libdorian; +extern crate merlin; +extern crate rand; + +use flate2::{write::ZlibEncoder, Compression}; +use libdorian::{Instance, NIZKGens, NIZK}; +use merlin::Transcript; + +fn print(msg: &str) { + let star = "* "; + println!("{:indent$}{}{}", "", star, msg, indent = 2); +} + +pub fn main() { + // the list of number of variables (and constraints) in an R1CS instance + let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; + + println!("Profiler:: NIZK"); + for &s in inst_sizes.iter() { + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public generators + let gens = NIZKGens::new(num_cons, num_vars, num_inputs); + + // produce a proof of satisfiability + let mut prover_transcript = Transcript::new(b"nizk_example"); + let proof = NIZK::prove(&inst, vars, &inputs, &gens, &mut prover_transcript); + + let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); + 
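+    // serialize the proof directly into the zlib encoder, so the length reported
+    // below is the compressed proof size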
bincode::serialize_into(&mut encoder, &proof).unwrap(); + let proof_encoded = encoder.finish().unwrap(); + let msg_proof_len = format!("NIZK::proof_compressed_len {:?}", proof_encoded.len()); + print(&msg_proof_len); + + // verify the proof of satisfiability + let mut verifier_transcript = Transcript::new(b"nizk_example"); + assert!(proof + .verify(&inst, &inputs, &mut verifier_transcript, &gens) + .is_ok()); + + println!(); + } +} diff --git a/third_party/Dorian/profiler/snark.rs b/third_party/Dorian/profiler/snark.rs new file mode 100644 index 000000000..a0f31633a --- /dev/null +++ b/third_party/Dorian/profiler/snark.rs @@ -0,0 +1,62 @@ +#![allow(non_snake_case)] +#![allow(clippy::assertions_on_result_states)] + +extern crate flate2; +extern crate libdorian; +extern crate merlin; + +use flate2::{write::ZlibEncoder, Compression}; +use libdorian::{Instance, SNARKGens, SNARK}; +use merlin::Transcript; + +fn print(msg: &str) { + let star = "* "; + println!("{:indent$}{}{}", "", star, msg, indent = 2); +} + +pub fn main() { + // the list of number of variables (and constraints) in an R1CS instance + let inst_sizes = vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]; + + println!("Profiler:: SNARK"); + for &s in inst_sizes.iter() { + let num_vars = (2_usize).pow(s as u32); + let num_cons = num_vars; + let num_inputs = 10; + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // produce public generators + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); + + // create a commitment to R1CSInstance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a proof of satisfiability + let mut prover_transcript = Transcript::new(b"snark_example"); + let proof = SNARK::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut prover_transcript, + ); + + let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); + bincode::serialize_into(&mut encoder, &proof).unwrap(); + let proof_encoded = encoder.finish().unwrap(); + let msg_proof_len = format!("SNARK::proof_compressed_len {:?}", proof_encoded.len()); + print(&msg_proof_len); + + // verify the proof of satisfiability + let mut verifier_transcript = Transcript::new(b"snark_example"); + assert!(proof + .verify(&comm, &inputs, &mut verifier_transcript, &gens) + .is_ok()); + + println!(); + } +} diff --git a/third_party/Dorian/rustfmt.toml b/third_party/Dorian/rustfmt.toml new file mode 100644 index 000000000..7b20d96e1 --- /dev/null +++ b/third_party/Dorian/rustfmt.toml @@ -0,0 +1,4 @@ +edition = "2018" +tab_spaces = 2 +newline_style = "Unix" +use_try_shorthand = true diff --git a/third_party/Dorian/src/commitments.rs b/third_party/Dorian/src/commitments.rs new file mode 100644 index 000000000..902eb3165 --- /dev/null +++ b/third_party/Dorian/src/commitments.rs @@ -0,0 +1,94 @@ +use super::group::{GroupElement, VartimeMultiscalarMul, GROUP_BASEPOINT_COMPRESSED}; +use super::scalar::Scalar; +use digest::XofReader; +use digest::{ExtendableOutput, Input}; +use sha3::Shake256; + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] // to test +pub struct MultiCommitGens { + pub n: usize, + pub G: Vec, + pub h: GroupElement, +} + +impl MultiCommitGens { + pub fn new(n: usize, label: &[u8]) -> Self { + let mut shake = Shake256::default(); + shake.input(label); + shake.input(GROUP_BASEPOINT_COMPRESSED.as_bytes()); + + let mut reader = shake.xof_result(); + let mut gens: Vec = Vec::new(); + let 
mut uniform_bytes = [0u8; 64]; + for _ in 0..n + 1 { + reader.read(&mut uniform_bytes); + gens.push(GroupElement::from_uniform_bytes(&uniform_bytes)); + } + + MultiCommitGens { + n, + G: gens[..n].to_vec(), + h: gens[n], + } + } + + pub fn clone(&self) -> MultiCommitGens { + MultiCommitGens { + n: self.n, + h: self.h, + G: self.G.clone(), + } + } + + pub fn scale(&self, s: &Scalar) -> MultiCommitGens { + MultiCommitGens { + n: self.n, + h: self.h, + G: (0..self.n).map(|i| s * self.G[i]).collect(), + } + } + + pub fn split_at(&self, mid: usize) -> (MultiCommitGens, MultiCommitGens) { + let (G1, G2) = self.G.split_at(mid); + + ( + MultiCommitGens { + n: G1.len(), + G: G1.to_vec(), + h: self.h, + }, + MultiCommitGens { + n: G2.len(), + G: G2.to_vec(), + h: self.h, + }, + ) + } +} + +pub trait Commitments { + fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement; +} + +impl Commitments for Scalar { + fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { + assert_eq!(gens_n.n, 1); + GroupElement::vartime_multiscalar_mul(&[*self, *blind], &[gens_n.G[0], gens_n.h]) + } +} + +impl Commitments for Vec { + fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { + assert_eq!(gens_n.n, self.len()); + GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h + } +} + +impl Commitments for [Scalar] { + fn commit(&self, blind: &Scalar, gens_n: &MultiCommitGens) -> GroupElement { + assert_eq!(gens_n.n, self.len()); + GroupElement::vartime_multiscalar_mul(self, &gens_n.G) + blind * gens_n.h + } +} diff --git a/third_party/Dorian/src/dense_mlpoly.rs b/third_party/Dorian/src/dense_mlpoly.rs new file mode 100644 index 000000000..c6ec9d9b4 --- /dev/null +++ b/third_party/Dorian/src/dense_mlpoly.rs @@ -0,0 +1,639 @@ +#![allow(clippy::too_many_arguments)] +use super::commitments::{Commitments, MultiCommitGens}; +use super::errors::ProofVerifyError; +use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; +use super::math::Math; +use super::nizk::{DotProductProofGens, DotProductProofLog}; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use core::ops::Index; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; +use crate::Timer; +#[cfg(feature = "multicore")] +use rayon::prelude::*; + +/// DensePolynomial +#[derive(Debug, Serialize, Deserialize)] +pub struct DensePolynomial { + num_vars: usize, // the number of variables in the multilinear polynomial + len: usize, + pub Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs +} + +#[derive(Serialize, Deserialize)] // to test +pub struct PolyCommitmentGens { + pub gens: DotProductProofGens, +} + +impl PolyCommitmentGens { + // the number of variables in the multilinear polynomial + pub fn new(num_vars: usize, label: &'static [u8]) -> PolyCommitmentGens { + let (_left, right) = EqPolynomial::compute_factored_lens(num_vars); + let gens = DotProductProofGens::new(right.pow2(), label); + PolyCommitmentGens { gens } + } +} + +#[derive(Clone)] +pub struct PolyCommitmentBlinds { + blinds: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct PolyCommitment { + C: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ConstPolyCommitment { + C: CompressedGroup, +} + +pub struct EqPolynomial { + r: Vec, +} + +impl EqPolynomial { + pub fn new(r: Vec) -> Self { + EqPolynomial { r } + } + + pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { + 
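+    // computes eq(r, rx) = prod_i (r_i * rx_i + (1 - r_i) * (1 - rx_i)),
+    // the multilinear extension of the equality predicate on Boolean inputs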
assert_eq!(self.r.len(), rx.len()); + (0..rx.len()) + .map(|i| self.r[i] * rx[i] + (Scalar::one() - self.r[i]) * (Scalar::one() - rx[i])) + .product() + } + + pub fn evals(&self) -> Vec { + let ell = self.r.len(); + + let mut evals: Vec = vec![Scalar::one(); ell.pow2()]; + let mut size = 1; + for j in 0..ell { + // in each iteration, we double the size of chis + size *= 2; + for i in (0..size).rev().step_by(2) { + // copy each element from the prior iteration twice + let scalar = evals[i / 2]; + evals[i] = scalar * self.r[j]; + evals[i - 1] = scalar - evals[i]; + } + } + evals + } + + pub fn compute_factored_lens(ell: usize) -> (usize, usize) { + (ell / 2, ell - ell / 2) + } + + pub fn compute_factored_evals(&self) -> (Vec, Vec) { + let ell = self.r.len(); + let (left_num_vars, _right_num_vars) = EqPolynomial::compute_factored_lens(ell); + + let L = EqPolynomial::new(self.r[..left_num_vars].to_vec()).evals(); + let R = EqPolynomial::new(self.r[left_num_vars..ell].to_vec()).evals(); + + (L, R) + } +} + +pub struct IdentityPolynomial { + size_point: usize, +} + +impl IdentityPolynomial { + pub fn new(size_point: usize) -> Self { + IdentityPolynomial { size_point } + } + + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + let len = r.len(); + assert_eq!(len, self.size_point); + (0..len) + .map(|i| Scalar::from((len - i - 1).pow2() as u64) * r[i]) + .sum() + } +} + +impl DensePolynomial { + #[allow(missing_docs)] + pub fn new(Z: Vec) -> Self { + DensePolynomial { + num_vars: Z.len().log_2(), + len: Z.len(), + Z, + } + } + + pub fn new_bool(len: usize, start: usize, end: usize) -> Self { + let mut Z: Vec = vec![Scalar::zero(); len]; + for i in start..end { + Z[i] = Scalar::one(); + } + DensePolynomial { + num_vars: len.log_2(), + len, + Z, + } + } + + #[allow(missing_docs)] + pub fn get_num_vars(&self) -> usize { + self.num_vars + } + + #[allow(missing_docs)] + pub fn len(&self) -> usize { + self.len + } + + #[allow(missing_docs)] + pub fn clone(&self) -> DensePolynomial { + DensePolynomial::new(self.Z[0..self.len].to_vec()) + } + + #[allow(missing_docs)] + pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { + assert!(idx < self.len()); + ( + DensePolynomial::new(self.Z[..idx].to_vec()), + DensePolynomial::new(self.Z[idx..2 * idx].to_vec()), + ) + } + + #[cfg(feature = "multicore")] + fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { + let L_size = blinds.len(); + let R_size = self.Z.len() / L_size; + assert_eq!(L_size * R_size, self.Z.len()); + let C = (0..L_size) + .into_par_iter() + .map(|i| { + self.Z[R_size * i..R_size * (i + 1)] + .commit(&blinds[i], gens) + .compress() + }) + .collect(); + PolyCommitment { C } + } + + #[cfg(not(feature = "multicore"))] + fn commit_inner(&self, blinds: &[Scalar], gens: &MultiCommitGens) -> PolyCommitment { + let L_size = blinds.len(); + let R_size = self.Z.len() / L_size; + assert_eq!(L_size * R_size, self.Z.len()); + let C = (0..L_size) + .map(|i| { + self.Z[R_size * i..R_size * (i + 1)] + .commit(&blinds[i], gens) + .compress() + }) + .collect(); + PolyCommitment { C } + } + + #[allow(missing_docs)] + pub fn commit( + &self, + gens: &PolyCommitmentGens, + random_tape: Option<&mut RandomTape>, + ) -> (PolyCommitment, PolyCommitmentBlinds) { + let n = self.Z.len(); + let ell = self.get_num_vars(); + assert_eq!(n, ell.pow2()); + + let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(ell); + let L_size = left_num_vars.pow2(); + let R_size = right_num_vars.pow2(); + 
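+    // the 2^num_vars evaluations are viewed as an L_size x R_size matrix;
+    // commit_inner produces one blinded commitment per row of this matrix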
assert_eq!(L_size * R_size, n); + + let blinds = if let Some(t) = random_tape { + PolyCommitmentBlinds { + blinds: t.random_vector(b"poly_blinds", L_size), + } + } else { + PolyCommitmentBlinds { + blinds: vec![Scalar::zero(); L_size], + } + }; + + (self.commit_inner(&blinds.blinds, &gens.gens.gens_n), blinds) + } + + #[allow(missing_docs)] + pub fn bound(&self, L: &[Scalar]) -> Vec { + let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(self.get_num_vars()); + let L_size = left_num_vars.pow2(); + let R_size = right_num_vars.pow2(); + (0..R_size) + .map(|i| (0..L_size).map(|j| L[j] * self.Z[j * R_size + i]).sum()) + .collect() + } + + #[allow(missing_docs)] + pub fn bound_poly_var_top(&mut self, r: &Scalar) { + let n = self.len() / 2; + for i in 0..n { + self.Z[i] = self.Z[i] + r * (self.Z[i + n] - self.Z[i]); + } + self.num_vars -= 1; + self.len = n; + } + + #[allow(missing_docs)] + pub fn bound_poly_var_bot(&mut self, r: &Scalar) { + let n = self.len() / 2; + for i in 0..n { + self.Z[i] = self.Z[2 * i] + r * (self.Z[2 * i + 1] - self.Z[2 * i]); + } + self.num_vars -= 1; + self.len = n; + } + + #[allow(missing_docs)] + // returns Z(r) in O(n) time + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + // r must have a value for each variable + assert_eq!(r.len(), self.get_num_vars()); + let chis = EqPolynomial::new(r.to_vec()).evals(); + assert_eq!(chis.len(), self.Z.len()); + DotProductProofLog::compute_dotproduct(&self.Z, &chis) + } + + #[allow(missing_docs)] + fn vec(&self) -> &Vec { + &self.Z + } + + #[allow(missing_docs)] + pub fn extend(&mut self, other: &DensePolynomial) { + // TODO: allow extension even when some vars are bound + assert_eq!(self.Z.len(), self.len); + let other_vec = other.vec(); + assert_eq!(other_vec.len(), self.len); + self.Z.extend(other_vec); + self.num_vars += 1; + self.len *= 2; + assert_eq!(self.Z.len(), self.len); + } + + pub fn extend_from_vec(&mut self, other: &Vec) { + assert_eq!(self.Z.len(), self.len); + assert_eq!(other.len(), self.len); + self.Z.extend(other); + self.num_vars += 1; + self.len *= 2; + assert_eq!(self.Z.len(), self.len); + } + + #[allow(missing_docs)] + pub fn merge<'a, I>(polys: I) -> DensePolynomial + where + I: IntoIterator, + { + let mut Z: Vec = Vec::new(); + for poly in polys.into_iter() { + Z.extend(poly.vec()); + } + + // pad the polynomial with zero polynomial at the end + Z.resize(Z.len().next_power_of_two(), Scalar::zero()); + + DensePolynomial::new(Z) + } + + #[allow(missing_docs)] + pub fn from_usize(Z: &[usize]) -> Self { + DensePolynomial::new( + (0..Z.len()) + .map(|i| Scalar::from(Z[i] as u64)) + .collect::>(), + ) + } +} + +impl Index for DensePolynomial { + type Output = Scalar; + + #[inline(always)] + fn index(&self, _index: usize) -> &Scalar { + &(self.Z[_index]) + } +} + +impl AppendToTranscript for PolyCommitment { + fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { + transcript.append_message(label, b"poly_commitment_begin"); + for i in 0..self.C.len() { + transcript.append_point(b"poly_commitment_share", &self.C[i]); + } + transcript.append_message(label, b"poly_commitment_end"); + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct PolyEvalProof { + proof: DotProductProofLog, +} + +impl PolyEvalProof { + fn protocol_name() -> &'static [u8] { + b"polynomial evaluation proof" + } + + pub fn prove( + poly: &DensePolynomial, + blinds_opt: Option<&PolyCommitmentBlinds>, + r: &[Scalar], // point at which the polynomial is evaluated + Zr: &Scalar, // 
evaluation of \widetilde{Z}(r) + blind_Zr_opt: Option<&Scalar>, // specifies a blind for Zr + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (PolyEvalProof, CompressedGroup) { + transcript.append_protocol_name(PolyEvalProof::protocol_name()); + + // assert vectors are of the right size + assert_eq!(poly.get_num_vars(), r.len()); + + let (left_num_vars, right_num_vars) = EqPolynomial::compute_factored_lens(r.len()); + let L_size = left_num_vars.pow2(); + let R_size = right_num_vars.pow2(); + + let default_blinds = PolyCommitmentBlinds { + blinds: vec![Scalar::zero(); L_size], + }; + let blinds = blinds_opt.map_or(&default_blinds, |p| p); + + assert_eq!(blinds.blinds.len(), L_size); + + let zero = Scalar::zero(); + let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); + + // compute the L and R vectors + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + assert_eq!(L.len(), L_size); + assert_eq!(R.len(), R_size); + + // compute the vector underneath L*Z and the L*blinds + // compute vector-matrix product between L and Z viewed as a matrix + let LZ = poly.bound(&L); + + let LZ_blind: Scalar = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); // 26.039µs + let timer_poly_eval2 = Timer::new("prove_poly_eval_inner2"); + + // a dot product proof of size R_size + let (proof, _C_LR, C_Zr_prime) = DotProductProofLog::prove( + &gens.gens, + transcript, + random_tape, + &LZ, + &LZ_blind, + &R, + Zr, + blind_Zr, + ); // 63 ms + (PolyEvalProof { proof }, C_Zr_prime) + } + + pub fn verify( + &self, + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + r: &[Scalar], // point at which the polynomial is evaluated + C_Zr: &CompressedGroup, // commitment to \widetilde{Z}(r) + comm: &PolyCommitment, + ) -> Result<(), ProofVerifyError> { + transcript.append_protocol_name(PolyEvalProof::protocol_name()); + + // compute L and R + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + + // compute a weighted sum of commitments and L + let C_decompressed = comm.C.iter().map(|pt| pt.decompress().unwrap()); + + let C_LZ = GroupElement::vartime_multiscalar_mul(&L, C_decompressed).compress(); + + self + .proof + .verify(R.len(), &gens.gens, transcript, &R, &C_LZ, C_Zr) + } + + pub fn verify_plain( + &self, + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + r: &[Scalar], // point at which the polynomial is evaluated + Zr: &Scalar, // evaluation \widetilde{Z}(r) + comm: &PolyCommitment, + ) -> Result<(), ProofVerifyError> { + // compute a commitment to Zr with a blind of zero + let C_Zr = Zr.commit(&Scalar::zero(), &gens.gens.gens_1).compress(); + + self.verify(gens, transcript, r, &C_Zr, comm) + } +} + +#[cfg(test)] +mod tests { + use super::super::scalar::ScalarFromPrimitives; + use super::*; + use rand::rngs::OsRng; + + fn evaluate_with_LR(Z: &[Scalar], r: &[Scalar]) -> Scalar { + let eq = EqPolynomial::new(r.to_vec()); + let (L, R) = eq.compute_factored_evals(); + + let ell = r.len(); + // ensure ell is even + assert!(ell % 2 == 0); + // compute n = 2^\ell + let n = ell.pow2(); + // compute m = sqrt(n) = 2^{\ell/2} + let m = n.square_root(); + + // compute vector-matrix product between L and Z viewed as a matrix + let LZ = (0..m) + .map(|i| (0..m).map(|j| L[j] * Z[j * m + i]).sum()) + .collect::>(); + + // compute dot product between LZ and R + DotProductProofLog::compute_dotproduct(&LZ, &R) + } + + #[test] + fn check_polynomial_evaluation() { + // Z = [1, 2, 1, 4] + let Z = vec![ + 
Scalar::one(), + (2_usize).to_scalar(), + (1_usize).to_scalar(), + (4_usize).to_scalar(), + ]; + + // r = [4,3] + let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()]; + + let eval_with_LR = evaluate_with_LR(&Z, &r); + let poly = DensePolynomial::new(Z); + + let eval = poly.evaluate(&r); + assert_eq!(eval, (28_usize).to_scalar()); + assert_eq!(eval_with_LR, eval); + } + + pub fn compute_factored_chis_at_r(r: &[Scalar]) -> (Vec, Vec) { + let mut L: Vec = Vec::new(); + let mut R: Vec = Vec::new(); + + let ell = r.len(); + assert!(ell % 2 == 0); // ensure ell is even + let n = ell.pow2(); + let m = n.square_root(); + + // compute row vector L + for i in 0..m { + let mut chi_i = Scalar::one(); + for j in 0..ell / 2 { + let bit_j = ((m * i) & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= Scalar::one() - r[j]; + } + } + L.push(chi_i); + } + + // compute column vector R + for i in 0..m { + let mut chi_i = Scalar::one(); + for j in ell / 2..ell { + let bit_j = (i & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= Scalar::one() - r[j]; + } + } + R.push(chi_i); + } + (L, R) + } + + pub fn compute_chis_at_r(r: &[Scalar]) -> Vec { + let ell = r.len(); + let n = ell.pow2(); + let mut chis: Vec = Vec::new(); + for i in 0..n { + let mut chi_i = Scalar::one(); + for j in 0..r.len() { + let bit_j = (i & (1 << (r.len() - j - 1))) > 0; + if bit_j { + chi_i *= r[j]; + } else { + chi_i *= Scalar::one() - r[j]; + } + } + chis.push(chi_i); + } + chis + } + + pub fn compute_outerproduct(L: Vec, R: Vec) -> Vec { + assert_eq!(L.len(), R.len()); + (0..L.len()) + .map(|i| (0..R.len()).map(|j| L[i] * R[j]).collect::>()) + .collect::>>() + .into_iter() + .flatten() + .collect::>() + } + + #[test] + fn check_memoized_chis() { + let mut csprng: OsRng = OsRng; + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(Scalar::random(&mut csprng)); + } + let chis = tests::compute_chis_at_r(&r); + let chis_m = EqPolynomial::new(r).evals(); + assert_eq!(chis, chis_m); + } + + #[test] + fn check_factored_chis() { + let mut csprng: OsRng = OsRng; + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(Scalar::random(&mut csprng)); + } + let chis = EqPolynomial::new(r.clone()).evals(); + let (L, R) = EqPolynomial::new(r).compute_factored_evals(); + let O = compute_outerproduct(L, R); + assert_eq!(chis, O); + } + + #[test] + fn check_memoized_factored_chis() { + let mut csprng: OsRng = OsRng; + + let s = 10; + let mut r: Vec = Vec::new(); + for _i in 0..s { + r.push(Scalar::random(&mut csprng)); + } + let (L, R) = tests::compute_factored_chis_at_r(&r); + let eq = EqPolynomial::new(r); + let (L2, R2) = eq.compute_factored_evals(); + assert_eq!(L, L2); + assert_eq!(R, R2); + } + + #[test] + fn check_polynomial_commit() { + let Z = vec![ + (1_usize).to_scalar(), + (2_usize).to_scalar(), + (1_usize).to_scalar(), + (4_usize).to_scalar(), + ]; + let poly = DensePolynomial::new(Z); + + // r = [4,3] + let r = vec![(4_usize).to_scalar(), (3_usize).to_scalar()]; + let eval = poly.evaluate(&r); + assert_eq!(eval, (28_usize).to_scalar()); + + let gens = PolyCommitmentGens::new(poly.get_num_vars(), b"test-two"); + let (poly_commitment, blinds) = poly.commit(&gens, None); + + let mut random_tape = RandomTape::new(b"proof"); + let mut prover_transcript = Transcript::new(b"example"); + let (proof, C_Zr) = PolyEvalProof::prove( + &poly, + Some(&blinds), + &r, + &eval, + None, + &gens, + &mut prover_transcript, + &mut random_tape, + ); 
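+    // C_Zr commits to the claimed evaluation; the verifier checks the proof
+    // against it together with the polynomial commitment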
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(&gens, &mut verifier_transcript, &r, &C_Zr, &poly_commitment)
+      .is_ok());
+  }
+}
diff --git a/third_party/Dorian/src/errors.rs b/third_party/Dorian/src/errors.rs
new file mode 100644
index 000000000..ade919d7d
--- /dev/null
+++ b/third_party/Dorian/src/errors.rs
@@ -0,0 +1,41 @@
+use core::{
+  fmt::Display,
+  fmt::{self, Debug},
+};
+
+#[derive(Debug, Default)]
+pub enum ProofVerifyError {
+  #[default]
+  InternalError,
+  DecompressionError([u8; 32]),
+}
+
+impl Display for ProofVerifyError {
+  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+    match &self {
+      ProofVerifyError::DecompressionError(bytes) => write!(
+        f,
+        "Compressed group element failed to decompress: {bytes:?}",
+      ),
+      ProofVerifyError::InternalError => {
+        write!(f, "Proof verification failed",)
+      }
+    }
+  }
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum R1CSError {
+  /// returned if the number of constraints is not a power of 2
+  NonPowerOfTwoCons,
+  /// returned if the number of variables is not a power of 2
+  NonPowerOfTwoVars,
+  /// returned if a wrong number of inputs in an assignment are supplied
+  InvalidNumberOfInputs,
+  /// returned if a wrong number of variables in an assignment are supplied
+  InvalidNumberOfVars,
+  /// returned if a [u8;32] does not parse into a valid Scalar in the field of ristretto255
+  InvalidScalar,
+  /// returned if the supplied row or col in (row,col,val) tuple is out of range
+  InvalidIndex,
+}
diff --git a/third_party/Dorian/src/group.rs b/third_party/Dorian/src/group.rs
new file mode 100644
index 000000000..ee8b77098
--- /dev/null
+++ b/third_party/Dorian/src/group.rs
@@ -0,0 +1,117 @@
+use super::errors::ProofVerifyError;
+use super::scalar::{Scalar, ScalarBytes, ScalarBytesFromScalar};
+use core::borrow::Borrow;
+use core::ops::{Mul, MulAssign};
+
+pub type GroupElement = curve25519_dalek::ristretto::RistrettoPoint;
+pub type CompressedGroup = curve25519_dalek::ristretto::CompressedRistretto;
+
+pub trait CompressedGroupExt {
+  type Group;
+  fn unpack(&self) -> Result<Self::Group, ProofVerifyError>;
+}
+
+impl CompressedGroupExt for CompressedGroup {
+  type Group = curve25519_dalek::ristretto::RistrettoPoint;
+  fn unpack(&self) -> Result<Self::Group, ProofVerifyError> {
+    self
+      .decompress()
+      .ok_or_else(|| ProofVerifyError::DecompressionError(self.to_bytes()))
+  }
+}
+
+pub const GROUP_BASEPOINT_COMPRESSED: CompressedGroup =
+  curve25519_dalek::constants::RISTRETTO_BASEPOINT_COMPRESSED;
+
+impl<'b> MulAssign<&'b Scalar> for GroupElement {
+  fn mul_assign(&mut self, scalar: &'b Scalar) {
+    let result = (self as &GroupElement) * Scalar::decompress_scalar(scalar);
+    *self = result;
+  }
+}
+
+impl<'a, 'b> Mul<&'b Scalar> for &'a GroupElement {
+  type Output = GroupElement;
+  fn mul(self, scalar: &'b Scalar) -> GroupElement {
+    self * Scalar::decompress_scalar(scalar)
+  }
+}
+
+impl<'a, 'b> Mul<&'b GroupElement> for &'a Scalar {
+  type Output = GroupElement;
+
+  fn mul(self, point: &'b GroupElement) -> GroupElement {
+    Scalar::decompress_scalar(self) * point
+  }
+}
+
+macro_rules! define_mul_variants {
+  (LHS = $lhs:ty, RHS = $rhs:ty, Output = $out:ty) => {
+    impl<'b> Mul<&'b $rhs> for $lhs {
+      type Output = $out;
+      fn mul(self, rhs: &'b $rhs) -> $out {
+        &self * rhs
+      }
+    }
+
+    impl<'a> Mul<$rhs> for &'a $lhs {
+      type Output = $out;
+      fn mul(self, rhs: $rhs) -> $out {
+        self * &rhs
+      }
+    }
+
+    impl Mul<$rhs> for $lhs {
+      type Output = $out;
+      fn mul(self, rhs: $rhs) -> $out {
+        &self * &rhs
+      }
+    }
+  };
+}
+
+macro_rules!
define_mul_assign_variants { + (LHS = $lhs:ty, RHS = $rhs:ty) => { + impl MulAssign<$rhs> for $lhs { + fn mul_assign(&mut self, rhs: $rhs) { + *self *= &rhs; + } + } + }; +} + +define_mul_assign_variants!(LHS = GroupElement, RHS = Scalar); +define_mul_variants!(LHS = GroupElement, RHS = Scalar, Output = GroupElement); +define_mul_variants!(LHS = Scalar, RHS = GroupElement, Output = GroupElement); + +pub trait VartimeMultiscalarMul { + type Scalar; + fn vartime_multiscalar_mul(scalars: I, points: J) -> Self + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + Self: Clone; +} + +impl VartimeMultiscalarMul for GroupElement { + type Scalar = super::scalar::Scalar; + fn vartime_multiscalar_mul(scalars: I, points: J) -> Self + where + I: IntoIterator, + I::Item: Borrow, + J: IntoIterator, + J::Item: Borrow, + Self: Clone, + { + use curve25519_dalek::traits::VartimeMultiscalarMul; + ::vartime_multiscalar_mul( + scalars + .into_iter() + .map(|s| Scalar::decompress_scalar(s.borrow())) + .collect::>(), + points, + ) + } +} diff --git a/third_party/Dorian/src/ir1csproof.rs b/third_party/Dorian/src/ir1csproof.rs new file mode 100644 index 000000000..20e10671a --- /dev/null +++ b/third_party/Dorian/src/ir1csproof.rs @@ -0,0 +1,692 @@ +#![allow(clippy::too_many_arguments)] +use super::commitments::{ + Commitments, + // MultiCommitGens +}; +use super::dense_mlpoly::{ + DensePolynomial, EqPolynomial, PolyCommitment, + PolyCommitmentGens, PolyEvalProof, PolyCommitmentBlinds +}; +use super::errors::ProofVerifyError; +use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; +use super::math::Math; +use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; +use super::r1csinstance::R1CSInstance; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}; +use super::sumcheck::ZKSumcheckInstanceProof; +use super::timer::Timer; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use core::iter; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; +use super::r1csproof::{R1CSProof, R1CSSumcheckGens, R1CSGens}; + +#[cfg(feature = "multicore")] +use rayon::prelude::*; + +// use super::group::CompressedGroupExt; + +// use crate::{NIZKRandInter, VarsAssignment}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct IR1CSProof { + comm_vars_vec: Vec, + sc_proof_phase1: ZKSumcheckInstanceProof, + claims_phase2: ( + CompressedGroup, + CompressedGroup, + CompressedGroup, + CompressedGroup, + ), + pok_claims_phase2: (KnowledgeProof, ProductProof), + proof_eq_sc_phase1: EqualityProof, + sc_proof_phase2: ZKSumcheckInstanceProof, + comm_vars_at_ry_vec: Vec, + proof_eval_vars_at_ry_vec: Vec, + proof_eq_sc_phase2: EqualityProof, +} + +#[derive(Serialize, Deserialize)] +pub struct IR1CSGens { + gens_sc: R1CSSumcheckGens, + gens_pc: PolyCommitmentGens, + polys: Vec, + polys_extend: Vec, +} + +impl IR1CSGens { + // pub fn new(label: &'static [u8], _num_cons: usize, wit_len: &Vec) -> Self { + pub fn new(label: &'static [u8], _num_cons: usize, wit_len: &[usize]) -> Self { + let num_vars = wit_len.iter().sum::(); + let gens = R1CSGens::new(label, _num_cons, num_vars); + let mut polys = Vec::new(); + let mut start = 0; + for wit_len in wit_len.iter() { + let poly = DensePolynomial::new_bool(num_vars, start, start+*wit_len); + polys.push(poly); + start += *wit_len; + } + if polys.len() == 1 { // without verifier randomness + let poly = DensePolynomial::new(vec![Scalar::zero(); num_vars]); + 
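+      // Note (added for clarity): with a single witness segment there is no
+      // verifier randomness, so a zero polynomial is pushed as a dummy second
+      // segment; downstream code can then uniformly treat the selector pair
+      // (v, 1 - v) as spanning exactly two segments.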
polys.push(poly); + } + let mut polys_extend = Vec::new(); + for poly in polys.iter() { + let mut poly = poly.clone(); + poly.extend(&poly.clone()); + polys_extend.push(poly); // used for the second sum-check + } + Self { + gens_sc: gens.gens_sc, + gens_pc: gens.gens_pc, + polys, + polys_extend, + } + } +} + + +impl IR1CSProof { + #[inline] + fn comb_func_sc_two( + poly_A_comp: &Scalar, + poly_B0_comp: &Scalar, + poly_B1_comp: &Scalar, + poly_C0_comp: &Scalar, + poly_D_comp: &Scalar + ) -> Scalar { + poly_D_comp * (poly_A_comp + (poly_B0_comp - poly_B1_comp) * poly_C0_comp + poly_B1_comp) + } + /// Prove phase two + pub fn prove_phase_two( + num_rounds: usize, + claim: &Scalar, + blind_claim: &Scalar, + evals_io_one: &mut DensePolynomial, + // evals_wit: &mut [DensePolynomial; 2], + evals_wit: (&mut DensePolynomial, &mut DensePolynomial), + eval_v: &mut DensePolynomial, + evals_ABC: &mut DensePolynomial, + gens: &R1CSSumcheckGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (ZKSumcheckInstanceProof, Vec, Vec, Scalar) { + let timer = Timer::new("prove_phase_two inner"); + + // let comb_func = |poly_A_comp: &Scalar, + // poly_B0_comp: &Scalar, + // poly_B1_comp: &Scalar, + // poly_C0_comp: &Scalar, + // poly_D_comp: &Scalar| + // -> Scalar { poly_D_comp * + // (poly_A_comp + (poly_B0_comp - poly_B1_comp) * poly_C0_comp + poly_B1_comp) }; + let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_cubic_with_four_terms( + claim, + blind_claim, + num_rounds, + evals_io_one, + // (&mut evals_wit[0], &mut evals_wit[1]), // Pass as tuple + evals_wit, + eval_v, + evals_ABC, + // comb_func, + Self::comb_func_sc_two, + &gens.gens_1, + &gens.gens_4, // degree 3 instead of degree 2 + transcript, + random_tape, + ); + timer.stop(); + (sc_proof_phase_two, r, claims, blind_claim_postsc) + } + + fn protocol_name() -> &'static [u8] { + b"Interactive R1CS proof" + } + + // fn test_split(vars: &Vec, ry: &Vec) { + // println!("test_split"); + // println!("{:?}", DensePolynomial::new(vars.clone()).evaluate(&ry[1..])); + // println!("{:?}", DensePolynomial::new(vars.clone()).evaluate(&ry[1..])); + // // let middle: usize = vars.len() - 1; + // for i in 0..(1<, + vars: &[Scalar], + rand_len: usize, + poly_vars_vec: &mut Vec, + comm_vars_vec: &mut Vec, + blinds_vars_vec: &mut Vec, + gens: &IR1CSGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> Vec { + // create a multilinear polynomial using the supplied assignment for variables + // let poly_vars = DensePolynomial::new(vars.clone()); + let poly_vars = DensePolynomial::new(vars.to_owned()); + + // produce a commitment to the satisfying assignment + let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape)); + + // add the commitment to the prover's transcript + comm_vars.append_to_transcript(b"poly_commitment", transcript); + poly_vars_vec.push(poly_vars); + comm_vars_vec.push(comm_vars); + blinds_vars_vec.push(blinds_vars); + + transcript.challenge_vector(b"verifier_random", rand_len) + } + + pub fn prove_1( + inst: &R1CSInstance, + vars: &[Scalar], // witness in final round + wit: &[Scalar], + input: &[Scalar], + poly_vars_vec: &mut Vec, + comm_vars_vec: &mut Vec, + blinds_vars_vec: &mut Vec, + gens: &IR1CSGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (IR1CSProof, Vec, Vec) { + let timer_prove = Timer::new("IR1CSProof::prove1"); + + let timer_commit = Timer::new("polycommit"); + + let z = { + let num_vars = inst.get_num_vars(); 
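+      // Layout sketch of the extended vector z assembled below (comment added
+      // as an aid; the ordering is fixed by the copies that follow):
+      //   z = (wit || vars || 0-padding || 1 || input || 0-padding),
+      // where the first num_vars entries hold the witness segments and the
+      // second num_vars entries hold the public io vector (1, input).
+      debug_assert!(wit.len() + vars.len() <= num_vars);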
+ let wit0_len = wit.len(); + let mut padded_wit = vec![Scalar::zero(); num_vars]; + padded_wit[wit0_len..wit0_len + vars.len()].copy_from_slice(vars); + let poly_vars = DensePolynomial::new(padded_wit); + // produce a commitment to the satisfying assignment + let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape)); // 40 ms + // add the commitment to the prover's transcript + comm_vars.append_to_transcript(b"poly_commitment", transcript); + + // let mut z = Vec::with_capacity(num_vars); + // z.extend_from_slice(wit); + // z.extend_from_slice(vars); + // // wit.extend(vars); + // if z.len() < num_vars { + // z.extend(vec![Scalar::zero(); num_vars - wit.len()]); + // } + poly_vars_vec.push(poly_vars); + comm_vars_vec.push(comm_vars); + blinds_vars_vec.push(blinds_vars.clone()); + let num_inputs = input.len(); + let mut z = vec![Scalar::zero(); num_vars*2]; + z[0..wit0_len].copy_from_slice(wit); + z[wit0_len..wit0_len+vars.len()].copy_from_slice(vars); + z[num_vars] = Scalar::one(); + z[num_vars + 1..num_vars + 1 + num_inputs].copy_from_slice(input); + z + }; + + #[cfg(debug_assertions)] + { + let mut witness = wit.to_vec().clone(); + witness.extend(vars); + if witness.len() < inst.get_num_vars() { + witness.extend(vec![Scalar::zero(); inst.get_num_vars() - witness.len()]); + } + assert!(inst.is_sat(&witness, input)); + } + + timer_commit.stop(); + + let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one"); + + // derive the verifier's challenge tau + let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2()); + let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x); + // compute the initial evaluation table for R(\tau, x) + let mut poly_tau = DensePolynomial::new(EqPolynomial::new(tau).evals()); + + let (mut poly_Az, mut poly_Bz, mut poly_Cz) = + inst.multiply_vec(inst.get_num_cons(), z.len(), &z); + // let timer_sc_proof_phase1_inner2 = Timer::new("prove_sc_phase_one_inner2"); + let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one( + num_rounds_x, + &mut poly_tau, + &mut poly_Az, + &mut poly_Bz, + &mut poly_Cz, + &gens.gens_sc, + transcript, + random_tape, + ); + // timer_sc_proof_phase1_inner2.stop(); + + // println!("rx {:?}", rx); + assert_eq!(poly_tau.len(), 1); + assert_eq!(poly_Az.len(), 1); + assert_eq!(poly_Bz.len(), 1); + assert_eq!(poly_Cz.len(), 1); + timer_sc_proof_phase1.stop(); + + let (tau_claim, Az_claim, Bz_claim, Cz_claim) = + (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]); + let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = ( + random_tape.random_scalar(b"Az_blind"), + random_tape.random_scalar(b"Bz_blind"), + random_tape.random_scalar(b"Cz_blind"), + random_tape.random_scalar(b"prod_Az_Bz_blind"), + ); + + let (pok_Cz_claim, comm_Cz_claim) = { + KnowledgeProof::prove( + &gens.gens_sc.gens_1, + transcript, + random_tape, + Cz_claim, + &Cz_blind, + ) + }; + + let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = { + let prod = Az_claim * Bz_claim; + ProductProof::prove( + &gens.gens_sc.gens_1, + transcript, + random_tape, + Az_claim, + &Az_blind, + Bz_claim, + &Bz_blind, + &prod, + &prod_Az_Bz_blind, + ) + }; + + comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript); + comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript); + comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript); + comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript); + + // prove the final step of sum-check 
#1 + let taus_bound_rx = tau_claim; + let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind); + let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx; + let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove( + &gens.gens_sc.gens_1, + transcript, + random_tape, + &claim_post_phase1, + &blind_expected_claim_postsc1, + &claim_post_phase1, + &blind_claim_postsc1, + ); + + let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two"); + + // combine the three claims into a single claim + let r_A = transcript.challenge_scalar(b"challenege_Az"); + let r_B = transcript.challenge_scalar(b"challenege_Bz"); + let r_C = transcript.challenge_scalar(b"challenege_Cz"); + let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim; + let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind; + + + let evals_ABC = { + // let timer_sc_proof_phase2_inner1_1 = Timer::new("prove_sc_phase_two_inner1.1"); + // compute the initial evaluation table for R(\tau, x) + let evals_rx = EqPolynomial::new(rx.clone()).evals(); + // timer_sc_proof_phase2_inner1_1.stop(); + // let timer_sc_proof_phase2_inner1_2 = Timer::new("prove_sc_phase_two_inner1.2"); + let (evals_A, evals_B, evals_C) = + inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx); + // timer_sc_proof_phase2_inner1_2.stop(); + + assert_eq!(evals_A.len(), evals_B.len()); + assert_eq!(evals_A.len(), evals_C.len()); + + // Pre-allocate the result vector + + #[cfg(feature = "multicore")] + let result = { + let mut result = Vec::with_capacity(evals_A.len()); + // Parallelize the map operation + evals_A.par_iter() + .zip(evals_B.par_iter()) + .zip(evals_C.par_iter()) + .map(|((a, b), c)| r_A * a + r_B * b + r_C * c) + .collect_into_vec(&mut result); + result + }; + #[cfg(not(feature = "multicore"))] + let result = (0..evals_A.len()) + .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i]) + .collect::>(); + + result + }; + // another instance of the sum-check protocol + let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = { + + let (mut io_one_poly, mut wit_poly_vec) = { + let io_one_poly = { + // let io_one = { + let num_inputs = input.len(); + // let num_vars = wit.len(); + let num_vars = inst.get_num_vars(); + let mut io_one = vec![Scalar::zero(); 2*num_vars]; // Initialize with zeros directly + io_one[num_vars] = Scalar::one(); // directly assigning the one after the initial zeros + io_one[num_vars + 1..num_vars + 1 + num_inputs].copy_from_slice(input); + // io_one + // }; // h(ry) = ry[0]\tilde{io, 1}(ry[1:]) + DensePolynomial::new(io_one) + }; + + assert_eq!(poly_vars_vec.len(), 2); + let wit_poly_vec = { + let ori_len = poly_vars_vec[0].len(); + let mut wit_poly_vec = ( + DensePolynomial::new(vec![Scalar::zero(); 2 * ori_len]), + DensePolynomial::new(vec![Scalar::zero(); 2 * ori_len]), + ); + // DensePolynomial::new(vec![Scalar::zero(); 2 * ori_len]), + // DensePolynomial::new(vec![Scalar::zero(); 2 * ori_len]), + // ]; + + // wit_poly_vec[0].Z[..ori_len].copy_from_slice(&poly_vars_vec[0].Z); + // wit_poly_vec[1].Z[..ori_len].copy_from_slice(&poly_vars_vec[1].Z); + wit_poly_vec.0.Z[..ori_len].copy_from_slice(&poly_vars_vec[0].Z); + wit_poly_vec.1.Z[..ori_len].copy_from_slice(&poly_vars_vec[1].Z); + wit_poly_vec + }; + (io_one_poly, wit_poly_vec) + }; + + + let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = IR1CSProof::prove_phase_two( + num_rounds_y, + &claim_phase2, + &blind_claim_phase2, + // &mut DensePolynomial::new(io_one.clone()), 
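+        // Argument sketch (comment added for readability): io_one_poly encodes
+        // the public vector (1, input); the two witness polynomials are blended
+        // by the indicator polynomial gens.polys_extend[0] (the "v" selector);
+        // evals_ABC carries r_A*A(rx, y) + r_B*B(rx, y) + r_C*C(rx, y).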
+ &mut io_one_poly, + (&mut wit_poly_vec.0, &mut wit_poly_vec.1), + &mut gens.polys_extend[0].clone(), + &mut DensePolynomial::new(evals_ABC), + &gens.gens_sc, + transcript, + random_tape, + ); + + (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) + }; + + timer_sc_proof_phase2.stop(); + + let timer_polyeval = Timer::new("polyeval"); + // let mut proof_eval_vars_at_ry_vec = Vec::new(); + // let mut comm_vars_at_ry_vec = Vec::new(); + // let mut blind_eval_vec = Vec::new(); + let mut proof_eval_vars_at_ry_vec = Vec::with_capacity(2); + let mut comm_vars_at_ry_vec = Vec::with_capacity(2); + let mut blind_eval_vec = Vec::with_capacity(2); + assert_eq!(poly_vars_vec.len(), blinds_vars_vec.len()); // for debug only + + for (poly_vars, blind_vars) in poly_vars_vec.iter().zip(blinds_vars_vec.iter()) { + let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]); + let blind_eval = random_tape.random_scalar(b"blind_eval"); + let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove( + poly_vars, + Some(blind_vars), + &ry[1..], + &eval_vars_at_ry, + Some(&blind_eval), + &gens.gens_pc, + transcript, + random_tape, + ); + proof_eval_vars_at_ry_vec.push(proof_eval_vars_at_ry); + comm_vars_at_ry_vec.push(comm_vars_at_ry); + blind_eval_vec.push(blind_eval); + } + + timer_polyeval.stop(); + + // prove the final step of sum-check #2 + let blind_expected_claim_postsc2 = { + let eval_v0_at_ry = gens.polys[0].evaluate(&ry[1..]); + // let blind_eval_Z_at_ry = eval_v0_at_ry * blind_eval_vec[0] + // + (Scalar::one() - eval_v0_at_ry) * blind_eval_vec[1]; + let blind_eval_Z_at_ry = (blind_eval_vec[0] - blind_eval_vec[1]) * eval_v0_at_ry + blind_eval_vec[1]; + claims_phase2[4] * blind_eval_Z_at_ry * (Scalar::one() - ry[0]) // claims_phase2[5] = evals_ABC evaluated at ry + }; + + // let actual_z_at_ry = claims_phase2[0] + // + claims_phase2[1] * claims_phase2[3] + // + claims_phase2[2] * claims_phase2[4]; + let actual_z_at_ry = claims_phase2[0] + + (claims_phase2[1] - claims_phase2[2]) * claims_phase2[3] + + claims_phase2[2]; + let claim_post_phase2 = actual_z_at_ry * claims_phase2[4]; + let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove( + &gens.gens_pc.gens.gens_1, + transcript, + random_tape, + &claim_post_phase2, + &blind_expected_claim_postsc2, + &claim_post_phase2, + &blind_claim_postsc2, + ); + + timer_prove.stop(); + + ( + IR1CSProof { + comm_vars_vec: comm_vars_vec.clone(), + sc_proof_phase1, + claims_phase2: ( + comm_Az_claim, + comm_Bz_claim, + comm_Cz_claim, + comm_prod_Az_Bz_claims, + ), + pok_claims_phase2: (pok_Cz_claim, proof_prod), + proof_eq_sc_phase1, + sc_proof_phase2, + comm_vars_at_ry_vec, + proof_eval_vars_at_ry_vec, + proof_eq_sc_phase2, + }, + rx, + ry, + ) + } + + pub fn verify( + &self, + num_vars: usize, // might need change + num_cons: usize, + pubinp_lens: &[usize], // newlly added + input: &mut Vec, + evals: &(Scalar, Scalar, Scalar), + transcript: &mut Transcript, + gens: &IR1CSGens, + ) -> Result<(Vec, Vec), ProofVerifyError> { + transcript.append_protocol_name(IR1CSProof::protocol_name()); + + input.append_to_transcript(b"input", transcript); + let n = num_vars; + // add the commitment to the verifier's transcript + for (rand_len, comm_vars) in pubinp_lens.iter().skip(1) + .zip(self.comm_vars_vec.iter().take(self.comm_vars_vec.len()-1)) + { + comm_vars + .append_to_transcript(b"poly_commitment", transcript); + let verifier_random = transcript.challenge_vector(b"verifier_random", *rand_len); + input.extend(&verifier_random); + } + + self.comm_vars_vec // commitment 
to the witness in the final round + .last().unwrap() + .append_to_transcript(b"poly_commitment", transcript); + + let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2()); + + // derive the verifier's challenge tau + let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x); + // verify the first sum-check instance + let claim_phase1 = Scalar::zero() + .commit(&Scalar::zero(), &gens.gens_sc.gens_1) + .compress(); + let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify( + &claim_phase1, + num_rounds_x, + 3, + &gens.gens_sc.gens_1, + &gens.gens_sc.gens_4, + transcript, + )?; + // println!("rx {:?}", rx); + // perform the intermediate sum-check test with claimed Az, Bz, and Cz + let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2; + let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2; + + pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?; + proof_prod.verify( + &gens.gens_sc.gens_1, + transcript, + comm_Az_claim, + comm_Bz_claim, + comm_prod_Az_Bz_claims, + )?; + + comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript); + comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript); + comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript); + comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript); + + let taus_bound_rx: Scalar = (0..rx.len()) + .map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i])) + .product(); + let expected_claim_post_phase1 = (taus_bound_rx + * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap())) + .compress(); + + // verify proof that expected_claim_post_phase1 == claim_post_phase1 + self.proof_eq_sc_phase1.verify( + &gens.gens_sc.gens_1, + transcript, + &expected_claim_post_phase1, + &comm_claim_post_phase1, + )?; + + // derive three public challenges and then derive a joint claim + let r_A = transcript.challenge_scalar(b"challenege_Az"); + let r_B = transcript.challenge_scalar(b"challenege_Bz"); + let r_C = transcript.challenge_scalar(b"challenege_Cz"); + + // r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim; + let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul( + iter::once(&r_A) + .chain(iter::once(&r_B)) + .chain(iter::once(&r_C)), + iter::once(&comm_Az_claim) + .chain(iter::once(&comm_Bz_claim)) + .chain(iter::once(&comm_Cz_claim)) + .map(|pt| pt.decompress().unwrap()) + .collect::>(), + ) + .compress(); + + // verify the joint claim with a sum-check protocol + let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify( + &comm_claim_phase2, + num_rounds_y, + 3, + &gens.gens_sc.gens_1, + // &gens.gens_sc.gens_3, + &gens.gens_sc.gens_4, + transcript, + )?; + + // verify Z(ry) proof against the initial commitment + self.comm_vars_at_ry_vec.iter() + .zip(self.proof_eval_vars_at_ry_vec.iter()) + .zip(self.comm_vars_vec.iter()) + .for_each(|((comm_vars_at_ry, proof_eval_vars_at_ry), comm_vars)| { + proof_eval_vars_at_ry + .verify( + &gens.gens_pc, + transcript, + &ry[1..], + comm_vars_at_ry, + comm_vars, + ) + .unwrap(); + }); + + let poly_input_eval = { + // constant term + let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())]; + //remaining inputs + input_as_sparse_poly_entries.extend( + (0..input.len()) + .map(|i| SparsePolyEntry::new(i + 1, input[i])) + .collect::>(), + ); + SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..]) + }; + + + let comm_eval_Z_at_ry = { + 
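+      // Combination sketch (comment added for readability): the verifier
+      // homomorphically forms a commitment to
+      //   Z(ry) = (1 - ry[0]) * (v0*W0 + (1 - v0)*W1)(ry[1..]) + ry[0] * \tilde{io}(ry[1..]),
+      // scaling the two claimed witness-evaluation commitments by
+      // (1 - ry[0]) * v0 and (1 - ry[0]) * (1 - v0), and adding a public
+      // commitment to the io evaluation scaled by ry[0].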
let decompress_comm_vars_at_ry_vec = self.comm_vars_at_ry_vec.iter() + .map(|cm| cm.decompress().unwrap()) + .collect::>(); + let v0_evaluate_at_ry = gens.polys[0].evaluate(&ry[1..]); + let vpolys_evaluate_at_ry = [v0_evaluate_at_ry, Scalar::one() - v0_evaluate_at_ry]; + + GroupElement::vartime_multiscalar_mul( + vpolys_evaluate_at_ry.iter().map(|res| (Scalar::one() - ry[0]) * res) + // gens.polys.iter().take(decompress_comm_vars_at_ry_vec.len()).map(|poly| (Scalar::one() - ry[0]) * poly.evaluate(&ry[1..])) // to be optimized + .chain(iter::once(ry[0])), + decompress_comm_vars_at_ry_vec.iter() + .chain(iter::once( + &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1), + )), + ) + }; + + + // perform the final check in the second sum-check protocol + let (eval_A_r, eval_B_r, eval_C_r) = evals; + let expected_claim_post_phase2 = + ((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress(); + // verify proof that expected_claim_post_phase1 == claim_post_phase1 + self.proof_eq_sc_phase2.verify( + &gens.gens_sc.gens_1, + transcript, + &expected_claim_post_phase2, + &comm_claim_post_phase2, + )?; + + Ok((rx, ry)) + } +} \ No newline at end of file diff --git a/third_party/Dorian/src/lib.rs b/third_party/Dorian/src/lib.rs new file mode 100644 index 000000000..28233fa37 --- /dev/null +++ b/third_party/Dorian/src/lib.rs @@ -0,0 +1,997 @@ +#![allow(non_snake_case)] +#![doc = include_str!("../README.md")] +// #![deny(missing_docs)] +#![allow(clippy::assertions_on_result_states)] + +extern crate byteorder; +extern crate core; +extern crate curve25519_dalek; +extern crate digest; +extern crate merlin; +extern crate rand; +extern crate sha3; + +#[cfg(feature = "multicore")] +extern crate rayon; + +mod commitments; +mod dense_mlpoly; +pub use dense_mlpoly::DensePolynomial; +mod errors; +mod group; +mod math; +mod nizk; +mod product_tree; +mod r1csinstance; +mod r1csproof; +mod ir1csproof; +mod random; +pub mod scalar; +mod sparse_mlpoly; +mod sumcheck; +mod timer; +mod transcript; +mod unipoly; + +use core::cmp::max; +use errors::{ProofVerifyError, R1CSError}; +use merlin::Transcript; +use r1csinstance::{ + R1CSCommitment, R1CSCommitmentGens, R1CSDecommitment, R1CSEvalProof, R1CSInstance, +}; +use r1csproof::{R1CSGens, R1CSProof}; +use ir1csproof::IR1CSGens; +use ir1csproof::IR1CSProof; +use random::RandomTape; +use scalar::Scalar; +use serde::{Deserialize, Serialize}; +use timer::Timer; +use transcript::{AppendToTranscript, ProofTranscript}; +use crate::dense_mlpoly::{PolyCommitment, PolyCommitmentBlinds}; + +/// `ComputationCommitment` holds a public preprocessed NP statement (e.g., R1CS) +pub struct ComputationCommitment { + comm: R1CSCommitment, +} + +/// `ComputationDecommitment` holds information to decommit `ComputationCommitment` +pub struct ComputationDecommitment { + decomm: R1CSDecommitment, +} + +/// `Assignment` holds an assignment of values to either the inputs or variables in an `Instance` +#[derive(Clone)] +pub struct Assignment { + assignment: Vec, +} + +impl Assignment { + /// Constructs a new `Assignment` from a vector + pub fn new(assignment: &[[u8; 32]]) -> Result { + let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result, R1CSError> { + let mut vec_scalar: Vec = Vec::new(); + for v in vec { + let val = Scalar::from_bytes(v); + if val.is_some().unwrap_u8() == 1 { + vec_scalar.push(val.unwrap()); + } else { + return Err(R1CSError::InvalidScalar); + } + } + Ok(vec_scalar) + }; + + let assignment_scalar = bytes_to_scalar(assignment); + + // check for 
any parsing errors + if assignment_scalar.is_err() { + return Err(R1CSError::InvalidScalar); + } + + Ok(Assignment { + assignment: assignment_scalar.unwrap(), + }) + } + + /// pads Assignment to the specified length + fn pad(&self, len: usize) -> VarsAssignment { + // check that the new length is higher than current length + assert!(len > self.assignment.len()); + + let padded_assignment = { + let mut padded_assignment = self.assignment.clone(); + padded_assignment.extend(vec![Scalar::zero(); len - self.assignment.len()]); + padded_assignment + }; + + VarsAssignment { + assignment: padded_assignment, + } + } + + /// pads Assignment to the specified length + pub fn pad_inner(assignment: &mut Vec, len: usize) { + // check that the new length is higher than current length + assert!(len > assignment.len()); + + assignment.extend(vec![Scalar::zero(); len - assignment.len()]); + } +} + +/// `VarsAssignment` holds an assignment of values to variables in an `Instance` +pub type VarsAssignment = Assignment; + +/// `InputsAssignment` holds an assignment of values to variables in an `Instance` +pub type InputsAssignment = Assignment; + +/// `Instance` holds the description of R1CS matrices and a hash of the matrices +#[derive(Debug, Serialize, Deserialize)] // to test +pub struct Instance { + inst: R1CSInstance, + digest: Vec, +} + +impl Instance { + /// Constructs a new `Instance` and an associated satisfying assignment + pub fn new( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + A: &[(usize, usize, [u8; 32])], + B: &[(usize, usize, [u8; 32])], + C: &[(usize, usize, [u8; 32])], + ) -> Result { + let (num_vars_padded, num_cons_padded) = { + let num_vars_padded = { + let mut num_vars_padded = num_vars; + + // ensure that num_inputs + 1 <= num_vars + num_vars_padded = max(num_vars_padded, num_inputs + 1); + + // ensure that num_vars_padded a power of two + if num_vars_padded.next_power_of_two() != num_vars_padded { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + num_vars_padded + }; + + let num_cons_padded = { + let mut num_cons_padded = num_cons; + + // ensure that num_cons_padded is at least 2 + if num_cons_padded == 0 || num_cons_padded == 1 { + num_cons_padded = 2; + } + + // ensure that num_cons_padded is power of 2 + if num_cons.next_power_of_two() != num_cons { + num_cons_padded = num_cons.next_power_of_two(); + } + num_cons_padded + }; + + (num_vars_padded, num_cons_padded) + }; + + let bytes_to_scalar = + |tups: &[(usize, usize, [u8; 32])]| -> Result, R1CSError> { + let mut mat: Vec<(usize, usize, Scalar)> = Vec::new(); + for &(row, col, val_bytes) in tups { + // row must be smaller than num_cons + if row >= num_cons { + return Err(R1CSError::InvalidIndex); + } + + // col must be smaller than num_vars + 1 + num_inputs + if col >= num_vars + 1 + num_inputs { + return Err(R1CSError::InvalidIndex); + } + + let val = Scalar::from_bytes(&val_bytes); + if val.is_some().unwrap_u8() == 1 { + // if col >= num_vars, it means that it is referencing a 1 or input in the satisfying + // assignment + if col >= num_vars { + mat.push((row, col + num_vars_padded - num_vars, val.unwrap())); + } else { + mat.push((row, col, val.unwrap())); + } + } else { + return Err(R1CSError::InvalidScalar); + } + } + + // pad with additional constraints up until num_cons_padded if the original constraints were 0 or 1 + // we do not need to pad otherwise because the dummy constraints are implicit in the sum-check protocol + if num_cons == 0 || num_cons == 1 { + for i in tups.len()..num_cons_padded { 
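+          // each padding row encodes the trivially satisfied constraint
+          // 0 * (constant-one column) = 0, and exists only to bring the
+          // instance up to the padded number of constraints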
+ mat.push((i, num_vars, Scalar::zero())); + } + } + + Ok(mat) + }; + + let A_scalar = bytes_to_scalar(A); + if A_scalar.is_err() { + return Err(A_scalar.err().unwrap()); + } + + let B_scalar = bytes_to_scalar(B); + if B_scalar.is_err() { + return Err(B_scalar.err().unwrap()); + } + + let C_scalar = bytes_to_scalar(C); + if C_scalar.is_err() { + return Err(C_scalar.err().unwrap()); + } + + let inst = R1CSInstance::new( + num_cons_padded, + num_vars_padded, + num_inputs, + &A_scalar.unwrap(), + &B_scalar.unwrap(), + &C_scalar.unwrap(), + ); + + let digest = inst.get_digest(); + + Ok(Instance { inst, digest }) + } + + /// Checks if a given R1CSInstance is satisfiable with a given variables and inputs assignments + pub fn is_sat( + &self, + vars: &VarsAssignment, + inputs: &InputsAssignment, + ) -> Result { + if vars.assignment.len() > self.inst.get_num_vars() { + return Err(R1CSError::InvalidNumberOfInputs); + } + + if inputs.assignment.len() != self.inst.get_num_inputs() { + return Err(R1CSError::InvalidNumberOfInputs); + } + + // we might need to pad variables + let padded_vars = { + let num_padded_vars = self.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars.clone() + } + }; + + Ok( + self + .inst + .is_sat(&padded_vars.assignment, &inputs.assignment), + ) + } + + /// Constructs a new synthetic R1CS `Instance` and an associated satisfying assignment + pub fn produce_synthetic_r1cs( + num_cons: usize, + num_vars: usize, + num_inputs: usize, + ) -> (Instance, VarsAssignment, InputsAssignment) { + let (inst, vars, inputs) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + let digest = inst.get_digest(); + ( + Instance { inst, digest }, + VarsAssignment { assignment: vars }, + InputsAssignment { assignment: inputs }, + ) + } +} + +/// `SNARKGens` holds public parameters for producing and verifying proofs with the Spartan SNARK +pub struct SNARKGens { + gens_r1cs_sat: R1CSGens, + gens_r1cs_eval: R1CSCommitmentGens, +} + +impl SNARKGens { + /// Constructs a new `SNARKGens` given the size of the R1CS statement + /// `num_nz_entries` specifies the maximum number of non-zero entries in any of the three R1CS matrices + pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize, num_nz_entries: usize) -> Self { + let num_vars_padded = { + let mut num_vars_padded = max(num_vars, num_inputs + 1); + if num_vars_padded != num_vars_padded.next_power_of_two() { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + num_vars_padded + }; + + let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded); + let gens_r1cs_eval = R1CSCommitmentGens::new( + b"gens_r1cs_eval", + num_cons, + num_vars_padded, + num_inputs, + num_nz_entries, + ); + SNARKGens { + gens_r1cs_sat, + gens_r1cs_eval, + } + } +} + +/// `SNARK` holds a proof produced by Spartan SNARK +#[derive(Serialize, Deserialize, Debug)] +pub struct SNARK { + r1cs_sat_proof: R1CSProof, + inst_evals: (Scalar, Scalar, Scalar), + r1cs_eval_proof: R1CSEvalProof, +} + +impl SNARK { + fn protocol_name() -> &'static [u8] { + b"Spartan SNARK proof" + } + + /// A public computation to create a commitment to an R1CS instance + pub fn encode( + inst: &Instance, + gens: &SNARKGens, + ) -> (ComputationCommitment, ComputationDecommitment) { + let timer_encode = Timer::new("SNARK::encode"); + let (comm, decomm) = inst.inst.commit(&gens.gens_r1cs_eval); + timer_encode.stop(); + ( + ComputationCommitment { comm }, + 
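+      // the commitment is published to verifiers, while the decommitment stays
+      // with the prover and is consumed by R1CSEvalProof::prove inside SNARK::prove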
ComputationDecommitment { decomm }, + ) + } + + /// A method to produce a SNARK proof of the satisfiability of an R1CS instance + pub fn prove( + inst: &Instance, + comm: &ComputationCommitment, + decomm: &ComputationDecommitment, + vars: VarsAssignment, + inputs: &InputsAssignment, + gens: &SNARKGens, + transcript: &mut Transcript, + ) -> Self { + let timer_prove = Timer::new("SNARK::prove"); + + // we create a Transcript object seeded with a random Scalar + // to aid the prover produce its randomness + let mut random_tape = RandomTape::new(b"proof"); + + transcript.append_protocol_name(SNARK::protocol_name()); + comm.comm.append_to_transcript(b"comm", transcript); + + let (r1cs_sat_proof, rx, ry) = { + let (proof, rx, ry) = { + // we might need to pad variables + let padded_vars = { + let num_padded_vars = inst.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars + } + }; + + R1CSProof::prove( + &inst.inst, + padded_vars.assignment, + &inputs.assignment, + &gens.gens_r1cs_sat, + transcript, + &mut random_tape, + ) + }; + + let proof_encoded: Vec = bincode::serialize(&proof).unwrap(); + Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); + + (proof, rx, ry) + }; + + // We send evaluations of A, B, C at r = (rx, ry) as claims + // to enable the verifier complete the first sum-check + let timer_eval = Timer::new("eval_sparse_polys"); + let inst_evals = { + let (Ar, Br, Cr) = inst.inst.evaluate(&rx, &ry); + Ar.append_to_transcript(b"Ar_claim", transcript); + Br.append_to_transcript(b"Br_claim", transcript); + Cr.append_to_transcript(b"Cr_claim", transcript); + (Ar, Br, Cr) + }; + timer_eval.stop(); + + let r1cs_eval_proof = { + let proof = R1CSEvalProof::prove( + &decomm.decomm, + &rx, + &ry, + &inst_evals, + &gens.gens_r1cs_eval, + transcript, + &mut random_tape, + ); + + let proof_encoded: Vec = bincode::serialize(&proof).unwrap(); + Timer::print(&format!("len_r1cs_eval_proof {:?}", proof_encoded.len())); + proof + }; + + timer_prove.stop(); + SNARK { + r1cs_sat_proof, + inst_evals, + r1cs_eval_proof, + } + } + + /// A method to verify the SNARK proof of the satisfiability of an R1CS instance + pub fn verify( + &self, + comm: &ComputationCommitment, + input: &InputsAssignment, + transcript: &mut Transcript, + gens: &SNARKGens, + ) -> Result<(), ProofVerifyError> { + let timer_verify = Timer::new("SNARK::verify"); + transcript.append_protocol_name(SNARK::protocol_name()); + + // append a commitment to the computation to the transcript + comm.comm.append_to_transcript(b"comm", transcript); + + let timer_sat_proof = Timer::new("verify_sat_proof"); + assert_eq!(input.assignment.len(), comm.comm.get_num_inputs()); + let (rx, ry) = self.r1cs_sat_proof.verify( + comm.comm.get_num_vars(), + comm.comm.get_num_cons(), + &input.assignment, + &self.inst_evals, + transcript, + &gens.gens_r1cs_sat, + )?; + timer_sat_proof.stop(); + + let timer_eval_proof = Timer::new("verify_eval_proof"); + let (Ar, Br, Cr) = &self.inst_evals; + Ar.append_to_transcript(b"Ar_claim", transcript); + Br.append_to_transcript(b"Br_claim", transcript); + Cr.append_to_transcript(b"Cr_claim", transcript); + self.r1cs_eval_proof.verify( + &comm.comm, + &rx, + &ry, + &self.inst_evals, + &gens.gens_r1cs_eval, + transcript, + )?; + timer_eval_proof.stop(); + timer_verify.stop(); + Ok(()) + } +} + +/// `NIZKGens` holds public parameters for producing and verifying proofs with the Spartan NIZK +#[derive(Serialize, Deserialize)] 
// to test +pub struct NIZKGens { + gens_r1cs_sat: R1CSGens, +} + +impl NIZKGens { + /// Constructs a new `NIZKGens` given the size of the R1CS statement + pub fn new(num_cons: usize, num_vars: usize, num_inputs: usize) -> Self { + let num_vars_padded = { + let mut num_vars_padded = max(num_vars, num_inputs + 1); + if num_vars_padded != num_vars_padded.next_power_of_two() { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + num_vars_padded + }; + + let gens_r1cs_sat = R1CSGens::new(b"gens_r1cs_sat", num_cons, num_vars_padded); + NIZKGens { gens_r1cs_sat } + } +} + +/// `NIZKRandGens` holds public parameters for producing and verifying proofs with the random extension of Spartan NIZK +#[derive(Serialize, Deserialize)] +pub struct NIZKRandGens { + gens_r1cs_sat: IR1CSGens, + pub pubinp_len: Vec, + pub wit_len: Vec +} + +impl NIZKRandGens { + /// Constructs a new `NIZKRandGens` + pub fn new( + num_cons: usize, + pubinp_len: &[usize], + wit_len: &[usize], + // num_inputs: usize + ) -> Self { + let num_inputs = pubinp_len.iter().sum::(); + let num_vars = wit_len.iter().sum::(); + + let mut num_vars_padded = max(num_vars, num_inputs + 1); + if num_vars_padded != num_vars_padded.next_power_of_two() { + num_vars_padded = num_vars_padded.next_power_of_two(); + } + + let mut wit_len_padded = wit_len.to_owned(); + if num_vars_padded != num_vars { + // println!("last wit len {}", wit_len[wit_len.len()-1]); + wit_len_padded[wit_len.len()-1] = wit_len_padded[wit_len.len()-1] + num_vars_padded - num_vars; + } + // println!("last wit len {}", wit_len_padded[wit_len.len()-1]); + Self { + gens_r1cs_sat: IR1CSGens::new(b"gens_r1cs_sat", num_cons, &wit_len_padded), + pubinp_len: pubinp_len.to_owned(), + wit_len: wit_len_padded.clone() // wit_len.clone() + } + } +} + +/// `NIZK` holds a proof produced by Spartan NIZK +#[derive(Serialize, Deserialize, Debug)] +pub struct NIZK { + r1cs_sat_proof: R1CSProof, + r: (Vec, Vec), +} + +impl NIZK { + fn protocol_name() -> &'static [u8] { + b"Spartan NIZK proof" + } + + /// A method to produce a NIZK proof of the satisfiability of an R1CS instance + pub fn prove( + inst: &Instance, + vars: VarsAssignment, + input: &InputsAssignment, + gens: &NIZKGens, + transcript: &mut Transcript, + ) -> Self { + let timer_prove = Timer::new("NIZK::prove"); + // we create a Transcript object seeded with a random Scalar + // to aid the prover produce its randomness + let mut random_tape = RandomTape::new(b"proof"); + + transcript.append_protocol_name(NIZK::protocol_name()); + transcript.append_message(b"R1CSInstanceDigest", &inst.digest); + + let (r1cs_sat_proof, rx, ry) = { + // we might need to pad variables + let padded_vars = { + let num_padded_vars = inst.inst.get_num_vars(); + let num_vars = vars.assignment.len(); + if num_padded_vars > num_vars { + vars.pad(num_padded_vars) + } else { + vars + } + }; + + let (proof, rx, ry) = R1CSProof::prove( + &inst.inst, + padded_vars.assignment, + &input.assignment, + &gens.gens_r1cs_sat, + transcript, + &mut random_tape, + ); + let proof_encoded: Vec = bincode::serialize(&proof).unwrap(); + Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); + (proof, rx, ry) + }; + + timer_prove.stop(); + NIZK { + r1cs_sat_proof, + r: (rx, ry), + } + } + + /// A method to verify a NIZK proof of the satisfiability of an R1CS instance + pub fn verify( + &self, + inst: &Instance, + input: &InputsAssignment, + transcript: &mut Transcript, + gens: &NIZKGens, + ) -> Result<(), ProofVerifyError> { + let timer_verify = 
Timer::new("NIZK::verify"); + + transcript.append_protocol_name(NIZK::protocol_name()); + transcript.append_message(b"R1CSInstanceDigest", &inst.digest); + + // We send evaluations of A, B, C at r = (rx, ry) as claims + // to enable the verifier complete the first sum-check + let timer_eval = Timer::new("eval_sparse_polys"); + let (claimed_rx, claimed_ry) = &self.r; + let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry); + timer_eval.stop(); + + let timer_sat_proof = Timer::new("verify_sat_proof"); + assert_eq!(input.assignment.len(), inst.inst.get_num_inputs()); + let (rx, ry) = self.r1cs_sat_proof.verify( + inst.inst.get_num_vars(), + inst.inst.get_num_cons(), + &input.assignment, + &inst_evals, + transcript, + &gens.gens_r1cs_sat, + )?; + + // verify if claimed rx and ry are correct + assert_eq!(rx, *claimed_rx); + assert_eq!(ry, *claimed_ry); + timer_sat_proof.stop(); + timer_verify.stop(); + + Ok(()) + } +} + +/// Intermediate values for the NIZKRand protocol +pub struct NIZKRandInter { + // input: InputsAssignment, + input: Vec, + wit: Vec, + poly_vars_vec: Vec, + comm_vars_vec: Vec, + blinds_vars_vec: Vec, + random_tape: RandomTape, +} + +impl NIZKRandInter { + /// Constructs a new `NIZKRandInter` from a vector + pub fn new(input: &InputsAssignment) -> NIZKRandInter { + NIZKRandInter { + input: input.assignment.clone(), + wit: Vec::new(), + poly_vars_vec: Vec::new(), + comm_vars_vec: Vec::new(), + blinds_vars_vec: Vec::new(), + random_tape: RandomTape::new(b"proof"), + } + } +} + +/// `NIZKRand` holds a proof produced by Spartan NIZK supporting verifier randomness +#[derive(Serialize, Deserialize, Debug)] +pub struct NIZKRand { + r1cs_sat_proof: IR1CSProof, + r: (Vec, Vec), +} + +impl NIZKRand { + fn protocol_name() -> &'static [u8] { + b"Spartan NIZK proof with verifier randomness" + } + + pub fn prove_00( + inst: &Instance, + input: &InputsAssignment, + gens: &NIZKRandGens, + transcript: &mut Transcript, + ) { + let timer_prove = Timer::new("NIZKRand::prove00"); + assert!(gens.wit_len.len() == gens.pubinp_len.len()); + // we currently require the number of |inputs| + 1 to be at most number of vars + assert!(gens.pubinp_len.iter().sum::() < gens.wit_len.iter().sum::()); + // we create a Transcript object seeded with a random Scalar + // to aid the prover produce its randomness + // let mut random_tape = RandomTape::new(b"proof"); + + transcript.append_protocol_name(NIZKRand::protocol_name()); + transcript.append_message(b"R1CSInstanceDigest", &inst.digest); + + // let padded_vars = { + // let num_padded_vars = inst.inst.get_num_vars(); + // let num_vars = vars.assignment.len(); + // if num_padded_vars > num_vars { + // vars.pad(num_padded_vars) + // } else { + // vars + // } + // }; + + IR1CSProof::prove_00( + &input.assignment, + transcript + ); + timer_prove.stop(); + } + + pub fn prove_01( + inst: &Instance, + vars: &VarsAssignment, + rand_len: usize, + intermediate: &mut NIZKRandInter, + gens: &NIZKRandGens, + transcript: &mut Transcript, + ) -> Vec { + let timer_prove = Timer::new("NIZKRand::prove01"); + let mut padded_wit = vec![Scalar::zero(); inst.inst.get_num_vars()]; + padded_wit[intermediate.wit.len()..intermediate.wit.len() + vars.assignment.len()].copy_from_slice(&vars.assignment); + let verifier_rand: Vec = IR1CSProof::prove_01( + // &vars.assignment, + &padded_wit, + rand_len, + &mut intermediate.poly_vars_vec, + &mut intermediate.comm_vars_vec, + &mut intermediate.blinds_vars_vec, + &gens.gens_r1cs_sat, + transcript, + &mut intermediate.random_tape, + ); + 
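+    // Round-by-round usage sketch (hypothetical caller; the variable names
+    // here are illustrative only):
+    //   NIZKRand::prove_00(&inst, &inputs, &gens, &mut transcript);
+    //   let rand = NIZKRand::prove_01(&inst, &wit_r1, rand_len, &mut inter, &gens, &mut transcript);
+    //   // ...derive the next witness segment from `rand`...
+    //   let proof = NIZKRand::prove_1(&inst, &wit_final, &mut inter, &gens, &mut transcript);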
intermediate.wit.extend(vars.assignment.clone()); + intermediate.input.extend(verifier_rand.clone()); + timer_prove.stop(); + verifier_rand + } + + pub fn prove_1( + inst: &Instance, + vars: &VarsAssignment, + intermediate: &mut NIZKRandInter, + gens: &NIZKRandGens, + transcript: &mut Transcript, + ) -> Self { + let timer_prove = Timer::new("NIZKRand::prove1"); + + let (r1cs_sat_proof, rx, ry) = { + // we might need to pad variables + // let mut padded_wit = vec![Scalar::zero(); inst.inst.get_num_vars()]; + // padded_wit[intermediate.wit.len()..intermediate.wit.len() + vars.assignment.len()].copy_from_slice(&vars.assignment); + + let (proof, rx, ry) = IR1CSProof::prove_1( + &inst.inst, + // padded_wit, + &vars.assignment, + &intermediate.wit, + &intermediate.input, + &mut intermediate.poly_vars_vec, + &mut intermediate.comm_vars_vec, + &mut intermediate.blinds_vars_vec, + &gens.gens_r1cs_sat, + transcript, + &mut intermediate.random_tape, + ); + // let proof_encoded: Vec = bincode::serialize(&proof).unwrap(); + // Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); + (proof, rx, ry) + }; + + #[cfg(feature = "bench")] + { + let proof_encoded: Vec = bincode::serialize(&r1cs_sat_proof).unwrap(); + Timer::print(&format!("len_r1cs_sat_proof {:?}", proof_encoded.len())); + } + + timer_prove.stop(); + NIZKRand { + r1cs_sat_proof, + r: (rx, ry), + } + } + + /// A method to verify a NIZKRand proof of the satisfiability of an R1CS instance + pub fn verify( + &self, + inst: &Instance, + input: &mut InputsAssignment, + transcript: &mut Transcript, + gens: &NIZKRandGens, + ) -> Result<(), ProofVerifyError> { + let timer_verify = Timer::new("NIZK::verify"); + + transcript.append_protocol_name(NIZKRand::protocol_name()); + transcript.append_message(b"R1CSInstanceDigest", &inst.digest); + + // We send evaluations of A, B, C at r = (rx, ry) as claims + // to enable the verifier complete the first sum-check + let timer_eval = Timer::new("eval_sparse_polys"); + let (claimed_rx, claimed_ry) = &self.r; + let inst_evals = inst.inst.evaluate(claimed_rx, claimed_ry); + timer_eval.stop(); + + let timer_sat_proof = Timer::new("verify_sat_proof"); + assert_eq!(input.assignment.len(), gens.pubinp_len[0], "input len: {}, pubinp_len: {}", input.assignment.len(), gens.pubinp_len[0]); + assert_eq!(gens.pubinp_len.iter().sum::(), // for debug only + inst.inst.get_num_inputs(), + "pubinp_len: {}, num_inputs: {}", gens.pubinp_len.iter().sum::(), inst.inst.get_num_inputs() + ); + let (rx, ry) = self.r1cs_sat_proof.verify( + inst.inst.get_num_vars(), + inst.inst.get_num_cons(), + &gens.pubinp_len, + &mut input.assignment, + &inst_evals, + transcript, + &gens.gens_r1cs_sat, + )?; + + // verify if claimed rx and ry are correct + assert_eq!(rx, *claimed_rx); + assert_eq!(ry, *claimed_ry); + timer_sat_proof.stop(); + timer_verify.stop(); + + Ok(()) + } + +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn check_snark() { + let num_vars = 256; + let num_cons = num_vars; + let num_inputs = 10; + + // produce public generators + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_cons); + + // produce a synthetic R1CSInstance + let (inst, vars, inputs) = Instance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs); + + // create a commitment to R1CSInstance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a proof + let mut prover_transcript = Transcript::new(b"example"); + let proof = SNARK::prove( + &inst, + &comm, + &decomm, + vars, + &inputs, + &gens, + &mut 
prover_transcript, + ); + + // verify the proof + let mut verifier_transcript = Transcript::new(b"example"); + assert!(proof + .verify(&comm, &inputs, &mut verifier_transcript, &gens) + .is_ok()); + } + + #[test] + pub fn check_r1cs_invalid_index() { + let num_cons = 4; + let num_vars = 8; + let num_inputs = 1; + + let zero: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ]; + + let A = vec![(0, 0, zero)]; + let B = vec![(100, 1, zero)]; + let C = vec![(1, 1, zero)]; + + let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C); + assert!(inst.is_err()); + assert_eq!(inst.err(), Some(R1CSError::InvalidIndex)); + } + + #[test] + pub fn check_r1cs_invalid_scalar() { + let num_cons = 4; + let num_vars = 8; + let num_inputs = 1; + + let zero: [u8; 32] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + ]; + + let larger_than_mod = [ + 3, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 57, 51, 72, 125, 157, 41, 83, 167, 237, 115, + ]; + + let A = vec![(0, 0, zero)]; + let B = vec![(1, 1, larger_than_mod)]; + let C = vec![(1, 1, zero)]; + + let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C); + assert!(inst.is_err()); + assert_eq!(inst.err(), Some(R1CSError::InvalidScalar)); + } + + #[test] + fn test_padded_constraints() { + // parameters of the R1CS instance + let num_cons = 1; + let num_vars = 0; + let num_inputs = 3; + let num_non_zero_entries = 3; + + // We will encode the above constraints into three matrices, where + // the coefficients in the matrix are in the little-endian byte order + let mut A: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut B: Vec<(usize, usize, [u8; 32])> = Vec::new(); + let mut C: Vec<(usize, usize, [u8; 32])> = Vec::new(); + + // Create a^2 + b + 13 + A.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a + B.push((0, num_vars + 2, Scalar::one().to_bytes())); // 1*a + C.push((0, num_vars + 1, Scalar::one().to_bytes())); // 1*z + C.push((0, num_vars, (-Scalar::from(13u64)).to_bytes())); // -13*1 + C.push((0, num_vars + 3, (-Scalar::one()).to_bytes())); // -1*b + + // Var Assignments (Z_0 = 16 is the only output) + let vars = vec![Scalar::zero().to_bytes(); num_vars]; + + // create an InputsAssignment (a = 1, b = 2) + let mut inputs = vec![Scalar::zero().to_bytes(); num_inputs]; + inputs[0] = Scalar::from(16u64).to_bytes(); + inputs[1] = Scalar::from(1u64).to_bytes(); + inputs[2] = Scalar::from(2u64).to_bytes(); + + let assignment_inputs = InputsAssignment::new(&inputs).unwrap(); + let assignment_vars = VarsAssignment::new(&vars).unwrap(); + + // Check if instance is satisfiable + let inst = Instance::new(num_cons, num_vars, num_inputs, &A, &B, &C).unwrap(); + let res = inst.is_sat(&assignment_vars, &assignment_inputs); + assert!(res.unwrap(), "should be satisfied"); + + // SNARK public params + let gens = SNARKGens::new(num_cons, num_vars, num_inputs, num_non_zero_entries); + + // create a commitment to the R1CS instance + let (comm, decomm) = SNARK::encode(&inst, &gens); + + // produce a SNARK + let mut prover_transcript = Transcript::new(b"snark_example"); + let proof = SNARK::prove( + &inst, + &comm, + &decomm, + assignment_vars.clone(), + &assignment_inputs, + &gens, + &mut prover_transcript, + ); + + // verify the SNARK + let mut verifier_transcript = Transcript::new(b"snark_example"); + assert!(proof + .verify(&comm, &assignment_inputs, &mut 
verifier_transcript, &gens) + .is_ok()); + + // NIZK public params + let gens = NIZKGens::new(num_cons, num_vars, num_inputs); + + // produce a NIZK + let mut prover_transcript = Transcript::new(b"nizk_example"); + let proof = NIZK::prove( + &inst, + assignment_vars, + &assignment_inputs, + &gens, + &mut prover_transcript, + ); + + // verify the NIZK + let mut verifier_transcript = Transcript::new(b"nizk_example"); + assert!(proof + .verify(&inst, &assignment_inputs, &mut verifier_transcript, &gens) + .is_ok()); + } +} diff --git a/third_party/Dorian/src/math.rs b/third_party/Dorian/src/math.rs new file mode 100644 index 000000000..33e9e14d1 --- /dev/null +++ b/third_party/Dorian/src/math.rs @@ -0,0 +1,36 @@ +pub trait Math { + fn square_root(self) -> usize; + fn pow2(self) -> usize; + fn get_bits(self, num_bits: usize) -> Vec; + fn log_2(self) -> usize; +} + +impl Math for usize { + #[inline] + fn square_root(self) -> usize { + (self as f64).sqrt() as usize + } + + #[inline] + fn pow2(self) -> usize { + let base: usize = 2; + base.pow(self as u32) + } + + /// Returns the num_bits from n in a canonical order + fn get_bits(self, num_bits: usize) -> Vec { + (0..num_bits) + .map(|shift_amount| ((self & (1 << (num_bits - shift_amount - 1))) > 0)) + .collect::>() + } + + fn log_2(self) -> usize { + assert_ne!(self, 0); + + if self.is_power_of_two() { + (1usize.leading_zeros() - self.leading_zeros()) as usize + } else { + (0usize.leading_zeros() - self.leading_zeros()) as usize + } + } +} diff --git a/third_party/Dorian/src/nizk/bullet.rs b/third_party/Dorian/src/nizk/bullet.rs new file mode 100644 index 000000000..583903f8e --- /dev/null +++ b/third_party/Dorian/src/nizk/bullet.rs @@ -0,0 +1,243 @@ +//! This module is an adaptation of code from the bulletproofs crate. +//! See NOTICE.md for more details +#![allow(non_snake_case)] +#![allow(clippy::type_complexity)] +#![allow(clippy::too_many_arguments)] +use super::super::errors::ProofVerifyError; +use super::super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; +use super::super::math::Math; +use super::super::scalar::Scalar; +use super::super::transcript::ProofTranscript; +use core::iter; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct BulletReductionProof { + L_vec: Vec, + R_vec: Vec, +} + +impl BulletReductionProof { + /// Create an inner-product proof. + /// + /// The proof is created with respect to the bases \\(G\\). + /// + /// The `transcript` is passed in as a parameter so that the + /// challenges depend on the *entire* transcript (including parent + /// protocols). + /// + /// The lengths of the vectors must all be the same, and must all be + /// either 0 or a power of 2. + pub fn prove( + transcript: &mut Transcript, + Q: &GroupElement, + G_vec: &[GroupElement], + H: &GroupElement, + a_vec: &[Scalar], + b_vec: &[Scalar], + blind: &Scalar, + blinds_vec: &[(Scalar, Scalar)], + ) -> ( + BulletReductionProof, + GroupElement, + Scalar, + Scalar, + GroupElement, + Scalar, + ) { + // Create slices G, H, a, b backed by their respective + // vectors. This lets us reslice as we compress the lengths + // of the vectors in the main loop below. + let mut G = &mut G_vec.to_owned()[..]; + let mut a = &mut a_vec.to_owned()[..]; + let mut b = &mut b_vec.to_owned()[..]; + + // All of the input vectors must have a length that is a power of two. 
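+    // Reduction sketch: each round folds (a, b, G) to half length as
+    //   a' = u * a_L + u^{-1} * a_R,
+    //   b' = u^{-1} * b_L + u * b_R,
+    //   G' = u^{-1} * G_L + u * G_R,
+    // emitting one (L, R) cross-term pair per round; after log2(n) rounds a
+    // single (a, b, G) triple remains.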
+    let mut n = G.len();
+    assert!(n.is_power_of_two());
+    let lg_n = n.log_2();
+
+    // All of the input vectors must have the same length.
+    assert_eq!(G.len(), n);
+    assert_eq!(a.len(), n);
+    assert_eq!(b.len(), n);
+    assert_eq!(blinds_vec.len(), 2 * lg_n);
+
+    let mut L_vec = Vec::with_capacity(lg_n);
+    let mut R_vec = Vec::with_capacity(lg_n);
+    let mut blinds_iter = blinds_vec.iter();
+    let mut blind_fin = *blind;
+
+    while n != 1 {
+      n /= 2;
+      let (a_L, a_R) = a.split_at_mut(n);
+      let (b_L, b_R) = b.split_at_mut(n);
+      let (G_L, G_R) = G.split_at_mut(n);
+
+      let c_L = inner_product(a_L, b_R);
+      let c_R = inner_product(a_R, b_L);
+
+      let (blind_L, blind_R) = blinds_iter.next().unwrap();
+
+      let L = GroupElement::vartime_multiscalar_mul(
+        a_L
+          .iter()
+          .chain(iter::once(&c_L))
+          .chain(iter::once(blind_L)),
+        G_R.iter().chain(iter::once(Q)).chain(iter::once(H)),
+      );
+
+      let R = GroupElement::vartime_multiscalar_mul(
+        a_R
+          .iter()
+          .chain(iter::once(&c_R))
+          .chain(iter::once(blind_R)),
+        G_L.iter().chain(iter::once(Q)).chain(iter::once(H)),
+      );
+
+      transcript.append_point(b"L", &L.compress());
+      transcript.append_point(b"R", &R.compress());
+
+      let u = transcript.challenge_scalar(b"u");
+      let u_inv = u.invert().unwrap();
+
+      for i in 0..n {
+        a_L[i] = a_L[i] * u + u_inv * a_R[i];
+        b_L[i] = b_L[i] * u_inv + u * b_R[i];
+        G_L[i] = GroupElement::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
+      }
+
+      blind_fin = blind_fin + blind_L * u * u + blind_R * u_inv * u_inv;
+
+      L_vec.push(L.compress());
+      R_vec.push(R.compress());
+
+      a = a_L;
+      b = b_L;
+      G = G_L;
+    }
+
+    let Gamma_hat =
+      GroupElement::vartime_multiscalar_mul(&[a[0], a[0] * b[0], blind_fin], &[G[0], *Q, *H]);
+
+    (
+      BulletReductionProof { L_vec, R_vec },
+      Gamma_hat,
+      a[0],
+      b[0],
+      G[0],
+      blind_fin,
+    )
+  }
+
+  /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
+  /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
+  /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof.
+  fn verification_scalars(
+    &self,
+    n: usize,
+    transcript: &mut Transcript,
+  ) -> Result<(Vec<Scalar>, Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
+    let lg_n = self.L_vec.len();
+    if lg_n >= 32 {
+      // 4 billion multiplications should be enough for anyone
+      // and this check prevents overflow in 1 << lg_n below
+      return Err(ProofVerifyError::InternalError);
+    }
+    if n != (1 << lg_n) {
+      return Err(ProofVerifyError::InternalError);
+    }
+
+    // 1. Recompute u_k,...,u_1 based on the proof transcript
+    let mut challenges = Vec::with_capacity(lg_n);
+    for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
+      transcript.append_point(b"L", L);
+      transcript.append_point(b"R", R);
+      challenges.push(transcript.challenge_scalar(b"u"));
+    }
+
+    // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
+    let mut challenges_inv = challenges.clone();
+    let allinv = Scalar::batch_invert(&mut challenges_inv);
+
+    // 3. Compute u_i^2 and (1/u_i)^2
+    for i in 0..lg_n {
+      challenges[i] = challenges[i].square();
+      challenges_inv[i] = challenges_inv[i].square();
+    }
+    let challenges_sq = challenges;
+    let challenges_inv_sq = challenges_inv;
+
+    // 4. Compute s values inductively
+    let mut s = Vec::with_capacity(n);
+    s.push(allinv);
+    for i in 1..n {
+      let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
+      let k = 1 << lg_i;
+      // The challenges are stored in "creation order" as [u_k,...,u_1],
+      // so u_{lg(i)+1} is indexed by (lg_n - 1) - lg_i
+      let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
+      s.push(s[i - k] * u_lg_i_sq);
+    }
+
+    Ok((challenges_sq, challenges_inv_sq, s))
+  }
+
+  pub fn verify(
+    &self,
+    n: usize,
+    a: &[Scalar],
+    transcript: &mut Transcript,
+    Gamma: &GroupElement,
+    G: &[GroupElement],
+  ) -> Result<(GroupElement, GroupElement, Scalar), ProofVerifyError> {
+    let (u_sq, u_inv_sq, s) = self.verification_scalars(n, transcript)?;
+
+    let Ls = self
+      .L_vec
+      .iter()
+      .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
+      .collect::<Result<Vec<_>, _>>()?;
+
+    let Rs = self
+      .R_vec
+      .iter()
+      .map(|p| p.decompress().ok_or(ProofVerifyError::InternalError))
+      .collect::<Result<Vec<_>, _>>()?;
+
+    let G_hat = GroupElement::vartime_multiscalar_mul(s.iter(), G.iter());
+    let a_hat = inner_product(a, &s);
+
+    let Gamma_hat = GroupElement::vartime_multiscalar_mul(
+      u_sq
+        .iter()
+        .chain(u_inv_sq.iter())
+        .chain(iter::once(&Scalar::one())),
+      Ls.iter().chain(Rs.iter()).chain(iter::once(Gamma)),
+    );
+
+    Ok((G_hat, Gamma_hat, a_hat))
+  }
+}
+
+/// Computes an inner product of two vectors
+/// \\[
+/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
+/// \\]
+/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
+pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { + assert!( + a.len() == b.len(), + "inner_product(a,b): lengths of vectors do not match" + ); + let mut out = Scalar::zero(); + for i in 0..a.len() { + out += a[i] * b[i]; + } + out +} diff --git a/third_party/Dorian/src/nizk/mod.rs b/third_party/Dorian/src/nizk/mod.rs new file mode 100644 index 000000000..2d3749317 --- /dev/null +++ b/third_party/Dorian/src/nizk/mod.rs @@ -0,0 +1,735 @@ +#![allow(clippy::too_many_arguments)] +use super::commitments::{Commitments, MultiCommitGens}; +use super::errors::ProofVerifyError; +use super::group::{CompressedGroup, CompressedGroupExt}; +use super::math::Math; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; + +mod bullet; +use bullet::BulletReductionProof; + +#[derive(Serialize, Deserialize, Debug)] +pub struct KnowledgeProof { + alpha: CompressedGroup, + z1: Scalar, + z2: Scalar, +} + +impl KnowledgeProof { + fn protocol_name() -> &'static [u8] { + b"knowledge proof" + } + + pub fn prove( + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + x: &Scalar, + r: &Scalar, + ) -> (KnowledgeProof, CompressedGroup) { + transcript.append_protocol_name(KnowledgeProof::protocol_name()); + + // produce two random Scalars + let t1 = random_tape.random_scalar(b"t1"); + let t2 = random_tape.random_scalar(b"t2"); + + let C = x.commit(r, gens_n).compress(); + C.append_to_transcript(b"C", transcript); + + let alpha = t1.commit(&t2, gens_n).compress(); + alpha.append_to_transcript(b"alpha", transcript); + + let c = transcript.challenge_scalar(b"c"); + + let z1 = x * c + t1; + let z2 = r * c + t2; + + (KnowledgeProof { alpha, z1, z2 }, C) + } + + pub fn verify( + &self, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + C: &CompressedGroup, + ) -> Result<(), ProofVerifyError> { + transcript.append_protocol_name(KnowledgeProof::protocol_name()); + C.append_to_transcript(b"C", transcript); + self.alpha.append_to_transcript(b"alpha", transcript); + + let c = transcript.challenge_scalar(b"c"); + + let lhs = self.z1.commit(&self.z2, gens_n).compress(); + let rhs = (c * C.unpack()? 
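+    // completeness sketch: z1 = x*c + t1 and z2 = r*c + t2, so
+    // commit(z1, z2) = c * commit(x, r) + commit(t1, t2) = c * C + alpha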
+ self.alpha.unpack()?).compress(); + + if lhs == rhs { + Ok(()) + } else { + Err(ProofVerifyError::InternalError) + } + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct EqualityProof { + alpha: CompressedGroup, + z: Scalar, +} + +impl EqualityProof { + fn protocol_name() -> &'static [u8] { + b"equality proof" + } + + pub fn prove( + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + v1: &Scalar, + s1: &Scalar, + v2: &Scalar, + s2: &Scalar, + ) -> (EqualityProof, CompressedGroup, CompressedGroup) { + transcript.append_protocol_name(EqualityProof::protocol_name()); + + // produce a random Scalar + let r = random_tape.random_scalar(b"r"); + + let C1 = v1.commit(s1, gens_n).compress(); + C1.append_to_transcript(b"C1", transcript); + + let C2 = v2.commit(s2, gens_n).compress(); + C2.append_to_transcript(b"C2", transcript); + + let alpha = (r * gens_n.h).compress(); + alpha.append_to_transcript(b"alpha", transcript); + + let c = transcript.challenge_scalar(b"c"); + + let z = c * (s1 - s2) + r; + + (EqualityProof { alpha, z }, C1, C2) + } + + pub fn verify( + &self, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + C1: &CompressedGroup, + C2: &CompressedGroup, + ) -> Result<(), ProofVerifyError> { + transcript.append_protocol_name(EqualityProof::protocol_name()); + C1.append_to_transcript(b"C1", transcript); + C2.append_to_transcript(b"C2", transcript); + self.alpha.append_to_transcript(b"alpha", transcript); + + let c = transcript.challenge_scalar(b"c"); + let rhs = { + let C = C1.unpack()? - C2.unpack()?; + (c * C + self.alpha.unpack()?).compress() + }; + + let lhs = (self.z * gens_n.h).compress(); + + if lhs == rhs { + Ok(()) + } else { + Err(ProofVerifyError::InternalError) + } + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct ProductProof { + alpha: CompressedGroup, + beta: CompressedGroup, + delta: CompressedGroup, + z: [Scalar; 5], +} + +impl ProductProof { + fn protocol_name() -> &'static [u8] { + b"product proof" + } + + pub fn prove( + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + x: &Scalar, + rX: &Scalar, + y: &Scalar, + rY: &Scalar, + z: &Scalar, + rZ: &Scalar, + ) -> ( + ProductProof, + CompressedGroup, + CompressedGroup, + CompressedGroup, + ) { + transcript.append_protocol_name(ProductProof::protocol_name()); + + // produce five random Scalar + let b1 = random_tape.random_scalar(b"b1"); + let b2 = random_tape.random_scalar(b"b2"); + let b3 = random_tape.random_scalar(b"b3"); + let b4 = random_tape.random_scalar(b"b4"); + let b5 = random_tape.random_scalar(b"b5"); + + let X = x.commit(rX, gens_n).compress(); + X.append_to_transcript(b"X", transcript); + + let Y = y.commit(rY, gens_n).compress(); + Y.append_to_transcript(b"Y", transcript); + + let Z = z.commit(rZ, gens_n).compress(); + Z.append_to_transcript(b"Z", transcript); + + let alpha = b1.commit(&b2, gens_n).compress(); + alpha.append_to_transcript(b"alpha", transcript); + + let beta = b3.commit(&b4, gens_n).compress(); + beta.append_to_transcript(b"beta", transcript); + + let delta = { + let gens_X = &MultiCommitGens { + n: 1, + G: vec![X.decompress().unwrap()], + h: gens_n.h, + }; + b3.commit(&b5, gens_X).compress() + }; + delta.append_to_transcript(b"delta", transcript); + + let c = transcript.challenge_scalar(b"c"); + + let z1 = b1 + c * x; + let z2 = b2 + c * rX; + let z3 = b3 + c * y; + let z4 = b4 + c * rY; + let z5 = b5 + c * (rZ - rX * y); + let z = [z1, z2, z3, z4, z5]; + + ( + ProductProof { + 
+        alpha,
+        beta,
+        delta,
+        z,
+      },
+      X,
+      Y,
+      Z,
+    )
+  }
+
+  fn check_equality(
+    P: &CompressedGroup,
+    X: &CompressedGroup,
+    c: &Scalar,
+    gens_n: &MultiCommitGens,
+    z1: &Scalar,
+    z2: &Scalar,
+  ) -> bool {
+    let lhs = (P.decompress().unwrap() + c * X.decompress().unwrap()).compress();
+    let rhs = z1.commit(z2, gens_n).compress();
+
+    lhs == rhs
+  }
+
+  pub fn verify(
+    &self,
+    gens_n: &MultiCommitGens,
+    transcript: &mut Transcript,
+    X: &CompressedGroup,
+    Y: &CompressedGroup,
+    Z: &CompressedGroup,
+  ) -> Result<(), ProofVerifyError> {
+    transcript.append_protocol_name(ProductProof::protocol_name());
+
+    X.append_to_transcript(b"X", transcript);
+    Y.append_to_transcript(b"Y", transcript);
+    Z.append_to_transcript(b"Z", transcript);
+    self.alpha.append_to_transcript(b"alpha", transcript);
+    self.beta.append_to_transcript(b"beta", transcript);
+    self.delta.append_to_transcript(b"delta", transcript);
+
+    let z1 = self.z[0];
+    let z2 = self.z[1];
+    let z3 = self.z[2];
+    let z4 = self.z[3];
+    let z5 = self.z[4];
+
+    let c = transcript.challenge_scalar(b"c");
+
+    if ProductProof::check_equality(&self.alpha, X, &c, gens_n, &z1, &z2)
+      && ProductProof::check_equality(&self.beta, Y, &c, gens_n, &z3, &z4)
+      && ProductProof::check_equality(
+        &self.delta,
+        Z,
+        &c,
+        &MultiCommitGens {
+          n: 1,
+          G: vec![X.unpack()?],
+          h: gens_n.h,
+        },
+        &z3,
+        &z5,
+      )
+    {
+      Ok(())
+    } else {
+      Err(ProofVerifyError::InternalError)
+    }
+  }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct DotProductProof {
+  delta: CompressedGroup,
+  beta: CompressedGroup,
+  z: Vec<Scalar>,
+  z_delta: Scalar,
+  z_beta: Scalar,
+}
+
+impl DotProductProof {
+  fn protocol_name() -> &'static [u8] {
+    b"dot product proof"
+  }
+
+  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
+    assert_eq!(a.len(), b.len());
+    (0..a.len()).map(|i| a[i] * b[i]).sum()
+  }
+
+  pub fn prove(
+    gens_1: &MultiCommitGens,
+    gens_n: &MultiCommitGens,
+    transcript: &mut Transcript,
+    random_tape: &mut RandomTape,
+    x_vec: &[Scalar],
+    blind_x: &Scalar,
+    a_vec: &[Scalar],
+    y: &Scalar,
+    blind_y: &Scalar,
+  ) -> (DotProductProof, CompressedGroup, CompressedGroup) {
+    transcript.append_protocol_name(DotProductProof::protocol_name());
+
+    let n = x_vec.len();
+    assert_eq!(x_vec.len(), a_vec.len());
+    assert_eq!(gens_n.n, a_vec.len());
+    assert_eq!(gens_1.n, 1);
+
+    // produce randomness for the proofs
+    let d_vec = random_tape.random_vector(b"d_vec", n);
+    let r_delta = random_tape.random_scalar(b"r_delta");
+    let r_beta = random_tape.random_scalar(b"r_beta");
+
+    let Cx = x_vec.commit(blind_x, gens_n).compress();
+    Cx.append_to_transcript(b"Cx", transcript);
+
+    let Cy = y.commit(blind_y, gens_1).compress();
+    Cy.append_to_transcript(b"Cy", transcript);
+
+    a_vec.append_to_transcript(b"a", transcript);
+
+    let delta = d_vec.commit(&r_delta, gens_n).compress();
+    delta.append_to_transcript(b"delta", transcript);
+
+    let dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec);
+
+    let beta = dotproduct_a_d.commit(&r_beta, gens_1).compress();
+    beta.append_to_transcript(b"beta", transcript);
+
+    let c = transcript.challenge_scalar(b"c");
+
+    let z = (0..d_vec.len())
+      .map(|i| c * x_vec[i] + d_vec[i])
+      .collect::<Vec<Scalar>>();
+
+    let z_delta = c * blind_x + r_delta;
+    let z_beta = c * blind_y + r_beta;
+
+    (
+      DotProductProof {
+        delta,
+        beta,
+        z,
+        z_delta,
+        z_beta,
+      },
+      Cx,
+      Cy,
+    )
+  }
+
+  pub fn verify(
+    &self,
+    gens_1: &MultiCommitGens,
+    gens_n: &MultiCommitGens,
+    transcript: &mut Transcript,
+    a: &[Scalar],
+    Cx: &CompressedGroup,
+    Cy: &CompressedGroup,
+  ) -> Result<(), ProofVerifyError> {
+    assert_eq!(gens_n.n, a.len());
+    assert_eq!(gens_1.n, 1);
+
+    transcript.append_protocol_name(DotProductProof::protocol_name());
+    Cx.append_to_transcript(b"Cx", transcript);
+    Cy.append_to_transcript(b"Cy", transcript);
+    a.append_to_transcript(b"a", transcript);
+    self.delta.append_to_transcript(b"delta", transcript);
+    self.beta.append_to_transcript(b"beta", transcript);
+
+    let c = transcript.challenge_scalar(b"c");
+
+    let mut result =
+      c * Cx.unpack()? + self.delta.unpack()? == self.z.commit(&self.z_delta, gens_n);
+
+    let dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a);
+    result &= c * Cy.unpack()? + self.beta.unpack()? == dotproduct_z_a.commit(&self.z_beta, gens_1);
+
+    if result {
+      Ok(())
+    } else {
+      Err(ProofVerifyError::InternalError)
+    }
+  }
+}
+
+#[derive(Serialize, Deserialize)] // to test
+pub struct DotProductProofGens {
+  n: usize,
+  pub gens_n: MultiCommitGens,
+  pub gens_1: MultiCommitGens,
+}
+
+impl DotProductProofGens {
+  pub fn new(n: usize, label: &[u8]) -> Self {
+    let (gens_n, gens_1) = MultiCommitGens::new(n + 1, label).split_at(n);
+    DotProductProofGens { n, gens_n, gens_1 }
+  }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct DotProductProofLog {
+  bullet_reduction_proof: BulletReductionProof,
+  delta: CompressedGroup,
+  beta: CompressedGroup,
+  z1: Scalar,
+  z2: Scalar,
+}
+
+impl DotProductProofLog {
+  fn protocol_name() -> &'static [u8] {
+    b"dot product proof (log)"
+  }
+
+  pub fn compute_dotproduct(a: &[Scalar], b: &[Scalar]) -> Scalar {
+    assert_eq!(a.len(), b.len());
+    (0..a.len()).map(|i| a[i] * b[i]).sum()
+  }
+
+  pub fn prove(
+    gens: &DotProductProofGens,
+    transcript: &mut Transcript,
+    random_tape: &mut RandomTape,
+    x_vec: &[Scalar],
+    blind_x: &Scalar,
+    a_vec: &[Scalar],
+    y: &Scalar,
+    blind_y: &Scalar,
+  ) -> (DotProductProofLog, CompressedGroup, CompressedGroup) {
+    transcript.append_protocol_name(DotProductProofLog::protocol_name());
+
+    let n = x_vec.len();
+    assert_eq!(x_vec.len(), a_vec.len());
+    assert_eq!(gens.n, n);
+
+    // produce randomness for generating a proof
+    let d = random_tape.random_scalar(b"d");
+    let r_delta = random_tape.random_scalar(b"r_delta");
+    let r_beta = random_tape.random_scalar(b"r_beta");
+    let blinds_vec = {
+      let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2());
+      let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2());
+      (0..v1.len())
+        .map(|i| (v1[i], v2[i]))
+        .collect::<Vec<(Scalar, Scalar)>>()
+    };
+
+    let Cx = x_vec.commit(blind_x, &gens.gens_n).compress();
+    Cx.append_to_transcript(b"Cx", transcript);
+
+    let Cy = y.commit(blind_y, &gens.gens_1).compress();
+    Cy.append_to_transcript(b"Cy", transcript);
+
+    a_vec.append_to_transcript(b"a", transcript);
+
+    // sample a random base and scale the generator used for
+    // the output of the inner product
+    let r = transcript.challenge_scalar(b"r");
+    let gens_1_scaled = gens.gens_1.scale(&r);
+
+    let blind_Gamma = blind_x + r * blind_y;
+    let (bullet_reduction_proof, _Gamma_hat, x_hat, a_hat, g_hat, rhat_Gamma) =
+      BulletReductionProof::prove(
+        transcript,
+        &gens_1_scaled.G[0],
+        &gens.gens_n.G,
+        &gens.gens_n.h,
+        x_vec,
+        a_vec,
+        &blind_Gamma,
+        &blinds_vec,
+      );
+    let y_hat = x_hat * a_hat;
+
+    let delta = {
+      let gens_hat = MultiCommitGens {
+        n: 1,
+        G: vec![g_hat],
+        h: gens.gens_1.h,
+      };
+      d.commit(&r_delta, &gens_hat).compress()
+    };
+    delta.append_to_transcript(b"delta", transcript);
+
+    let beta = d.commit(&r_beta, &gens_1_scaled).compress();
+    beta.append_to_transcript(b"beta", transcript);
+
+    let c = transcript.challenge_scalar(b"c");
+
+    let z1 = d + c * y_hat;
+    let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta;
+
+    (
+      DotProductProofLog {
+        bullet_reduction_proof,
+        delta,
+        beta,
+        z1,
+        z2,
+      },
+      Cx,
+      Cy,
+    )
+  }
+
+  pub fn verify(
+    &self,
+    n: usize,
+    gens: &DotProductProofGens,
+    transcript: &mut Transcript,
+    a: &[Scalar],
+    Cx: &CompressedGroup,
+    Cy: &CompressedGroup,
+  ) -> Result<(), ProofVerifyError> {
+    assert_eq!(gens.n, n);
+    assert_eq!(a.len(), n);
+
+    transcript.append_protocol_name(DotProductProofLog::protocol_name());
+    Cx.append_to_transcript(b"Cx", transcript);
+    Cy.append_to_transcript(b"Cy", transcript);
+    a.append_to_transcript(b"a", transcript);
+
+    // sample a random base and scale the generator used for
+    // the output of the inner product
+    let r = transcript.challenge_scalar(b"r");
+    let gens_1_scaled = gens.gens_1.scale(&r);
+
+    let Gamma = Cx.unpack()? + r * Cy.unpack()?;
+
+    let (g_hat, Gamma_hat, a_hat) =
+      self
+        .bullet_reduction_proof
+        .verify(n, a, transcript, &Gamma, &gens.gens_n.G)?;
+    self.delta.append_to_transcript(b"delta", transcript);
+    self.beta.append_to_transcript(b"beta", transcript);
+
+    let c = transcript.challenge_scalar(b"c");
+
+    let c_s = &c;
+    let beta_s = self.beta.unpack()?;
+    let a_hat_s = &a_hat;
+    let delta_s = self.delta.unpack()?;
+    let z1_s = &self.z1;
+    let z2_s = &self.z2;
+
+    let lhs = ((Gamma_hat * c_s + beta_s) * a_hat_s + delta_s).compress();
+    let rhs = ((g_hat + gens_1_scaled.G[0] * a_hat_s) * z1_s + gens_1_scaled.h * z2_s).compress();
+
+    if lhs == rhs {
+      Ok(())
+    } else {
+      Err(ProofVerifyError::InternalError)
+    }
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+  use rand::rngs::OsRng;
+  #[test]
+  fn check_knowledgeproof() {
+    let mut csprng: OsRng = OsRng;
+
+    let gens_1 = MultiCommitGens::new(1, b"test-knowledgeproof");
+
+    let x = Scalar::random(&mut csprng);
+    let r = Scalar::random(&mut csprng);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, committed_value) =
+      KnowledgeProof::prove(&gens_1, &mut prover_transcript, &mut random_tape, &x, &r);
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(&gens_1, &mut verifier_transcript, &committed_value)
+      .is_ok());
+  }
+
+  #[test]
+  fn check_equalityproof() {
+    let mut csprng: OsRng = OsRng;
+
+    let gens_1 = MultiCommitGens::new(1, b"test-equalityproof");
+    let v1 = Scalar::random(&mut csprng);
+    let v2 = v1;
+    let s1 = Scalar::random(&mut csprng);
+    let s2 = Scalar::random(&mut csprng);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, C1, C2) = EqualityProof::prove(
+      &gens_1,
+      &mut prover_transcript,
+      &mut random_tape,
+      &v1,
+      &s1,
+      &v2,
+      &s2,
+    );
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(&gens_1, &mut verifier_transcript, &C1, &C2)
+      .is_ok());
+  }
+
+  #[test]
+  fn check_productproof() {
+    let mut csprng: OsRng = OsRng;
+
+    let gens_1 = MultiCommitGens::new(1, b"test-productproof");
+    let x = Scalar::random(&mut csprng);
+    let rX = Scalar::random(&mut csprng);
+    let y = Scalar::random(&mut csprng);
+    let rY = Scalar::random(&mut csprng);
+    let z = x * y;
+    let rZ = Scalar::random(&mut csprng);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, X, Y, Z) = ProductProof::prove(
+      &gens_1,
+      &mut prover_transcript,
+      &mut random_tape,
+      &x,
+      &rX,
+      &y,
+      &rY,
+      &z,
+      &rZ,
+    );
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(&gens_1, &mut verifier_transcript, &X, &Y, &Z)
+      .is_ok());
+  }
+
+  #[test]
+  fn check_dotproductproof() {
+    let mut csprng: OsRng = OsRng;
+
+    let n = 1024;
+
+    let gens_1 = MultiCommitGens::new(1, b"test-two");
+    let gens_1024 = MultiCommitGens::new(n, b"test-1024");
+
+    let mut x: Vec<Scalar> = Vec::new();
+    let mut a: Vec<Scalar> = Vec::new();
+    for _ in 0..n {
+      x.push(Scalar::random(&mut csprng));
+      a.push(Scalar::random(&mut csprng));
+    }
+    let y = DotProductProof::compute_dotproduct(&x, &a);
+    let r_x = Scalar::random(&mut csprng);
+    let r_y = Scalar::random(&mut csprng);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, Cx, Cy) = DotProductProof::prove(
+      &gens_1,
+      &gens_1024,
+      &mut prover_transcript,
+      &mut random_tape,
+      &x,
+      &r_x,
+      &a,
+      &y,
+      &r_y,
+    );
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(&gens_1, &gens_1024, &mut verifier_transcript, &a, &Cx, &Cy)
+      .is_ok());
+  }
+
+  #[test]
+  fn check_dotproductproof_log() {
+    let mut csprng: OsRng = OsRng;
+
+    let n = 1024;
+
+    let gens = DotProductProofGens::new(n, b"test-1024");
+
+    let x: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
+    let a: Vec<Scalar> = (0..n).map(|_i| Scalar::random(&mut csprng)).collect();
+    let y = DotProductProofLog::compute_dotproduct(&x, &a);
+
+    let r_x = Scalar::random(&mut csprng);
+    let r_y = Scalar::random(&mut csprng);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, Cx, Cy) = DotProductProofLog::prove(
+      &gens,
+      &mut prover_transcript,
+      &mut random_tape,
+      &x,
+      &r_x,
+      &a,
+      &y,
+      &r_y,
+    );
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(n, &gens, &mut verifier_transcript, &a, &Cx, &Cy)
+      .is_ok());
+  }
+}
diff --git a/third_party/Dorian/src/product_tree.rs b/third_party/Dorian/src/product_tree.rs
new file mode 100644
index 000000000..6e2f93276
--- /dev/null
+++ b/third_party/Dorian/src/product_tree.rs
@@ -0,0 +1,486 @@
+#![allow(dead_code)]
+use super::dense_mlpoly::DensePolynomial;
+use super::dense_mlpoly::EqPolynomial;
+use super::math::Math;
+use super::scalar::Scalar;
+use super::sumcheck::SumcheckInstanceProof;
+use super::transcript::ProofTranscript;
+use merlin::Transcript;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug)]
+pub struct ProductCircuit {
+  left_vec: Vec<DensePolynomial>,
+  right_vec: Vec<DensePolynomial>,
+}
+
+impl ProductCircuit {
+  fn compute_layer(
+    inp_left: &DensePolynomial,
+    inp_right: &DensePolynomial,
+  ) -> (DensePolynomial, DensePolynomial) {
+    let len = inp_left.len() + inp_right.len();
+    let outp_left = (0..len / 4)
+      .map(|i| inp_left[i] * inp_right[i])
+      .collect::<Vec<Scalar>>();
+    let outp_right = (len / 4..len / 2)
+      .map(|i| inp_left[i] * inp_right[i])
+      .collect::<Vec<Scalar>>();
+
+    (
+      DensePolynomial::new(outp_left),
+      DensePolynomial::new(outp_right),
+    )
+  }
+
+  pub fn new(poly: &DensePolynomial) -> Self {
+    let mut left_vec: Vec<DensePolynomial> = Vec::new();
+    let mut right_vec: Vec<DensePolynomial> = Vec::new();
+
+    let num_layers = poly.len().log_2();
+    let (outp_left, outp_right) = poly.split(poly.len() / 2);
+
+    left_vec.push(outp_left);
+    right_vec.push(outp_right);
+
+    for i in 0..num_layers - 1 {
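+      // each call to compute_layer halves the layer size, so after
+      // num_layers = log2(poly.len()) layers a single product value remains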
+      let (outp_left, outp_right) =
+        ProductCircuit::compute_layer(&left_vec[i], &right_vec[i]);
+      left_vec.push(outp_left);
+      right_vec.push(outp_right);
+    }
+
+    ProductCircuit {
+      left_vec,
+      right_vec,
+    }
+  }
+
+  pub fn evaluate(&self) -> Scalar {
+    let len = self.left_vec.len();
+    assert_eq!(self.left_vec[len - 1].get_num_vars(), 0);
+    assert_eq!(self.right_vec[len - 1].get_num_vars(), 0);
+    self.left_vec[len - 1][0] * self.right_vec[len - 1][0]
+  }
+}
+
+pub struct DotProductCircuit {
+  left: DensePolynomial,
+  right: DensePolynomial,
+  weight: DensePolynomial,
+}
+
+impl DotProductCircuit {
+  pub fn new(left: DensePolynomial, right: DensePolynomial, weight: DensePolynomial) -> Self {
+    assert_eq!(left.len(), right.len());
+    assert_eq!(left.len(), weight.len());
+    DotProductCircuit {
+      left,
+      right,
+      weight,
+    }
+  }
+
+  pub fn evaluate(&self) -> Scalar {
+    (0..self.left.len())
+      .map(|i| self.left[i] * self.right[i] * self.weight[i])
+      .sum()
+  }
+
+  pub fn split(&mut self) -> (DotProductCircuit, DotProductCircuit) {
+    let idx = self.left.len() / 2;
+    assert_eq!(idx * 2, self.left.len());
+    let (l1, l2) = self.left.split(idx);
+    let (r1, r2) = self.right.split(idx);
+    let (w1, w2) = self.weight.split(idx);
+    (
+      DotProductCircuit {
+        left: l1,
+        right: r1,
+        weight: w1,
+      },
+      DotProductCircuit {
+        left: l2,
+        right: r2,
+        weight: w2,
+      },
+    )
+  }
+}
+
+#[allow(dead_code)]
+#[derive(Debug, Serialize, Deserialize)]
+pub struct LayerProof {
+  pub proof: SumcheckInstanceProof,
+  pub claims: Vec<Scalar>,
+}
+
+#[allow(dead_code)]
+impl LayerProof {
+  pub fn verify(
+    &self,
+    claim: Scalar,
+    num_rounds: usize,
+    degree_bound: usize,
+    transcript: &mut Transcript,
+  ) -> (Scalar, Vec<Scalar>) {
+    self
+      .proof
+      .verify(claim, num_rounds, degree_bound, transcript)
+      .unwrap()
+  }
+}
+
+#[allow(dead_code)]
+#[derive(Debug, Serialize, Deserialize)]
+pub struct LayerProofBatched {
+  pub proof: SumcheckInstanceProof,
+  pub claims_prod_left: Vec<Scalar>,
+  pub claims_prod_right: Vec<Scalar>,
+}
+
+#[allow(dead_code)]
+impl LayerProofBatched {
+  pub fn verify(
+    &self,
+    claim: Scalar,
+    num_rounds: usize,
+    degree_bound: usize,
+    transcript: &mut Transcript,
+  ) -> (Scalar, Vec<Scalar>) {
+    self
+      .proof
+      .verify(claim, num_rounds, degree_bound, transcript)
+      .unwrap()
+  }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ProductCircuitEvalProof {
+  proof: Vec<LayerProof>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ProductCircuitEvalProofBatched {
+  proof: Vec<LayerProofBatched>,
+  claims_dotp: (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>),
+}
+
+impl ProductCircuitEvalProof {
+  #![allow(dead_code)]
+  pub fn prove(
+    circuit: &mut ProductCircuit,
+    transcript: &mut Transcript,
+  ) -> (Self, Scalar, Vec<Scalar>) {
+    let mut proof: Vec<LayerProof> = Vec::new();
+    let num_layers = circuit.left_vec.len();
+
+    let mut claim = circuit.evaluate();
+    let mut rand = Vec::new();
+    for layer_id in (0..num_layers).rev() {
+      let len = circuit.left_vec[layer_id].len() + circuit.right_vec[layer_id].len();
+
+      let mut poly_C = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
+      assert_eq!(poly_C.len(), len / 2);
+
+      let num_rounds_prod = poly_C.len().log_2();
+      let comb_func_prod = |poly_A_comp: &Scalar,
+                            poly_B_comp: &Scalar,
+                            poly_C_comp: &Scalar|
+       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
+      let (proof_prod, rand_prod, claims_prod) = SumcheckInstanceProof::prove_cubic(
+        &claim,
+        num_rounds_prod,
+        &mut circuit.left_vec[layer_id],
+        &mut circuit.right_vec[layer_id],
+        &mut poly_C,
+        comb_func_prod,
+        transcript,
+      );
+
+      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
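+      // the right claim is appended next; r_layer is sampled only after
+      // both product claims have been bound to the transcript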
+      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
+
+      // produce a random challenge
+      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
+      claim = claims_prod[0] + r_layer * (claims_prod[1] - claims_prod[0]);
+
+      let mut ext = vec![r_layer];
+      ext.extend(rand_prod);
+      rand = ext;
+
+      proof.push(LayerProof {
+        proof: proof_prod,
+        claims: claims_prod[0..claims_prod.len() - 1].to_vec(),
+      });
+    }
+
+    (ProductCircuitEvalProof { proof }, claim, rand)
+  }
+
+  pub fn verify(
+    &self,
+    eval: Scalar,
+    len: usize,
+    transcript: &mut Transcript,
+  ) -> (Scalar, Vec<Scalar>) {
+    let num_layers = len.log_2();
+    let mut claim = eval;
+    let mut rand: Vec<Scalar> = Vec::new();
+    assert_eq!(self.proof.len(), num_layers);
+    for (num_rounds, i) in (0..num_layers).enumerate() {
+      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
+
+      let claims_prod = &self.proof[i].claims;
+      transcript.append_scalar(b"claim_prod_left", &claims_prod[0]);
+      transcript.append_scalar(b"claim_prod_right", &claims_prod[1]);
+
+      assert_eq!(rand.len(), rand_prod.len());
+      let eq: Scalar = (0..rand.len())
+        .map(|i| {
+          rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i])
+        })
+        .product();
+      assert_eq!(claims_prod[0] * claims_prod[1] * eq, claim_last);
+
+      // produce a random challenge
+      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
+      claim = (Scalar::one() - r_layer) * claims_prod[0] + r_layer * claims_prod[1];
+      let mut ext = vec![r_layer];
+      ext.extend(rand_prod);
+      rand = ext;
+    }
+
+    (claim, rand)
+  }
+}
+
+impl ProductCircuitEvalProofBatched {
+  pub fn prove(
+    prod_circuit_vec: &mut Vec<&mut ProductCircuit>,
+    dotp_circuit_vec: &mut Vec<&mut DotProductCircuit>,
+    transcript: &mut Transcript,
+  ) -> (Self, Vec<Scalar>) {
+    assert!(!prod_circuit_vec.is_empty());
+
+    let mut claims_dotp_final = (Vec::new(), Vec::new(), Vec::new());
+
+    let mut proof_layers: Vec<LayerProofBatched> = Vec::new();
+    let num_layers = prod_circuit_vec[0].left_vec.len();
+    let mut claims_to_verify = (0..prod_circuit_vec.len())
+      .map(|i| prod_circuit_vec[i].evaluate())
+      .collect::<Vec<Scalar>>();
+    let mut rand = Vec::new();
+    for layer_id in (0..num_layers).rev() {
+      // prepare parallel instances that share poly_C first
+      let len = prod_circuit_vec[0].left_vec[layer_id].len()
+        + prod_circuit_vec[0].right_vec[layer_id].len();
+
+      let mut poly_C_par = DensePolynomial::new(EqPolynomial::new(rand.clone()).evals());
+      assert_eq!(poly_C_par.len(), len / 2);
+
+      let num_rounds_prod = poly_C_par.len().log_2();
+      let comb_func_prod = |poly_A_comp: &Scalar,
+                            poly_B_comp: &Scalar,
+                            poly_C_comp: &Scalar|
+       -> Scalar { poly_A_comp * poly_B_comp * poly_C_comp };
+
+      let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new();
+      let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new();
+      for prod_circuit in prod_circuit_vec.iter_mut() {
+        poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]);
+        poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id])
+      }
+      let poly_vec_par = (
+        &mut poly_A_batched_par,
+        &mut poly_B_batched_par,
+        &mut poly_C_par,
+      );
+
+      // prepare sequential instances that don't share poly_C
+      let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
+      let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
+      let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new();
+      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
+        // add additional claims
+        for item in dotp_circuit_vec.iter() {
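+          // each dot-product circuit contributes one claim at the final
+          // layer; its operands must have the final layer's length (len / 2)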
+          claims_to_verify.push(item.evaluate());
+          assert_eq!(len / 2, item.left.len());
+          assert_eq!(len / 2, item.right.len());
+          assert_eq!(len / 2, item.weight.len());
+        }
+
+        for dotp_circuit in dotp_circuit_vec.iter_mut() {
+          poly_A_batched_seq.push(&mut dotp_circuit.left);
+          poly_B_batched_seq.push(&mut dotp_circuit.right);
+          poly_C_batched_seq.push(&mut dotp_circuit.weight);
+        }
+      }
+      let poly_vec_seq = (
+        &mut poly_A_batched_seq,
+        &mut poly_B_batched_seq,
+        &mut poly_C_batched_seq,
+      );
+
+      // produce a fresh set of coeffs and a joint claim
+      let coeff_vec =
+        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
+      let claim = (0..claims_to_verify.len())
+        .map(|i| claims_to_verify[i] * coeff_vec[i])
+        .sum();
+
+      let (proof, rand_prod, claims_prod, claims_dotp) = SumcheckInstanceProof::prove_cubic_batched(
+        &claim,
+        num_rounds_prod,
+        poly_vec_par,
+        poly_vec_seq,
+        &coeff_vec,
+        comb_func_prod,
+        transcript,
+      );
+
+      let (claims_prod_left, claims_prod_right, _claims_eq) = claims_prod;
+      for i in 0..prod_circuit_vec.len() {
+        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
+        transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]);
+      }
+
+      if layer_id == 0 && !dotp_circuit_vec.is_empty() {
+        let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = claims_dotp;
+        for i in 0..dotp_circuit_vec.len() {
+          transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]);
+          transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]);
+          transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]);
+        }
+        claims_dotp_final = (claims_dotp_left, claims_dotp_right, claims_dotp_weight);
+      }
+
+      // produce a random challenge to condense two claims into a single claim
+      let r_layer = transcript.challenge_scalar(b"challenge_r_layer");
+
+      claims_to_verify = (0..prod_circuit_vec.len())
+        .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i]))
+        .collect::<Vec<Scalar>>();
+
+      let mut ext = vec![r_layer];
+      ext.extend(rand_prod);
+      rand = ext;
+
+      proof_layers.push(LayerProofBatched {
+        proof,
+        claims_prod_left,
+        claims_prod_right,
+      });
+    }
+
+    (
+      ProductCircuitEvalProofBatched {
+        proof: proof_layers,
+        claims_dotp: claims_dotp_final,
+      },
+      rand,
+    )
+  }
+
+  pub fn verify(
+    &self,
+    claims_prod_vec: &[Scalar],
+    claims_dotp_vec: &[Scalar],
+    len: usize,
+    transcript: &mut Transcript,
+  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
+    let num_layers = len.log_2();
+    let mut rand: Vec<Scalar> = Vec::new();
+    assert_eq!(self.proof.len(), num_layers);
+
+    let mut claims_to_verify = claims_prod_vec.to_owned();
+    let mut claims_to_verify_dotp: Vec<Scalar> = Vec::new();
+    for (num_rounds, i) in (0..num_layers).enumerate() {
+      if i == num_layers - 1 {
+        claims_to_verify.extend(claims_dotp_vec);
+      }
+
+      // produce random coefficients, one for each instance
+      let coeff_vec =
+        transcript.challenge_vector(b"rand_coeffs_next_layer", claims_to_verify.len());
+
+      // produce a joint claim
+      let claim = (0..claims_to_verify.len())
+        .map(|i| claims_to_verify[i] * coeff_vec[i])
+        .sum();
+
+      let (claim_last, rand_prod) = self.proof[i].verify(claim, num_rounds, 3, transcript);
+
+      let claims_prod_left = &self.proof[i].claims_prod_left;
+      let claims_prod_right = &self.proof[i].claims_prod_right;
+      assert_eq!(claims_prod_left.len(), claims_prod_vec.len());
+      assert_eq!(claims_prod_right.len(), claims_prod_vec.len());
+
+      for i in 0..claims_prod_vec.len() {
+        transcript.append_scalar(b"claim_prod_left", &claims_prod_left[i]);
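+        // mirrored on the verifier side: both claims re-enter the
+        // transcript before the layer challenge is derived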
+ transcript.append_scalar(b"claim_prod_right", &claims_prod_right[i]); + } + + assert_eq!(rand.len(), rand_prod.len()); + let eq: Scalar = (0..rand.len()) + .map(|i| { + rand[i] * rand_prod[i] + (Scalar::one() - rand[i]) * (Scalar::one() - rand_prod[i]) + }) + .product(); + let mut claim_expected: Scalar = (0..claims_prod_vec.len()) + .map(|i| coeff_vec[i] * (claims_prod_left[i] * claims_prod_right[i] * eq)) + .sum(); + + // add claims from the dotp instances + if i == num_layers - 1 { + let num_prod_instances = claims_prod_vec.len(); + let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; + for i in 0..claims_dotp_left.len() { + transcript.append_scalar(b"claim_dotp_left", &claims_dotp_left[i]); + transcript.append_scalar(b"claim_dotp_right", &claims_dotp_right[i]); + transcript.append_scalar(b"claim_dotp_weight", &claims_dotp_weight[i]); + + claim_expected += coeff_vec[i + num_prod_instances] + * claims_dotp_left[i] + * claims_dotp_right[i] + * claims_dotp_weight[i]; + } + } + + assert_eq!(claim_expected, claim_last); + + // produce a random challenge + let r_layer = transcript.challenge_scalar(b"challenge_r_layer"); + + claims_to_verify = (0..claims_prod_left.len()) + .map(|i| claims_prod_left[i] + r_layer * (claims_prod_right[i] - claims_prod_left[i])) + .collect::>(); + + // add claims to verify for dotp circuit + if i == num_layers - 1 { + let (claims_dotp_left, claims_dotp_right, claims_dotp_weight) = &self.claims_dotp; + + for i in 0..claims_dotp_vec.len() / 2 { + // combine left claims + let claim_left = claims_dotp_left[2 * i] + + r_layer * (claims_dotp_left[2 * i + 1] - claims_dotp_left[2 * i]); + + let claim_right = claims_dotp_right[2 * i] + + r_layer * (claims_dotp_right[2 * i + 1] - claims_dotp_right[2 * i]); + + let claim_weight = claims_dotp_weight[2 * i] + + r_layer * (claims_dotp_weight[2 * i + 1] - claims_dotp_weight[2 * i]); + claims_to_verify_dotp.push(claim_left); + claims_to_verify_dotp.push(claim_right); + claims_to_verify_dotp.push(claim_weight); + } + } + + let mut ext = vec![r_layer]; + ext.extend(rand_prod); + rand = ext; + } + (claims_to_verify, claims_to_verify_dotp, rand) + } +} diff --git a/third_party/Dorian/src/r1csinstance.rs b/third_party/Dorian/src/r1csinstance.rs new file mode 100644 index 000000000..ec50f2048 --- /dev/null +++ b/third_party/Dorian/src/r1csinstance.rs @@ -0,0 +1,416 @@ +use crate::transcript::AppendToTranscript; + +use super::dense_mlpoly::DensePolynomial; +use super::errors::ProofVerifyError; +use super::math::Math; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::sparse_mlpoly::{ + MultiSparseMatPolynomialAsDense, SparseMatEntry, SparseMatPolyCommitment, + SparseMatPolyCommitmentGens, SparseMatPolyEvalProof, SparseMatPolynomial, +}; +use super::timer::Timer; +use flate2::{write::ZlibEncoder, Compression}; +use merlin::Transcript; +use rand::rngs::OsRng; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct R1CSInstance { + num_cons: usize, + num_vars: usize, + num_inputs: usize, + A: SparseMatPolynomial, + B: SparseMatPolynomial, + C: SparseMatPolynomial, +} + +pub struct R1CSCommitmentGens { + gens: SparseMatPolyCommitmentGens, +} + +impl R1CSCommitmentGens { + pub fn new( + label: &'static [u8], + num_cons: usize, + num_vars: usize, + num_inputs: usize, + num_nz_entries: usize, + ) -> R1CSCommitmentGens { + assert!(num_inputs < num_vars); + let num_poly_vars_x = num_cons.log_2(); + let num_poly_vars_y = (2 * num_vars).log_2(); + let gens = 
+      SparseMatPolyCommitmentGens::new(label, num_poly_vars_x, num_poly_vars_y, num_nz_entries, 3);
+    R1CSCommitmentGens { gens }
+  }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct R1CSCommitment {
+  num_cons: usize,
+  num_vars: usize,
+  num_inputs: usize,
+  comm: SparseMatPolyCommitment,
+}
+
+impl AppendToTranscript for R1CSCommitment {
+  fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) {
+    transcript.append_u64(b"num_cons", self.num_cons as u64);
+    transcript.append_u64(b"num_vars", self.num_vars as u64);
+    transcript.append_u64(b"num_inputs", self.num_inputs as u64);
+    self.comm.append_to_transcript(b"comm", transcript);
+  }
+}
+
+pub struct R1CSDecommitment {
+  dense: MultiSparseMatPolynomialAsDense,
+}
+
+impl R1CSCommitment {
+  pub fn get_num_cons(&self) -> usize {
+    self.num_cons
+  }
+
+  pub fn get_num_vars(&self) -> usize {
+    self.num_vars
+  }
+
+  pub fn get_num_inputs(&self) -> usize {
+    self.num_inputs
+  }
+}
+
+impl R1CSInstance {
+  pub fn new(
+    num_cons: usize,
+    num_vars: usize,
+    num_inputs: usize,
+    A: &[(usize, usize, Scalar)],
+    B: &[(usize, usize, Scalar)],
+    C: &[(usize, usize, Scalar)],
+  ) -> R1CSInstance {
+    Timer::print(&format!("number_of_constraints {num_cons}"));
+    Timer::print(&format!("number_of_variables {num_vars}"));
+    Timer::print(&format!("number_of_inputs {num_inputs}"));
+    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
+    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
+    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
+
+    // check that num_cons is a power of 2
+    assert_eq!(num_cons.next_power_of_two(), num_cons);
+
+    // check that num_vars is a power of 2
+    assert_eq!(num_vars.next_power_of_two(), num_vars);
+
+    // check that num_inputs + 1 <= num_vars
+    assert!(num_inputs < num_vars);
+
+    // no errors, so create polynomials
+    let num_poly_vars_x = num_cons.log_2();
+    let num_poly_vars_y = (2 * num_vars).log_2();
+
+    let mat_A = (0..A.len())
+      .map(|i| SparseMatEntry::new(A[i].0, A[i].1, A[i].2))
+      .collect::<Vec<SparseMatEntry>>();
+    let mat_B = (0..B.len())
+      .map(|i| SparseMatEntry::new(B[i].0, B[i].1, B[i].2))
+      .collect::<Vec<SparseMatEntry>>();
+    let mat_C = (0..C.len())
+      .map(|i| SparseMatEntry::new(C[i].0, C[i].1, C[i].2))
+      .collect::<Vec<SparseMatEntry>>();
+
+    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_A);
+    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_B);
+    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, mat_C);
+
+    R1CSInstance {
+      num_cons,
+      num_vars,
+      num_inputs,
+      A: poly_A,
+      B: poly_B,
+      C: poly_C,
+    }
+  }
+
+  pub fn get_num_vars(&self) -> usize {
+    self.num_vars
+  }
+
+  pub fn get_num_cons(&self) -> usize {
+    self.num_cons
+  }
+
+  pub fn get_num_inputs(&self) -> usize {
+    self.num_inputs
+  }
+
+  pub fn get_digest(&self) -> Vec<u8> {
+    use digest::Digest;
+    sha3::Sha3_256::digest(&bincode::serialize(&self).unwrap()).to_vec()
+  }
+
+  pub fn produce_synthetic_r1cs(
+    num_cons: usize,
+    num_vars: usize,
+    num_inputs: usize,
+  ) -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
+    Timer::print(&format!("number_of_constraints {num_cons}"));
+    Timer::print(&format!("number_of_variables {num_vars}"));
+    Timer::print(&format!("number_of_inputs {num_inputs}"));
+
+    let mut csprng: OsRng = OsRng;
+
+    // assert num_cons and num_vars are power of 2
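+    // (log_2 then pow2 round-trips exactly on powers of two:
+    // 8 -> 3 -> 8, whereas 6 -> 3 -> 8 would fail the assert)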
+    assert_eq!((num_cons.log_2()).pow2(), num_cons);
+    assert_eq!((num_vars.log_2()).pow2(), num_vars);
+
+    // num_inputs + 1 <= num_vars
+    assert!(num_inputs < num_vars);
+
+    // z is organized as [vars,1,io]
+    let size_z = num_vars + num_inputs + 1;
+
+    // produce a random satisfying assignment
+    let Z = {
+      let mut Z: Vec<Scalar> = (0..size_z)
+        .map(|_i| Scalar::random(&mut csprng))
+        .collect::<Vec<Scalar>>();
+      Z[num_vars] = Scalar::one(); // set the constant term to 1
+      Z
+    };
+
+    // three sparse matrices
+    let mut A: Vec<SparseMatEntry> = Vec::new();
+    let mut B: Vec<SparseMatEntry> = Vec::new();
+    let mut C: Vec<SparseMatEntry> = Vec::new();
+    let one = Scalar::one();
+    for i in 0..num_cons {
+      let A_idx = i % size_z;
+      let B_idx = (i + 2) % size_z;
+      A.push(SparseMatEntry::new(i, A_idx, one));
+      B.push(SparseMatEntry::new(i, B_idx, one));
+      let AB_val = Z[A_idx] * Z[B_idx];
+
+      let C_idx = (i + 3) % size_z;
+      let C_val = Z[C_idx];
+
+      if C_val == Scalar::zero() {
+        C.push(SparseMatEntry::new(i, num_vars, AB_val));
+      } else {
+        C.push(SparseMatEntry::new(
+          i,
+          C_idx,
+          AB_val * C_val.invert().unwrap(),
+        ));
+      }
+    }
+
+    Timer::print(&format!("number_non-zero_entries_A {}", A.len()));
+    Timer::print(&format!("number_non-zero_entries_B {}", B.len()));
+    Timer::print(&format!("number_non-zero_entries_C {}", C.len()));
+
+    let num_poly_vars_x = num_cons.log_2();
+    let num_poly_vars_y = (2 * num_vars).log_2();
+    let poly_A = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, A);
+    let poly_B = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, B);
+    let poly_C = SparseMatPolynomial::new(num_poly_vars_x, num_poly_vars_y, C);
+
+    let inst = R1CSInstance {
+      num_cons,
+      num_vars,
+      num_inputs,
+      A: poly_A,
+      B: poly_B,
+      C: poly_C,
+    };
+
+    assert!(inst.is_sat(&Z[..num_vars], &Z[num_vars + 1..]));
+
+    (inst, Z[..num_vars].to_vec(), Z[num_vars + 1..].to_vec())
+  }
+
+  pub fn is_sat(&self, vars: &[Scalar], input: &[Scalar]) -> bool {
+    assert_eq!(vars.len(), self.num_vars);
+    assert_eq!(input.len(), self.num_inputs);
+
+    let z = {
+      let mut z = vars.to_vec();
+      z.extend(&vec![Scalar::one()]);
+      z.extend(input);
+      z
+    };
+
+    // verify if Az * Bz - Cz = [0...]
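+    // i.e., for every constraint row i over z = (vars, 1, inputs):
+    //   <A_i, z> * <B_i, z> == <C_i, z>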
+    let Az = self
+      .A
+      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
+    let Bz = self
+      .B
+      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
+    let Cz = self
+      .C
+      .multiply_vec(self.num_cons, self.num_vars + self.num_inputs + 1, &z);
+
+    assert_eq!(Az.len(), self.num_cons);
+    assert_eq!(Bz.len(), self.num_cons);
+    assert_eq!(Cz.len(), self.num_cons);
+    let res: usize = (0..self.num_cons)
+      .map(|i| usize::from(Az[i] * Bz[i] != Cz[i]))
+      .sum();
+
+    res == 0
+  }
+
+  #[cfg(not(feature = "multicore"))]
+  pub fn multiply_vec(
+    &self,
+    num_rows: usize,
+    num_cols: usize,
+    z: &[Scalar],
+  ) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
+    assert_eq!(num_rows, self.num_cons);
+    assert_eq!(z.len(), num_cols);
+    assert!(num_cols > self.num_vars);
+    (
+      DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
+      DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
+      DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
+    )
+  }
+
+  #[cfg(feature = "multicore")]
+  pub fn multiply_vec(
+    &self,
+    num_rows: usize,
+    num_cols: usize,
+    z: &[Scalar],
+  ) -> (DensePolynomial, DensePolynomial, DensePolynomial) {
+    assert_eq!(num_rows, self.num_cons);
+    assert_eq!(z.len(), num_cols);
+    assert!(num_cols > self.num_vars);
+
+    // Use rayon's join to compute A*z, B*z, and C*z in parallel
+    let (Az, (Bz, Cz)) = rayon::join(
+      || DensePolynomial::new(self.A.multiply_vec(num_rows, num_cols, z)),
+      || rayon::join(
+        || DensePolynomial::new(self.B.multiply_vec(num_rows, num_cols, z)),
+        || DensePolynomial::new(self.C.multiply_vec(num_rows, num_cols, z)),
+      ),
+    );
+
+    (Az, Bz, Cz)
+  }
+
+  #[cfg(not(feature = "multicore"))]
+  pub fn compute_eval_table_sparse(
+    &self,
+    num_rows: usize,
+    num_cols: usize,
+    evals: &[Scalar],
+  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
+    assert_eq!(num_rows, self.num_cons);
+    assert!(num_cols > self.num_vars);
+
+    let evals_A = self.A.compute_eval_table_sparse(evals, num_rows, num_cols);
+    let evals_B = self.B.compute_eval_table_sparse(evals, num_rows, num_cols);
+    let evals_C = self.C.compute_eval_table_sparse(evals, num_rows, num_cols);
+
+    (evals_A, evals_B, evals_C)
+  }
+
+  #[cfg(feature = "multicore")]
+  pub fn compute_eval_table_sparse(
+    &self,
+    num_rows: usize,
+    num_cols: usize,
+    evals: &[Scalar],
+  ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
+    assert_eq!(num_rows, self.num_cons);
+    assert!(num_cols > self.num_vars);
+
+    // Use rayon's join to compute evals_A, evals_B, and evals_C in parallel
+    let (evals_A, (evals_B, evals_C)) = rayon::join(
+      || self.A.compute_eval_table_sparse(evals, num_rows, num_cols),
+      || rayon::join(
+        || self.B.compute_eval_table_sparse(evals, num_rows, num_cols),
+        || self.C.compute_eval_table_sparse(evals, num_rows, num_cols),
+      ),
+    );
+
+    (evals_A, evals_B, evals_C)
+  }
+
+  pub fn evaluate(&self, rx: &[Scalar], ry: &[Scalar]) -> (Scalar, Scalar, Scalar) {
+    let evals = SparseMatPolynomial::multi_evaluate(&[&self.A, &self.B, &self.C], rx, ry);
+    (evals[0], evals[1], evals[2])
+  }
+
+  pub fn commit(&self, gens: &R1CSCommitmentGens) -> (R1CSCommitment, R1CSDecommitment) {
+    let (comm, dense) = SparseMatPolynomial::multi_commit(&[&self.A, &self.B, &self.C], &gens.gens);
+    let r1cs_comm = R1CSCommitment {
+      num_cons: self.num_cons,
+      num_vars: self.num_vars,
+      num_inputs: self.num_inputs,
+      comm,
+    };
+
+    let r1cs_decomm = R1CSDecommitment { dense };
+
+    (r1cs_comm, r1cs_decomm)
+  }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct R1CSEvalProof {
+  proof:
SparseMatPolyEvalProof, +} + +impl R1CSEvalProof { + pub fn prove( + decomm: &R1CSDecommitment, + rx: &[Scalar], // point at which the polynomial is evaluated + ry: &[Scalar], + evals: &(Scalar, Scalar, Scalar), + gens: &R1CSCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> R1CSEvalProof { + let timer = Timer::new("R1CSEvalProof::prove"); + let proof = SparseMatPolyEvalProof::prove( + &decomm.dense, + rx, + ry, + &[evals.0, evals.1, evals.2], + &gens.gens, + transcript, + random_tape, + ); + timer.stop(); + + R1CSEvalProof { proof } + } + + pub fn verify( + &self, + comm: &R1CSCommitment, + rx: &[Scalar], // point at which the R1CS matrix polynomials are evaluated + ry: &[Scalar], + evals: &(Scalar, Scalar, Scalar), + gens: &R1CSCommitmentGens, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + self.proof.verify( + &comm.comm, + rx, + ry, + &[evals.0, evals.1, evals.2], + &gens.gens, + transcript, + ) + } +} diff --git a/third_party/Dorian/src/r1csproof.rs b/third_party/Dorian/src/r1csproof.rs new file mode 100644 index 000000000..6ce62bc97 --- /dev/null +++ b/third_party/Dorian/src/r1csproof.rs @@ -0,0 +1,614 @@ +#![allow(clippy::too_many_arguments)] +use super::commitments::{Commitments, MultiCommitGens}; +use super::dense_mlpoly::{ + DensePolynomial, EqPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof, +}; +use super::errors::ProofVerifyError; +use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; +use super::math::Math; +use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; +use super::r1csinstance::R1CSInstance; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::sparse_mlpoly::{SparsePolyEntry, SparsePolynomial}; +use super::sumcheck::ZKSumcheckInstanceProof; +use super::timer::Timer; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use core::iter; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct R1CSProof { + comm_vars: PolyCommitment, + sc_proof_phase1: ZKSumcheckInstanceProof, + claims_phase2: ( + CompressedGroup, + CompressedGroup, + CompressedGroup, + CompressedGroup, + ), + pok_claims_phase2: (KnowledgeProof, ProductProof), + proof_eq_sc_phase1: EqualityProof, + sc_proof_phase2: ZKSumcheckInstanceProof, + comm_vars_at_ry: CompressedGroup, + proof_eval_vars_at_ry: PolyEvalProof, + proof_eq_sc_phase2: EqualityProof, +} + +#[derive(Serialize, Deserialize)] // to test +pub struct R1CSSumcheckGens { + pub gens_1: MultiCommitGens, + pub gens_3: MultiCommitGens, + pub gens_4: MultiCommitGens, +} + +// TODO: fix passing gens_1_ref +impl R1CSSumcheckGens { + pub fn new(label: &'static [u8], gens_1_ref: &MultiCommitGens) -> Self { + let gens_1 = gens_1_ref.clone(); + let gens_3 = MultiCommitGens::new(3, label); + let gens_4 = MultiCommitGens::new(4, label); + + R1CSSumcheckGens { + gens_1, + gens_3, + gens_4, + } + } +} + +#[derive(Serialize, Deserialize)] // to test +pub struct R1CSGens { + pub gens_sc: R1CSSumcheckGens, + pub gens_pc: PolyCommitmentGens, +} + +impl R1CSGens { + pub fn new(label: &'static [u8], _num_cons: usize, num_vars: usize) -> Self { + let num_poly_vars = num_vars.log_2(); + let gens_pc = PolyCommitmentGens::new(num_poly_vars, label); + let gens_sc = R1CSSumcheckGens::new(label, &gens_pc.gens.gens_1); + R1CSGens { gens_sc, gens_pc } + } +} + +impl R1CSProof { + #[inline] + fn comb_func_sc_one( + poly_A_comp: &Scalar, + poly_B_comp: &Scalar, + poly_C_comp: &Scalar, + 
poly_D_comp: &Scalar,
+  ) -> Scalar {
+    poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp)
+  }
+
+  pub fn prove_phase_one(
+    num_rounds: usize,
+    evals_tau: &mut DensePolynomial,
+    evals_Az: &mut DensePolynomial,
+    evals_Bz: &mut DensePolynomial,
+    evals_Cz: &mut DensePolynomial,
+    gens: &R1CSSumcheckGens,
+    transcript: &mut Transcript,
+    random_tape: &mut RandomTape,
+  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
+    // let comb_func = |poly_A_comp: &Scalar,
+    //                  poly_B_comp: &Scalar,
+    //                  poly_C_comp: &Scalar,
+    //                  poly_D_comp: &Scalar|
+    //  -> Scalar { poly_A_comp * (poly_B_comp * poly_C_comp - poly_D_comp) };
+
+    let (sc_proof_phase_one, r, claims, blind_claim_postsc) =
+      ZKSumcheckInstanceProof::prove_cubic_with_additive_term(
+        &Scalar::zero(), // claim is zero
+        &Scalar::zero(), // blind for claim is also zero
+        num_rounds,
+        evals_tau,
+        evals_Az,
+        evals_Bz,
+        evals_Cz,
+        // comb_func,
+        R1CSProof::comb_func_sc_one,
+        &gens.gens_1,
+        &gens.gens_4,
+        transcript,
+        random_tape,
+      );
+
+    (sc_proof_phase_one, r, claims, blind_claim_postsc)
+  }
+
+  fn prove_phase_two(
+    num_rounds: usize,
+    claim: &Scalar,
+    blind_claim: &Scalar,
+    evals_z: &mut DensePolynomial,
+    evals_ABC: &mut DensePolynomial,
+    gens: &R1CSSumcheckGens,
+    transcript: &mut Transcript,
+    random_tape: &mut RandomTape,
+  ) -> (ZKSumcheckInstanceProof, Vec<Scalar>, Vec<Scalar>, Scalar) {
+    let comb_func =
+      |poly_A_comp: &Scalar, poly_B_comp: &Scalar| -> Scalar { poly_A_comp * poly_B_comp };
+    let (sc_proof_phase_two, r, claims, blind_claim_postsc) = ZKSumcheckInstanceProof::prove_quad(
+      claim,
+      blind_claim,
+      num_rounds,
+      evals_z,
+      evals_ABC,
+      comb_func,
+      &gens.gens_1,
+      &gens.gens_3,
+      transcript,
+      random_tape,
+    );
+
+    (sc_proof_phase_two, r, claims, blind_claim_postsc)
+  }
+
+  fn protocol_name() -> &'static [u8] {
+    b"R1CS proof"
+  }
+
+  pub fn prove(
+    inst: &R1CSInstance,
+    vars: Vec<Scalar>,
+    input: &[Scalar],
+    gens: &R1CSGens,
+    transcript: &mut Transcript,
+    random_tape: &mut RandomTape,
+  ) -> (R1CSProof, Vec<Scalar>, Vec<Scalar>) {
+    let timer_prove = Timer::new("R1CSProof::prove");
+    transcript.append_protocol_name(R1CSProof::protocol_name());
+
+    // we currently require |inputs| + 1 to be at most the number of vars
+    assert!(input.len() < vars.len());
+
+    input.append_to_transcript(b"input", transcript);
+
+    let timer_commit = Timer::new("polycommit");
+    let (poly_vars, comm_vars, blinds_vars) = {
+      // create a multilinear polynomial using the supplied assignment for variables
+      let poly_vars = DensePolynomial::new(vars.clone());
+
+      // produce a commitment to the satisfying assignment
+      let (comm_vars, blinds_vars) = poly_vars.commit(&gens.gens_pc, Some(random_tape));
+
+      // add the commitment to the prover's transcript
+      comm_vars.append_to_transcript(b"poly_commitment", transcript);
+      (poly_vars, comm_vars, blinds_vars)
+    };
+    timer_commit.stop();
+
+    let timer_sc_proof_phase1 = Timer::new("prove_sc_phase_one");
+
+    // append input to variables to create a single vector z
+    let z = {
+      let num_inputs = input.len();
+      let num_vars = vars.len();
+      let mut z = vars;
+      z.extend(&vec![Scalar::one()]); // add constant term in z
+      z.extend(input);
+      z.extend(&vec![Scalar::zero(); num_vars - num_inputs - 1]); // we will pad with zeros
+      z
+    };
+
+    // derive the verifier's challenge tau
+    let (num_rounds_x, num_rounds_y) = (inst.get_num_cons().log_2(), z.len().log_2());
+    let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
+    // compute the initial evaluation table for R(\tau, x)
+    let mut poly_tau =
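+      // a table of eq(tau, x) over all x in {0,1}^num_rounds_x, where
+      // eq(tau, x) = prod_i (tau_i * x_i + (1 - tau_i) * (1 - x_i))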
+      DensePolynomial::new(EqPolynomial::new(tau).evals());
+    let (mut poly_Az, mut poly_Bz, mut poly_Cz) =
+      inst.multiply_vec(inst.get_num_cons(), z.len(), &z);
+
+    let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one(
+      num_rounds_x,
+      &mut poly_tau,
+      &mut poly_Az,
+      &mut poly_Bz,
+      &mut poly_Cz,
+      &gens.gens_sc,
+      transcript,
+      random_tape,
+    );
+    assert_eq!(poly_tau.len(), 1);
+    assert_eq!(poly_Az.len(), 1);
+    assert_eq!(poly_Bz.len(), 1);
+    assert_eq!(poly_Cz.len(), 1);
+    timer_sc_proof_phase1.stop();
+
+    let (tau_claim, Az_claim, Bz_claim, Cz_claim) =
+      (&poly_tau[0], &poly_Az[0], &poly_Bz[0], &poly_Cz[0]);
+    let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = (
+      random_tape.random_scalar(b"Az_blind"),
+      random_tape.random_scalar(b"Bz_blind"),
+      random_tape.random_scalar(b"Cz_blind"),
+      random_tape.random_scalar(b"prod_Az_Bz_blind"),
+    );
+
+    let (pok_Cz_claim, comm_Cz_claim) = {
+      KnowledgeProof::prove(
+        &gens.gens_sc.gens_1,
+        transcript,
+        random_tape,
+        Cz_claim,
+        &Cz_blind,
+      )
+    };
+
+    let (proof_prod, comm_Az_claim, comm_Bz_claim, comm_prod_Az_Bz_claims) = {
+      let prod = Az_claim * Bz_claim;
+      ProductProof::prove(
+        &gens.gens_sc.gens_1,
+        transcript,
+        random_tape,
+        Az_claim,
+        &Az_blind,
+        Bz_claim,
+        &Bz_blind,
+        &prod,
+        &prod_Az_Bz_blind,
+      )
+    };
+
+    comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
+    comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
+    comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
+    comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
+
+    // prove the final step of sum-check #1
+    let taus_bound_rx = tau_claim;
+    let blind_expected_claim_postsc1 = taus_bound_rx * (prod_Az_Bz_blind - Cz_blind);
+    let claim_post_phase1 = (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx;
+    let (proof_eq_sc_phase1, _C1, _C2) = EqualityProof::prove(
+      &gens.gens_sc.gens_1,
+      transcript,
+      random_tape,
+      &claim_post_phase1,
+      &blind_expected_claim_postsc1,
+      &claim_post_phase1,
+      &blind_claim_postsc1,
+    );
+
+    let timer_sc_proof_phase2 = Timer::new("prove_sc_phase_two");
+    // combine the three claims into a single claim
+    let r_A = transcript.challenge_scalar(b"challenge_Az");
+    let r_B = transcript.challenge_scalar(b"challenge_Bz");
+    let r_C = transcript.challenge_scalar(b"challenge_Cz");
+    let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim;
+    let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind;
+
+    let evals_ABC = {
+      // compute the initial evaluation table for R(\tau, x)
+      let evals_rx = EqPolynomial::new(rx.clone()).evals();
+      let (evals_A, evals_B, evals_C) =
+        inst.compute_eval_table_sparse(inst.get_num_cons(), z.len(), &evals_rx);
+
+      assert_eq!(evals_A.len(), evals_B.len());
+      assert_eq!(evals_A.len(), evals_C.len());
+      (0..evals_A.len())
+        .map(|i| r_A * evals_A[i] + r_B * evals_B[i] + r_C * evals_C[i])
+        .collect::<Vec<Scalar>>()
+    };
+
+    // another instance of the sum-check protocol
+    let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two(
+      num_rounds_y,
+      &claim_phase2,
+      &blind_claim_phase2,
+      &mut DensePolynomial::new(z),
+      &mut DensePolynomial::new(evals_ABC),
+      &gens.gens_sc,
+      transcript,
+      random_tape,
+    );
+    timer_sc_proof_phase2.stop();
+
+    let timer_polyeval = Timer::new("polyeval");
+    let eval_vars_at_ry = poly_vars.evaluate(&ry[1..]);
+    let blind_eval = random_tape.random_scalar(b"blind_eval");
+    let (proof_eval_vars_at_ry, comm_vars_at_ry) = PolyEvalProof::prove(
+      &poly_vars,
+      Some(&blinds_vars),
+      &ry[1..],
+      &eval_vars_at_ry,
+      Some(&blind_eval),
+      &gens.gens_pc,
+      transcript,
+      random_tape,
+    );
+    timer_polyeval.stop();
+
+    // prove the final step of sum-check #2
+    let blind_eval_Z_at_ry = (Scalar::one() - ry[0]) * blind_eval;
+    let blind_expected_claim_postsc2 = claims_phase2[1] * blind_eval_Z_at_ry;
+    let claim_post_phase2 = claims_phase2[0] * claims_phase2[1];
+    let (proof_eq_sc_phase2, _C1, _C2) = EqualityProof::prove(
+      &gens.gens_pc.gens.gens_1,
+      transcript,
+      random_tape,
+      &claim_post_phase2,
+      &blind_expected_claim_postsc2,
+      &claim_post_phase2,
+      &blind_claim_postsc2,
+    );
+
+    timer_prove.stop();
+
+    (
+      R1CSProof {
+        comm_vars,
+        sc_proof_phase1,
+        claims_phase2: (
+          comm_Az_claim,
+          comm_Bz_claim,
+          comm_Cz_claim,
+          comm_prod_Az_Bz_claims,
+        ),
+        pok_claims_phase2: (pok_Cz_claim, proof_prod),
+        proof_eq_sc_phase1,
+        sc_proof_phase2,
+        comm_vars_at_ry,
+        proof_eval_vars_at_ry,
+        proof_eq_sc_phase2,
+      },
+      rx,
+      ry,
+    )
+  }
+
+  pub fn verify(
+    &self,
+    num_vars: usize,
+    num_cons: usize,
+    input: &[Scalar],
+    evals: &(Scalar, Scalar, Scalar),
+    transcript: &mut Transcript,
+    gens: &R1CSGens,
+  ) -> Result<(Vec<Scalar>, Vec<Scalar>), ProofVerifyError> {
+    transcript.append_protocol_name(R1CSProof::protocol_name());
+
+    input.append_to_transcript(b"input", transcript);
+
+    let n = num_vars;
+    // add the commitment to the verifier's transcript
+    self
+      .comm_vars
+      .append_to_transcript(b"poly_commitment", transcript);
+
+    let (num_rounds_x, num_rounds_y) = (num_cons.log_2(), (2 * num_vars).log_2());
+
+    // derive the verifier's challenge tau
+    let tau = transcript.challenge_vector(b"challenge_tau", num_rounds_x);
+
+    // verify the first sum-check instance
+    let claim_phase1 = Scalar::zero()
+      .commit(&Scalar::zero(), &gens.gens_sc.gens_1)
+      .compress();
+    let (comm_claim_post_phase1, rx) = self.sc_proof_phase1.verify(
+      &claim_phase1,
+      num_rounds_x,
+      3,
+      &gens.gens_sc.gens_1,
+      &gens.gens_sc.gens_4,
+      transcript,
+    )?;
+    // perform the intermediate sum-check test with claimed Az, Bz, and Cz
+    let (comm_Az_claim, comm_Bz_claim, comm_Cz_claim, comm_prod_Az_Bz_claims) = &self.claims_phase2;
+    let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2;
+
+    pok_Cz_claim.verify(&gens.gens_sc.gens_1, transcript, comm_Cz_claim)?;
+    proof_prod.verify(
+      &gens.gens_sc.gens_1,
+      transcript,
+      comm_Az_claim,
+      comm_Bz_claim,
+      comm_prod_Az_Bz_claims,
+    )?;
+
+    comm_Az_claim.append_to_transcript(b"comm_Az_claim", transcript);
+    comm_Bz_claim.append_to_transcript(b"comm_Bz_claim", transcript);
+    comm_Cz_claim.append_to_transcript(b"comm_Cz_claim", transcript);
+    comm_prod_Az_Bz_claims.append_to_transcript(b"comm_prod_Az_Bz_claims", transcript);
+
+    let taus_bound_rx: Scalar = (0..rx.len())
+      .map(|i| rx[i] * tau[i] + (Scalar::one() - rx[i]) * (Scalar::one() - tau[i]))
+      .product();
+    let expected_claim_post_phase1 = (taus_bound_rx
+      * (comm_prod_Az_Bz_claims.decompress().unwrap() - comm_Cz_claim.decompress().unwrap()))
+    .compress();
+
+    // verify proof that expected_claim_post_phase1 == claim_post_phase1
+    self.proof_eq_sc_phase1.verify(
+      &gens.gens_sc.gens_1,
+      transcript,
+      &expected_claim_post_phase1,
+      &comm_claim_post_phase1,
+    )?;
+
+    // derive three public challenges and then derive a joint claim
+    let r_A = transcript.challenge_scalar(b"challenge_Az");
+    let r_B = transcript.challenge_scalar(b"challenge_Bz");
+    let r_C = transcript.challenge_scalar(b"challenge_Cz");
+
+    // r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
+    let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
+    // r_A * comm_Az_claim + r_B * comm_Bz_claim + r_C * comm_Cz_claim;
+    let comm_claim_phase2 = GroupElement::vartime_multiscalar_mul(
+      iter::once(&r_A)
+        .chain(iter::once(&r_B))
+        .chain(iter::once(&r_C)),
+      iter::once(&comm_Az_claim)
+        .chain(iter::once(&comm_Bz_claim))
+        .chain(iter::once(&comm_Cz_claim))
+        .map(|pt| pt.decompress().unwrap())
+        .collect::<Vec<GroupElement>>(),
+    )
+    .compress();
+
+    // verify the joint claim with a sum-check protocol
+    let (comm_claim_post_phase2, ry) = self.sc_proof_phase2.verify(
+      &comm_claim_phase2,
+      num_rounds_y,
+      2,
+      &gens.gens_sc.gens_1,
+      &gens.gens_sc.gens_3,
+      transcript,
+    )?;
+
+    // verify Z(ry) proof against the initial commitment
+    self.proof_eval_vars_at_ry.verify(
+      &gens.gens_pc,
+      transcript,
+      &ry[1..],
+      &self.comm_vars_at_ry,
+      &self.comm_vars,
+    )?;
+
+    let poly_input_eval = {
+      // constant term
+      let mut input_as_sparse_poly_entries = vec![SparsePolyEntry::new(0, Scalar::one())];
+      // remaining inputs
+      input_as_sparse_poly_entries.extend(
+        (0..input.len())
+          .map(|i| SparsePolyEntry::new(i + 1, input[i]))
+          .collect::<Vec<SparsePolyEntry>>(),
+      );
+      SparsePolynomial::new(n.log_2(), input_as_sparse_poly_entries).evaluate(&ry[1..])
+    };
+
+    // compute commitment to eval_Z_at_ry = (Scalar::one() - ry[0]) * self.eval_vars_at_ry + ry[0] * poly_input_eval
+    let comm_eval_Z_at_ry = GroupElement::vartime_multiscalar_mul(
+      iter::once(Scalar::one() - ry[0]).chain(iter::once(ry[0])),
+      iter::once(&self.comm_vars_at_ry.decompress().unwrap()).chain(iter::once(
+        &poly_input_eval.commit(&Scalar::zero(), &gens.gens_pc.gens.gens_1),
+      )),
+    );
+
+    // perform the final check in the second sum-check protocol
+    let (eval_A_r, eval_B_r, eval_C_r) = evals;
+    let expected_claim_post_phase2 =
+      ((r_A * eval_A_r + r_B * eval_B_r + r_C * eval_C_r) * comm_eval_Z_at_ry).compress();
+    // verify proof that expected_claim_post_phase2 == claim_post_phase2
+    self.proof_eq_sc_phase2.verify(
+      &gens.gens_sc.gens_1,
+      transcript,
+      &expected_claim_post_phase2,
+      &comm_claim_post_phase2,
+    )?;
+
+    Ok((rx, ry))
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+  use rand::rngs::OsRng;
+
+  fn produce_tiny_r1cs() -> (R1CSInstance, Vec<Scalar>, Vec<Scalar>) {
+    // three constraints over five variables Z1, Z2, Z3, Z4, and Z5,
+    // with the counts rounded up to the nearest power of two
+    let num_cons = 128;
+    let num_vars = 256;
+    let num_inputs = 2;
+
+    // encode the above constraints into three matrices
+    let mut A: Vec<(usize, usize, Scalar)> = Vec::new();
+    let mut B: Vec<(usize, usize, Scalar)> = Vec::new();
+    let mut C: Vec<(usize, usize, Scalar)> = Vec::new();
+
+    let one = Scalar::one();
+    // constraint 0 entries
+    // (Z1 + Z2) * I0 - Z3 = 0
+    A.push((0, 0, one));
+    A.push((0, 1, one));
+    B.push((0, num_vars + 1, one));
+    C.push((0, 2, one));
+
+    // constraint 1 entries
+    // (Z1 + I1) * (Z3) - Z4 = 0
+    A.push((1, 0, one));
+    A.push((1, num_vars + 2, one));
+    B.push((1, 2, one));
+    C.push((1, 3, one));
+    // constraint 2 entries
+    // Z5 * 1 - 0 = 0
+    A.push((2, 4, one));
+    B.push((2, num_vars, one));
+
+    let inst = R1CSInstance::new(num_cons, num_vars, num_inputs, &A, &B, &C);
+
+    // compute a satisfying assignment
+    let mut csprng: OsRng = OsRng;
+    let i0 = Scalar::random(&mut csprng);
+    let i1 = Scalar::random(&mut csprng);
+    let z1 = Scalar::random(&mut csprng);
+    let z2 = Scalar::random(&mut csprng);
+    let z3 = (z1 + z2) * i0; // constraint 0: (Z1 + Z2) * I0 - Z3 = 0
+    let z4 = (z1 + i1) * z3; // constraint 1: (Z1 + I1) * (Z3) - Z4 = 0
+    let z5 = Scalar::zero(); // constraint 2: Z5 * 1 - 0 = 0
+
+    let mut vars = vec![Scalar::zero(); num_vars];
+    vars[0] = z1;
+    vars[1] = z2;
+    vars[2] = z3;
+    vars[3] = z4;
+    vars[4] = z5;
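+    // Editor's note (not part of the original patch): the extended witness
+    // is laid out as z = (vars, 1, inputs), so column `num_vars` holds the
+    // constant one and column `num_vars + 1 + i` holds input i. That is why
+    // constraint 0 references I0 as `num_vars + 1` and constraint 2 uses
+    // column `num_vars` to multiply Z5 by the constant 1. Hypothetical
+    // helpers making the convention explicit:
+    //
+    //   fn const_col(num_vars: usize) -> usize { num_vars }
+    //   fn input_col(num_vars: usize, i: usize) -> usize { num_vars + 1 + i }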
+
+    let mut input = vec![Scalar::zero(); num_inputs];
+    input[0] = i0;
+    input[1] = i1;
+
+    (inst, vars, input)
+  }
+
+  #[test]
+  fn test_tiny_r1cs() {
+    let (inst, vars, input) = tests::produce_tiny_r1cs();
+    let is_sat = inst.is_sat(&vars, &input);
+    assert!(is_sat);
+  }
+
+  #[test]
+  fn test_synthetic_r1cs() {
+    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(1024, 1024, 10);
+    let is_sat = inst.is_sat(&vars, &input);
+    assert!(is_sat);
+  }
+
+  #[test]
+  pub fn check_r1cs_proof() {
+    let num_vars = 1024;
+    let num_cons = num_vars;
+    let num_inputs = 10;
+    let (inst, vars, input) = R1CSInstance::produce_synthetic_r1cs(num_cons, num_vars, num_inputs);
+
+    let gens = R1CSGens::new(b"test-m", num_cons, num_vars);
+
+    let mut random_tape = RandomTape::new(b"proof");
+    let mut prover_transcript = Transcript::new(b"example");
+    let (proof, rx, ry) = R1CSProof::prove(
+      &inst,
+      vars,
+      &input,
+      &gens,
+      &mut prover_transcript,
+      &mut random_tape,
+    );
+
+    let inst_evals = inst.evaluate(&rx, &ry);
+
+    let mut verifier_transcript = Transcript::new(b"example");
+    assert!(proof
+      .verify(
+        inst.get_num_vars(),
+        inst.get_num_cons(),
+        &input,
+        &inst_evals,
+        &mut verifier_transcript,
+        &gens,
+      )
+      .is_ok());
+  }
+}
diff --git a/third_party/Dorian/src/random.rs b/third_party/Dorian/src/random.rs
new file mode 100644
index 000000000..2a1a3a2eb
--- /dev/null
+++ b/third_party/Dorian/src/random.rs
@@ -0,0 +1,28 @@
+use super::scalar::Scalar;
+use super::transcript::ProofTranscript;
+use merlin::Transcript;
+use rand::rngs::OsRng;
+
+pub struct RandomTape {
+  tape: Transcript,
+}
+
+impl RandomTape {
+  pub fn new(name: &'static [u8]) -> Self {
+    let tape = {
+      let mut csprng: OsRng = OsRng;
+      let mut tape = Transcript::new(name);
+      tape.append_scalar(b"init_randomness", &Scalar::random(&mut csprng));
+      tape
+    };
+    Self { tape }
+  }
+
+  pub fn random_scalar(&mut self, label: &'static [u8]) -> Scalar {
+    self.tape.challenge_scalar(label)
+  }
+
+  pub fn random_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
+    self.tape.challenge_vector(label, len)
+  }
+}
diff --git a/third_party/Dorian/src/scalar/mod.rs b/third_party/Dorian/src/scalar/mod.rs
new file mode 100644
index 000000000..f2cfd7a8f
--- /dev/null
+++ b/third_party/Dorian/src/scalar/mod.rs
@@ -0,0 +1,43 @@
+mod ristretto255;
+
+pub type Scalar = ristretto255::Scalar;
+pub type ScalarBytes = curve25519_dalek::scalar::Scalar;
+
+pub trait ScalarFromPrimitives {
+  fn to_scalar(self) -> Scalar;
+}
+
+impl ScalarFromPrimitives for usize {
+  #[inline]
+  fn to_scalar(self) -> Scalar {
+    (0..self).map(|_i| Scalar::one()).sum()
+  }
+}
+
+impl ScalarFromPrimitives for bool {
+  #[inline]
+  fn to_scalar(self) -> Scalar {
+    if self {
+      Scalar::one()
+    } else {
+      Scalar::zero()
+    }
+  }
+}
+
+pub trait ScalarBytesFromScalar {
+  fn decompress_scalar(s: &Scalar) -> ScalarBytes;
+  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes>;
+}
+
+impl ScalarBytesFromScalar for Scalar {
+  fn decompress_scalar(s: &Scalar) -> ScalarBytes {
+    ScalarBytes::from_bytes_mod_order(s.to_bytes())
+  }
+
+  fn decompress_vector(s: &[Scalar]) -> Vec<ScalarBytes> {
+    (0..s.len())
+      .map(|i| Scalar::decompress_scalar(&s[i]))
+      .collect::<Vec<ScalarBytes>>()
+  }
+}
diff --git a/third_party/Dorian/src/scalar/ristretto255.rs b/third_party/Dorian/src/scalar/ristretto255.rs
new file mode 100755
index 000000000..94ce945d2
--- /dev/null
+++ b/third_party/Dorian/src/scalar/ristretto255.rs
@@ -0,0 +1,1214 @@
+//! This module provides an implementation of Curve25519's scalar field $\mathbb{F}_q$
+//!
where `q = 2^252 + 27742317777372353535851937790883648493 = 0x1000000000000000 0000000000000000 14def9dea2f79cd6 5812631a5cf5d3ed` +//! This module is an adaptation of code from the bls12-381 crate. +//! We modify various constants (MODULUS, R, R2, etc.) to appropriate values for Curve25519 and update tests +//! We borrow the `invert` method from the curve25519-dalek crate. +//! See NOTICE.md for more details +#![allow(clippy::all)] +use core::borrow::Borrow; +use core::convert::TryFrom; +use core::fmt; +use core::iter::{Product, Sum}; +use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; +use rand::{CryptoRng, RngCore}; +use serde::{Deserialize, Serialize}; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; +use zeroize::Zeroize; + +// use crate::util::{adc, mac, sbb}; +/// Compute a + b + carry, returning the result and the new carry over. +#[inline(always)] +pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) { + let ret = (a as u128) + (b as u128) + (carry as u128); + (ret as u64, (ret >> 64) as u64) +} + +/// Compute a - (b + borrow), returning the result and the new borrow. +#[inline(always)] +pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) { + let ret = (a as u128).wrapping_sub((b as u128) + ((borrow >> 63) as u128)); + (ret as u64, (ret >> 64) as u64) +} + +/// Compute a + (b * c) + carry, returning the result and the new carry over. +#[inline(always)] +pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) { + let ret = (a as u128) + ((b as u128) * (c as u128)) + (carry as u128); + (ret as u64, (ret >> 64) as u64) +} + +macro_rules! impl_add_binop_specify_output { + ($lhs:ident, $rhs:ident, $output:ident) => { + impl<'b> Add<&'b $rhs> for $lhs { + type Output = $output; + + #[inline] + fn add(self, rhs: &'b $rhs) -> $output { + &self + rhs + } + } + + impl<'a> Add<$rhs> for &'a $lhs { + type Output = $output; + + #[inline] + fn add(self, rhs: $rhs) -> $output { + self + &rhs + } + } + + impl Add<$rhs> for $lhs { + type Output = $output; + + #[inline] + fn add(self, rhs: $rhs) -> $output { + &self + &rhs + } + } + }; +} + +macro_rules! impl_sub_binop_specify_output { + ($lhs:ident, $rhs:ident, $output:ident) => { + impl<'b> Sub<&'b $rhs> for $lhs { + type Output = $output; + + #[inline] + fn sub(self, rhs: &'b $rhs) -> $output { + &self - rhs + } + } + + impl<'a> Sub<$rhs> for &'a $lhs { + type Output = $output; + + #[inline] + fn sub(self, rhs: $rhs) -> $output { + self - &rhs + } + } + + impl Sub<$rhs> for $lhs { + type Output = $output; + + #[inline] + fn sub(self, rhs: $rhs) -> $output { + &self - &rhs + } + } + }; +} + +macro_rules! impl_binops_additive_specify_output { + ($lhs:ident, $rhs:ident, $output:ident) => { + impl_add_binop_specify_output!($lhs, $rhs, $output); + impl_sub_binop_specify_output!($lhs, $rhs, $output); + }; +} + +macro_rules! impl_binops_multiplicative_mixed { + ($lhs:ident, $rhs:ident, $output:ident) => { + impl<'b> Mul<&'b $rhs> for $lhs { + type Output = $output; + + #[inline] + fn mul(self, rhs: &'b $rhs) -> $output { + &self * rhs + } + } + + impl<'a> Mul<$rhs> for &'a $lhs { + type Output = $output; + + #[inline] + fn mul(self, rhs: $rhs) -> $output { + self * &rhs + } + } + + impl Mul<$rhs> for $lhs { + type Output = $output; + + #[inline] + fn mul(self, rhs: $rhs) -> $output { + &self * &rhs + } + } + }; +} + +macro_rules! 
impl_binops_additive { + ($lhs:ident, $rhs:ident) => { + impl_binops_additive_specify_output!($lhs, $rhs, $lhs); + + impl SubAssign<$rhs> for $lhs { + #[inline] + fn sub_assign(&mut self, rhs: $rhs) { + *self = &*self - &rhs; + } + } + + impl AddAssign<$rhs> for $lhs { + #[inline] + fn add_assign(&mut self, rhs: $rhs) { + *self = &*self + &rhs; + } + } + + impl<'b> SubAssign<&'b $rhs> for $lhs { + #[inline] + fn sub_assign(&mut self, rhs: &'b $rhs) { + *self = &*self - rhs; + } + } + + impl<'b> AddAssign<&'b $rhs> for $lhs { + #[inline] + fn add_assign(&mut self, rhs: &'b $rhs) { + *self = &*self + rhs; + } + } + }; +} + +macro_rules! impl_binops_multiplicative { + ($lhs:ident, $rhs:ident) => { + impl_binops_multiplicative_mixed!($lhs, $rhs, $lhs); + + impl MulAssign<$rhs> for $lhs { + #[inline] + fn mul_assign(&mut self, rhs: $rhs) { + *self = &*self * &rhs; + } + } + + impl<'b> MulAssign<&'b $rhs> for $lhs { + #[inline] + fn mul_assign(&mut self, rhs: &'b $rhs) { + *self = &*self * rhs; + } + } + }; +} + +/// Represents an element of the scalar field $\mathbb{F}_q$ of the Curve25519 elliptic +/// curve construction. +// The internal representation of this type is four 64-bit unsigned +// integers in little-endian order. `Scalar` values are always in +// Montgomery form; i.e., Scalar(a) = aR mod q, with R = 2^256. +#[derive(Clone, Copy, Eq, Serialize, Deserialize)] +pub struct Scalar(pub(crate) [u64; 4]); + +impl fmt::Debug for Scalar { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let tmp = self.to_bytes(); + write!(f, "0x")?; + for &b in tmp.iter().rev() { + write!(f, "{:02x}", b)?; + } + Ok(()) + } +} + +impl From for Scalar { + fn from(val: u64) -> Scalar { + Scalar([val, 0, 0, 0]) * R2 + } +} + +impl ConstantTimeEq for Scalar { + fn ct_eq(&self, other: &Self) -> Choice { + self.0[0].ct_eq(&other.0[0]) + & self.0[1].ct_eq(&other.0[1]) + & self.0[2].ct_eq(&other.0[2]) + & self.0[3].ct_eq(&other.0[3]) + } +} + +impl PartialEq for Scalar { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.ct_eq(other).unwrap_u8() == 1 + } +} + +impl ConditionallySelectable for Scalar { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Scalar([ + u64::conditional_select(&a.0[0], &b.0[0], choice), + u64::conditional_select(&a.0[1], &b.0[1], choice), + u64::conditional_select(&a.0[2], &b.0[2], choice), + u64::conditional_select(&a.0[3], &b.0[3], choice), + ]) + } +} + +/// Constant representing the modulus +/// q = 2^252 + 27742317777372353535851937790883648493 +/// 0x1000000000000000 0000000000000000 14def9dea2f79cd6 5812631a5cf5d3ed +const MODULUS: Scalar = Scalar([ + 0x5812_631a_5cf5_d3ed, + 0x14de_f9de_a2f7_9cd6, + 0x0000_0000_0000_0000, + 0x1000_0000_0000_0000, +]); + +impl<'a> Neg for &'a Scalar { + type Output = Scalar; + + #[inline] + fn neg(self) -> Scalar { + self.neg() + } +} + +impl Neg for Scalar { + type Output = Scalar; + + #[inline] + fn neg(self) -> Scalar { + -&self + } +} + +impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar { + type Output = Scalar; + + #[inline] + fn sub(self, rhs: &'b Scalar) -> Scalar { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Scalar> for &'a Scalar { + type Output = Scalar; + + #[inline] + fn add(self, rhs: &'b Scalar) -> Scalar { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar { + type Output = Scalar; + + #[inline] + fn mul(self, rhs: &'b Scalar) -> Scalar { + self.mul(rhs) + } +} + +impl_binops_additive!(Scalar, Scalar); +impl_binops_multiplicative!(Scalar, Scalar); + +/// INV = -(q^{-1} mod 2^64) mod 
2^64
+const INV: u64 = 0xd2b5_1da3_1254_7e1b;
+
+/// R = 2^256 mod q
+const R: Scalar = Scalar([
+  0xd6ec_3174_8d98_951d,
+  0xc6ef_5bf4_737d_cf70,
+  0xffff_ffff_ffff_fffe,
+  0x0fff_ffff_ffff_ffff,
+]);
+
+/// R^2 = 2^512 mod q
+const R2: Scalar = Scalar([
+  0xa406_11e3_449c_0f01,
+  0xd00e_1ba7_6885_9347,
+  0xceec_73d2_17f5_be65,
+  0x0399_411b_7c30_9a3d,
+]);
+
+/// R^3 = 2^768 mod q
+const R3: Scalar = Scalar([
+  0x2a9e_4968_7b83_a2db,
+  0x2783_24e6_aef7_f3ec,
+  0x8065_dc6c_04ec_5b65,
+  0x0e53_0b77_3599_cec7,
+]);
+
+impl Default for Scalar {
+  #[inline]
+  fn default() -> Self {
+    Self::zero()
+  }
+}
+
+impl<T> Product<T> for Scalar
+where
+  T: Borrow<Scalar>,
+{
+  fn product<I>(iter: I) -> Self
+  where
+    I: Iterator<Item = T>,
+  {
+    iter.fold(Scalar::one(), |acc, item| acc * item.borrow())
+  }
+}
+
+impl<T> Sum<T> for Scalar
+where
+  T: Borrow<Scalar>,
+{
+  fn sum<I>(iter: I) -> Self
+  where
+    I: Iterator<Item = T>,
+  {
+    iter.fold(Scalar::zero(), |acc, item| acc + item.borrow())
+  }
+}
+
+impl Zeroize for Scalar {
+  fn zeroize(&mut self) {
+    self.0 = [0u64; 4];
+  }
+}
+
+impl Scalar {
+  /// Returns zero, the additive identity.
+  #[inline]
+  pub const fn zero() -> Scalar {
+    Scalar([0, 0, 0, 0])
+  }
+
+  /// Returns one, the multiplicative identity.
+  #[inline]
+  pub const fn one() -> Scalar {
+    R
+  }
+
+  pub fn random<Rng: RngCore + CryptoRng>(rng: &mut Rng) -> Self {
+    let mut limbs = [0u64; 8];
+    for i in 0..8 {
+      limbs[i] = rng.next_u64();
+    }
+    Scalar::from_u512(limbs)
+  }
+
+  /// Doubles this field element.
+  #[inline]
+  pub const fn double(&self) -> Scalar {
+    // TODO: This can be achieved more efficiently with a bitshift.
+    self.add(self)
+  }
+
+  /// Attempts to convert a little-endian byte representation of
+  /// a scalar into a `Scalar`, failing if the input is not canonical.
+  pub fn from_bytes(bytes: &[u8; 32]) -> CtOption<Scalar> {
+    let mut tmp = Scalar([0, 0, 0, 0]);
+
+    tmp.0[0] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[..8]).unwrap());
+    tmp.0[1] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap());
+    tmp.0[2] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap());
+    tmp.0[3] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap());
+
+    // Try to subtract the modulus
+    let (_, borrow) = sbb(tmp.0[0], MODULUS.0[0], 0);
+    let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow);
+    let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow);
+    let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow);
+
+    // If the element is smaller than MODULUS then the
+    // subtraction will underflow, producing a borrow value
+    // of 0xffff...ffff. Otherwise, it'll be zero.
+    let is_some = (borrow as u8) & 1;
+
+    // Convert to Montgomery form by computing
+    // (a.R^0 * R^2) / R = a.R
+    tmp *= &R2;
+
+    CtOption::new(tmp, Choice::from(is_some))
+  }
+
+  /// Converts an element of `Scalar` into a byte representation in
+  /// little-endian byte order.
+  pub fn to_bytes(&self) -> [u8; 32] {
+    // Turn into canonical form by computing
+    // (a.R) / R = a
+    let tmp = Scalar::montgomery_reduce(self.0[0], self.0[1], self.0[2], self.0[3], 0, 0, 0, 0);
+
+    let mut res = [0; 32];
+    res[..8].copy_from_slice(&tmp.0[0].to_le_bytes());
+    res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes());
+    res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes());
+    res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes());
+
+    res
+  }
+
+  /// Converts a 512-bit little endian integer into
+  /// a `Scalar` by reducing by the modulus.
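+  // Editor's note (not part of the original patch): reducing 512 uniform
+  // bits modulo q is how `random` avoids sampling bias; since q is roughly
+  // 2^252, the statistical distance from uniform is on the order of 2^-260.
+  // Internally, from_u512 splits the input as d0 + 2^256 * d1 and uses the
+  // precomputed constants above. A sketch of the identity, where mont_mul
+  // denotes the Montgomery product a * b * R^-1:
+  //
+  //   mont_mul(d0, R2) = d0 * R^2 / R = d0 * R    // Montgomery form of d0
+  //   mont_mul(d1, R3) = d1 * R^3 / R = d1 * R^2  // form of d1 * 2^256
+  //
+  // so their sum represents d0 + 2^256 * d1 mod q in Montgomery form.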
+ pub fn from_bytes_wide(bytes: &[u8; 64]) -> Scalar { + Scalar::from_u512([ + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[..8]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[32..40]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[40..48]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[48..56]).unwrap()), + u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[56..64]).unwrap()), + ]) + } + + fn from_u512(limbs: [u64; 8]) -> Scalar { + // We reduce an arbitrary 512-bit number by decomposing it into two 256-bit digits + // with the higher bits multiplied by 2^256. Thus, we perform two reductions + // + // 1. the lower bits are multiplied by R^2, as normal + // 2. the upper bits are multiplied by R^2 * 2^256 = R^3 + // + // and computing their sum in the field. It remains to see that arbitrary 256-bit + // numbers can be placed into Montgomery form safely using the reduction. The + // reduction works so long as the product is less than R=2^256 multipled by + // the modulus. This holds because for any `c` smaller than the modulus, we have + // that (2^256 - 1)*c is an acceptable product for the reduction. Therefore, the + // reduction always works so long as `c` is in the field; in this case it is either the + // constant `R2` or `R3`. + let d0 = Scalar([limbs[0], limbs[1], limbs[2], limbs[3]]); + let d1 = Scalar([limbs[4], limbs[5], limbs[6], limbs[7]]); + // Convert to Montgomery form + d0 * R2 + d1 * R3 + } + + /// Converts from an integer represented in little endian + /// into its (congruent) `Scalar` representation. + pub const fn from_raw(val: [u64; 4]) -> Self { + (&Scalar(val)).mul(&R2) + } + + /// Squares this element. + #[inline] + pub const fn square(&self) -> Scalar { + let (r1, carry) = mac(0, self.0[0], self.0[1], 0); + let (r2, carry) = mac(0, self.0[0], self.0[2], carry); + let (r3, r4) = mac(0, self.0[0], self.0[3], carry); + + let (r3, carry) = mac(r3, self.0[1], self.0[2], 0); + let (r4, r5) = mac(r4, self.0[1], self.0[3], carry); + + let (r5, r6) = mac(r5, self.0[2], self.0[3], 0); + + let r7 = r6 >> 63; + let r6 = (r6 << 1) | (r5 >> 63); + let r5 = (r5 << 1) | (r4 >> 63); + let r4 = (r4 << 1) | (r3 >> 63); + let r3 = (r3 << 1) | (r2 >> 63); + let r2 = (r2 << 1) | (r1 >> 63); + let r1 = r1 << 1; + + let (r0, carry) = mac(0, self.0[0], self.0[0], 0); + let (r1, carry) = adc(0, r1, carry); + let (r2, carry) = mac(r2, self.0[1], self.0[1], carry); + let (r3, carry) = adc(0, r3, carry); + let (r4, carry) = mac(r4, self.0[2], self.0[2], carry); + let (r5, carry) = adc(0, r5, carry); + let (r6, carry) = mac(r6, self.0[3], self.0[3], carry); + let (r7, _) = adc(0, r7, carry); + + Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7) + } + + /// Exponentiates `self` by `by`, where `by` is a + /// little-endian order integer exponent. + pub fn pow(&self, by: &[u64; 4]) -> Self { + let mut res = Self::one(); + for e in by.iter().rev() { + for i in (0..64).rev() { + res = res.square(); + let mut tmp = res; + tmp *= self; + res.conditional_assign(&tmp, (((*e >> i) & 0x1) as u8).into()); + } + } + res + } + + /// Exponentiates `self` by `by`, where `by` is a + /// little-endian order integer exponent. 
+ /// + /// **This operation is variable time with respect + /// to the exponent.** If the exponent is fixed, + /// this operation is effectively constant time. + pub fn pow_vartime(&self, by: &[u64; 4]) -> Self { + let mut res = Self::one(); + for e in by.iter().rev() { + for i in (0..64).rev() { + res = res.square(); + + if ((*e >> i) & 1) == 1 { + res.mul_assign(self); + } + } + } + res + } + + pub fn invert(&self) -> CtOption { + // Uses the addition chain from + // https://briansmith.org/ecc-inversion-addition-chains-01#curve25519_scalar_inversion + // implementation adapted from curve25519-dalek + let _1 = self; + let _10 = _1.square(); + let _100 = _10.square(); + let _11 = &_10 * _1; + let _101 = &_10 * &_11; + let _111 = &_10 * &_101; + let _1001 = &_10 * &_111; + let _1011 = &_10 * &_1001; + let _1111 = &_100 * &_1011; + + // _10000 + let mut y = &_1111 * _1; + + #[inline] + fn square_multiply(y: &mut Scalar, squarings: usize, x: &Scalar) { + for _ in 0..squarings { + *y = y.square(); + } + *y = y.mul(x); + } + + square_multiply(&mut y, 123 + 3, &_101); + square_multiply(&mut y, 2 + 2, &_11); + square_multiply(&mut y, 1 + 4, &_1111); + square_multiply(&mut y, 1 + 4, &_1111); + square_multiply(&mut y, 4, &_1001); + square_multiply(&mut y, 2, &_11); + square_multiply(&mut y, 1 + 4, &_1111); + square_multiply(&mut y, 1 + 3, &_101); + square_multiply(&mut y, 3 + 3, &_101); + square_multiply(&mut y, 3, &_111); + square_multiply(&mut y, 1 + 4, &_1111); + square_multiply(&mut y, 2 + 3, &_111); + square_multiply(&mut y, 2 + 2, &_11); + square_multiply(&mut y, 1 + 4, &_1011); + square_multiply(&mut y, 2 + 4, &_1011); + square_multiply(&mut y, 6 + 4, &_1001); + square_multiply(&mut y, 2 + 2, &_11); + square_multiply(&mut y, 3 + 2, &_11); + square_multiply(&mut y, 3 + 2, &_11); + square_multiply(&mut y, 1 + 4, &_1001); + square_multiply(&mut y, 1 + 3, &_111); + square_multiply(&mut y, 2 + 4, &_1111); + square_multiply(&mut y, 1 + 4, &_1011); + square_multiply(&mut y, 3, &_101); + square_multiply(&mut y, 2 + 4, &_1111); + square_multiply(&mut y, 3, &_101); + square_multiply(&mut y, 1 + 2, &_11); + + CtOption::new(y, !self.ct_eq(&Self::zero())) + } + + pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar { + // This code is essentially identical to the FieldElement + // implementation, and is documented there. Unfortunately, + // it's not easy to write it generically, since here we want + // to use `UnpackedScalar`s internally, and `Scalar`s + // externally, but there's no corresponding distinction for + // field elements. + + use zeroize::Zeroizing; + + let n = inputs.len(); + let one = Scalar::one(); + + // Place scratch storage in a Zeroizing wrapper to wipe it when + // we pass out of scope. 
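+    // Editor's note (not part of the original patch): this is Montgomery's
+    // batch-inversion trick: one field inversion plus roughly 3(n - 1)
+    // multiplications replaces n inversions. For inputs [a, b, c], scratch
+    // holds the prefix products [1, a, ab]; after inverting the running
+    // product abc once, a backward sweep peels off one inverse at a time:
+    //
+    //   acc = (abc)^-1
+    //   c^-1 = acc * ab,  acc = acc * c   // now acc = (ab)^-1
+    //   b^-1 = acc * a,   acc = acc * b   // now acc = a^-1
+    //   a^-1 = acc * 1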
+ let scratch_vec = vec![one; n]; + let mut scratch = Zeroizing::new(scratch_vec); + + // Keep an accumulator of all of the previous products + let mut acc = Scalar::one(); + + // Pass through the input vector, recording the previous + // products in the scratch space + for (input, scratch) in inputs.iter().zip(scratch.iter_mut()) { + *scratch = acc; + + acc = acc * input; + } + + // acc is nonzero iff all inputs are nonzero + debug_assert!(acc != Scalar::zero()); + + // Compute the inverse of all products + acc = acc.invert().unwrap(); + + // We need to return the product of all inverses later + let ret = acc; + + // Pass through the vector backwards to compute the inverses + // in place + for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) { + let tmp = &acc * input.clone(); + *input = &acc * scratch; + acc = tmp; + } + + ret + } + + #[inline(always)] + const fn montgomery_reduce( + r0: u64, + r1: u64, + r2: u64, + r3: u64, + r4: u64, + r5: u64, + r6: u64, + r7: u64, + ) -> Self { + // The Montgomery reduction here is based on Algorithm 14.32 in + // Handbook of Applied Cryptography + // . + + let k = r0.wrapping_mul(INV); + let (_, carry) = mac(r0, k, MODULUS.0[0], 0); + let (r1, carry) = mac(r1, k, MODULUS.0[1], carry); + let (r2, carry) = mac(r2, k, MODULUS.0[2], carry); + let (r3, carry) = mac(r3, k, MODULUS.0[3], carry); + let (r4, carry2) = adc(r4, 0, carry); + + let k = r1.wrapping_mul(INV); + let (_, carry) = mac(r1, k, MODULUS.0[0], 0); + let (r2, carry) = mac(r2, k, MODULUS.0[1], carry); + let (r3, carry) = mac(r3, k, MODULUS.0[2], carry); + let (r4, carry) = mac(r4, k, MODULUS.0[3], carry); + let (r5, carry2) = adc(r5, carry2, carry); + + let k = r2.wrapping_mul(INV); + let (_, carry) = mac(r2, k, MODULUS.0[0], 0); + let (r3, carry) = mac(r3, k, MODULUS.0[1], carry); + let (r4, carry) = mac(r4, k, MODULUS.0[2], carry); + let (r5, carry) = mac(r5, k, MODULUS.0[3], carry); + let (r6, carry2) = adc(r6, carry2, carry); + + let k = r3.wrapping_mul(INV); + let (_, carry) = mac(r3, k, MODULUS.0[0], 0); + let (r4, carry) = mac(r4, k, MODULUS.0[1], carry); + let (r5, carry) = mac(r5, k, MODULUS.0[2], carry); + let (r6, carry) = mac(r6, k, MODULUS.0[3], carry); + let (r7, _) = adc(r7, carry2, carry); + + // Result may be within MODULUS of the correct value + (&Scalar([r4, r5, r6, r7])).sub(&MODULUS) + } + + /// Multiplies `rhs` by `self`, returning the result. + #[inline] + pub const fn mul(&self, rhs: &Self) -> Self { + // Schoolbook multiplication + + let (r0, carry) = mac(0, self.0[0], rhs.0[0], 0); + let (r1, carry) = mac(0, self.0[0], rhs.0[1], carry); + let (r2, carry) = mac(0, self.0[0], rhs.0[2], carry); + let (r3, r4) = mac(0, self.0[0], rhs.0[3], carry); + + let (r1, carry) = mac(r1, self.0[1], rhs.0[0], 0); + let (r2, carry) = mac(r2, self.0[1], rhs.0[1], carry); + let (r3, carry) = mac(r3, self.0[1], rhs.0[2], carry); + let (r4, r5) = mac(r4, self.0[1], rhs.0[3], carry); + + let (r2, carry) = mac(r2, self.0[2], rhs.0[0], 0); + let (r3, carry) = mac(r3, self.0[2], rhs.0[1], carry); + let (r4, carry) = mac(r4, self.0[2], rhs.0[2], carry); + let (r5, r6) = mac(r5, self.0[2], rhs.0[3], carry); + + let (r3, carry) = mac(r3, self.0[3], rhs.0[0], 0); + let (r4, carry) = mac(r4, self.0[3], rhs.0[1], carry); + let (r5, carry) = mac(r5, self.0[3], rhs.0[2], carry); + let (r6, r7) = mac(r6, self.0[3], rhs.0[3], carry); + + Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7) + } + + /// Subtracts `rhs` from `self`, returning the result. 
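+  // Editor's note (not part of the original patch): `montgomery_reduce`
+  // above returns a value that may exceed q but is always below 2q, so a
+  // single call to `sub(&MODULUS)` suffices as the final correction: `sub`
+  // computes self - q and, when that underflows, adds q back via the borrow
+  // mask, which amounts to a constant-time version of
+  //
+  //   if t >= q { t - q } else { t }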
+ #[inline] + pub const fn sub(&self, rhs: &Self) -> Self { + let (d0, borrow) = sbb(self.0[0], rhs.0[0], 0); + let (d1, borrow) = sbb(self.0[1], rhs.0[1], borrow); + let (d2, borrow) = sbb(self.0[2], rhs.0[2], borrow); + let (d3, borrow) = sbb(self.0[3], rhs.0[3], borrow); + + // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise + // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus. + let (d0, carry) = adc(d0, MODULUS.0[0] & borrow, 0); + let (d1, carry) = adc(d1, MODULUS.0[1] & borrow, carry); + let (d2, carry) = adc(d2, MODULUS.0[2] & borrow, carry); + let (d3, _) = adc(d3, MODULUS.0[3] & borrow, carry); + + Scalar([d0, d1, d2, d3]) + } + + /// Adds `rhs` to `self`, returning the result. + #[inline] + pub const fn add(&self, rhs: &Self) -> Self { + let (d0, carry) = adc(self.0[0], rhs.0[0], 0); + let (d1, carry) = adc(self.0[1], rhs.0[1], carry); + let (d2, carry) = adc(self.0[2], rhs.0[2], carry); + let (d3, _) = adc(self.0[3], rhs.0[3], carry); + + // Attempt to subtract the modulus, to ensure the value + // is smaller than the modulus. + (&Scalar([d0, d1, d2, d3])).sub(&MODULUS) + } + + /// Negates `self`. + #[inline] + pub const fn neg(&self) -> Self { + // Subtract `self` from `MODULUS` to negate. Ignore the final + // borrow because it cannot underflow; self is guaranteed to + // be in the field. + let (d0, borrow) = sbb(MODULUS.0[0], self.0[0], 0); + let (d1, borrow) = sbb(MODULUS.0[1], self.0[1], borrow); + let (d2, borrow) = sbb(MODULUS.0[2], self.0[2], borrow); + let (d3, _) = sbb(MODULUS.0[3], self.0[3], borrow); + + // `tmp` could be `MODULUS` if `self` was zero. Create a mask that is + // zero if `self` was zero, and `u64::max_value()` if self was nonzero. + let mask = (((self.0[0] | self.0[1] | self.0[2] | self.0[3]) == 0) as u64).wrapping_sub(1); + + Scalar([d0 & mask, d1 & mask, d2 & mask, d3 & mask]) + } +} + +impl<'a> From<&'a Scalar> for [u8; 32] { + fn from(value: &'a Scalar) -> [u8; 32] { + value.to_bytes() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_inv() { + // Compute -(q^{-1} mod 2^64) mod 2^64 by exponentiating + // by totient(2**64) - 1 + + let mut inv = 1u64; + for _ in 0..63 { + inv = inv.wrapping_mul(inv); + inv = inv.wrapping_mul(MODULUS.0[0]); + } + inv = inv.wrapping_neg(); + + assert_eq!(inv, INV); + } + + #[cfg(feature = "std")] + #[test] + fn test_debug() { + assert_eq!( + format!("{:?}", Scalar::zero()), + "0x0000000000000000000000000000000000000000000000000000000000000000" + ); + assert_eq!( + format!("{:?}", Scalar::one()), + "0x0000000000000000000000000000000000000000000000000000000000000001" + ); + assert_eq!( + format!("{:?}", R2), + "0x0ffffffffffffffffffffffffffffffec6ef5bf4737dcf70d6ec31748d98951d" + ); + } + + #[test] + fn test_equality() { + assert_eq!(Scalar::zero(), Scalar::zero()); + assert_eq!(Scalar::one(), Scalar::one()); + assert_eq!(R2, R2); + + assert!(Scalar::zero() != Scalar::one()); + assert!(Scalar::one() != R2); + } + + #[test] + fn test_to_bytes() { + assert_eq!( + Scalar::zero().to_bytes(), + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 + ] + ); + + assert_eq!( + Scalar::one().to_bytes(), + [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 + ] + ); + + assert_eq!( + R2.to_bytes(), + [ + 29, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 15 + ] + ); + + assert_eq!( + (-&Scalar::one()).to_bytes(), + [ + 236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + ] + ); + } + + #[test] + fn test_from_bytes() { + assert_eq!( + Scalar::from_bytes(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 + ]) + .unwrap(), + Scalar::zero() + ); + + assert_eq!( + Scalar::from_bytes(&[ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0 + ]) + .unwrap(), + Scalar::one() + ); + + assert_eq!( + Scalar::from_bytes(&[ + 29, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15 + ]) + .unwrap(), + R2 + ); + + // -1 should work + assert!( + Scalar::from_bytes(&[ + 236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + ]) + .is_some() + .unwrap_u8() + == 1 + ); + + // modulus is invalid + assert!( + Scalar::from_bytes(&[ + 1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 57, 51, 72, 125, 157, 41, 83, 167, 237, 115 + ]) + .is_none() + .unwrap_u8() + == 1 + ); + + // Anything larger than the modulus is invalid + assert!( + Scalar::from_bytes(&[ + 2, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 57, 51, 72, 125, 157, 41, 83, 167, 237, 115 + ]) + .is_none() + .unwrap_u8() + == 1 + ); + assert!( + Scalar::from_bytes(&[ + 1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 58, 51, 72, 125, 157, 41, 83, 167, 237, 115 + ]) + .is_none() + .unwrap_u8() + == 1 + ); + assert!( + Scalar::from_bytes(&[ + 1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8, 216, + 57, 51, 72, 125, 157, 41, 83, 167, 237, 116 + ]) + .is_none() + .unwrap_u8() + == 1 + ); + } + + #[test] + fn test_from_u512_zero() { + assert_eq!( + Scalar::zero(), + Scalar::from_u512([ + MODULUS.0[0], + MODULUS.0[1], + MODULUS.0[2], + MODULUS.0[3], + 0, + 0, + 0, + 0 + ]) + ); + } + + #[test] + fn test_from_u512_r() { + assert_eq!(R, Scalar::from_u512([1, 0, 0, 0, 0, 0, 0, 0])); + } + + #[test] + fn test_from_u512_r2() { + assert_eq!(R2, Scalar::from_u512([0, 0, 0, 0, 1, 0, 0, 0])); + } + + #[test] + fn test_from_u512_max() { + let max_u64 = 0xffffffffffffffff; + assert_eq!( + R3 - R, + Scalar::from_u512([max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64]) + ); + } + + #[test] + fn test_from_bytes_wide_r2() { + assert_eq!( + R2, + Scalar::from_bytes_wide(&[ + 29, 149, 152, 141, 116, 49, 236, 214, 112, 207, 125, 115, 244, 91, 239, 198, 254, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + } + + #[test] + fn test_from_bytes_wide_negative_one() { + assert_eq!( + -&Scalar::one(), + Scalar::from_bytes_wide(&[ + 236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + } + + #[test] + fn test_from_bytes_wide_maximum() { + assert_eq!( + Scalar::from_raw([ + 0xa40611e3449c0f00, + 0xd00e1ba768859347, + 0xceec73d217f5be65, + 0x0399411b7c309a3d + ]), + Scalar::from_bytes_wide(&[0xff; 
64]) + ); + } + + #[test] + fn test_zero() { + assert_eq!(Scalar::zero(), -&Scalar::zero()); + assert_eq!(Scalar::zero(), Scalar::zero() + Scalar::zero()); + assert_eq!(Scalar::zero(), Scalar::zero() - Scalar::zero()); + assert_eq!(Scalar::zero(), Scalar::zero() * Scalar::zero()); + } + + const LARGEST: Scalar = Scalar([ + 0x5812631a5cf5d3ec, + 0x14def9dea2f79cd6, + 0x0000000000000000, + 0x1000000000000000, + ]); + + #[test] + fn test_addition() { + let mut tmp = LARGEST; + tmp += &LARGEST; + + assert_eq!( + tmp, + Scalar([ + 0x5812631a5cf5d3eb, + 0x14def9dea2f79cd6, + 0x0000000000000000, + 0x1000000000000000, + ]) + ); + + let mut tmp = LARGEST; + tmp += &Scalar([1, 0, 0, 0]); + + assert_eq!(tmp, Scalar::zero()); + } + + #[test] + fn test_negation() { + let tmp = -&LARGEST; + + assert_eq!(tmp, Scalar([1, 0, 0, 0])); + + let tmp = -&Scalar::zero(); + assert_eq!(tmp, Scalar::zero()); + let tmp = -&Scalar([1, 0, 0, 0]); + assert_eq!(tmp, LARGEST); + } + + #[test] + fn test_subtraction() { + let mut tmp = LARGEST; + tmp -= &LARGEST; + + assert_eq!(tmp, Scalar::zero()); + + let mut tmp = Scalar::zero(); + tmp -= &LARGEST; + + let mut tmp2 = MODULUS; + tmp2 -= &LARGEST; + + assert_eq!(tmp, tmp2); + } + + #[test] + fn test_multiplication() { + let mut cur = LARGEST; + + for _ in 0..100 { + let mut tmp = cur; + tmp *= &cur; + + let mut tmp2 = Scalar::zero(); + for b in cur + .to_bytes() + .iter() + .rev() + .flat_map(|byte| (0..8).rev().map(move |i| ((byte >> i) & 1u8) == 1u8)) + { + let tmp3 = tmp2; + tmp2.add_assign(&tmp3); + + if b { + tmp2.add_assign(&cur); + } + } + + assert_eq!(tmp, tmp2); + + cur.add_assign(&LARGEST); + } + } + + #[test] + fn test_squaring() { + let mut cur = LARGEST; + + for _ in 0..100 { + let mut tmp = cur; + tmp = tmp.square(); + + let mut tmp2 = Scalar::zero(); + for b in cur + .to_bytes() + .iter() + .rev() + .flat_map(|byte| (0..8).rev().map(move |i| ((byte >> i) & 1u8) == 1u8)) + { + let tmp3 = tmp2; + tmp2.add_assign(&tmp3); + + if b { + tmp2.add_assign(&cur); + } + } + + assert_eq!(tmp, tmp2); + + cur.add_assign(&LARGEST); + } + } + + #[test] + fn test_inversion() { + assert_eq!(Scalar::zero().invert().is_none().unwrap_u8(), 1); + assert_eq!(Scalar::one().invert().unwrap(), Scalar::one()); + assert_eq!((-&Scalar::one()).invert().unwrap(), -&Scalar::one()); + + let mut tmp = R2; + + for _ in 0..100 { + let mut tmp2 = tmp.invert().unwrap(); + tmp2.mul_assign(&tmp); + + assert_eq!(tmp2, Scalar::one()); + + tmp.add_assign(&R2); + } + } + + #[test] + fn test_invert_is_pow() { + let q_minus_2 = [ + 0x5812631a5cf5d3eb, + 0x14def9dea2f79cd6, + 0x0000000000000000, + 0x1000000000000000, + ]; + + let mut r1 = R; + let mut r2 = R; + let mut r3 = R; + + for _ in 0..100 { + r1 = r1.invert().unwrap(); + r2 = r2.pow_vartime(&q_minus_2); + r3 = r3.pow(&q_minus_2); + + assert_eq!(r1, r2); + assert_eq!(r2, r3); + // Add R so we check something different next time around + r1.add_assign(&R); + r2 = r1; + r3 = r1; + } + } + + #[test] + fn test_from_raw() { + assert_eq!( + Scalar::from_raw([ + 0xd6ec31748d98951c, + 0xc6ef5bf4737dcf70, + 0xfffffffffffffffe, + 0x0fffffffffffffff + ]), + Scalar::from_raw([0xffffffffffffffff; 4]) + ); + + assert_eq!(Scalar::from_raw(MODULUS.0), Scalar::zero()); + + assert_eq!(Scalar::from_raw([1, 0, 0, 0]), R); + } + + #[test] + fn test_double() { + let a = Scalar::from_raw([ + 0x1fff3231233ffffd, + 0x4884b7fa00034802, + 0x998c4fefecbc4ff3, + 0x1824b159acc50562, + ]); + + assert_eq!(a.double(), a + a); + } +} diff --git 
a/third_party/Dorian/src/sparse_mlpoly.rs b/third_party/Dorian/src/sparse_mlpoly.rs new file mode 100644 index 000000000..cbfa42e8e --- /dev/null +++ b/third_party/Dorian/src/sparse_mlpoly.rs @@ -0,0 +1,1697 @@ +#![allow(clippy::type_complexity)] +#![allow(clippy::too_many_arguments)] +#![allow(clippy::needless_range_loop)] +use super::dense_mlpoly::DensePolynomial; +use super::dense_mlpoly::{ + EqPolynomial, IdentityPolynomial, PolyCommitment, PolyCommitmentGens, PolyEvalProof, +}; +use super::errors::ProofVerifyError; +use super::math::Math; +use super::product_tree::{DotProductCircuit, ProductCircuit, ProductCircuitEvalProofBatched}; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::timer::Timer; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use core::cmp::Ordering; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "multicore")] +use rayon::prelude::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct SparseMatEntry { + row: usize, + col: usize, + val: Scalar, +} + +impl SparseMatEntry { + pub fn new(row: usize, col: usize, val: Scalar) -> Self { + SparseMatEntry { row, col, val } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SparseMatPolynomial { + num_vars_x: usize, + num_vars_y: usize, + M: Vec, +} + +pub struct Derefs { + row_ops_val: Vec, + col_ops_val: Vec, + comb: DensePolynomial, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct DerefsCommitment { + comm_ops_val: PolyCommitment, +} + +impl Derefs { + pub fn new(row_ops_val: Vec, col_ops_val: Vec) -> Self { + assert_eq!(row_ops_val.len(), col_ops_val.len()); + + let derefs = { + // combine all polynomials into a single polynomial (used below to produce a single commitment) + let comb = DensePolynomial::merge(row_ops_val.iter().chain(col_ops_val.iter())); + + Derefs { + row_ops_val, + col_ops_val, + comb, + } + }; + + derefs + } + + pub fn commit(&self, gens: &PolyCommitmentGens) -> DerefsCommitment { + let (comm_ops_val, _blinds) = self.comb.commit(gens, None); + DerefsCommitment { comm_ops_val } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct DerefsEvalProof { + proof_derefs: PolyEvalProof, +} + +impl DerefsEvalProof { + fn protocol_name() -> &'static [u8] { + b"Derefs evaluation proof" + } + + fn prove_single( + joint_poly: &DensePolynomial, + r: &[Scalar], + evals: Vec, + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> PolyEvalProof { + assert_eq!(joint_poly.get_num_vars(), r.len() + evals.len().log_2()); + + // append the claimed evaluations to transcript + evals.append_to_transcript(b"evals_ops_val", transcript); + + // n-to-1 reduction + let (r_joint, eval_joint) = { + let challenges = + transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2()); + let mut poly_evals = DensePolynomial::new(evals); + for i in (0..challenges.len()).rev() { + poly_evals.bound_poly_var_bot(&challenges[i]); + } + assert_eq!(poly_evals.len(), 1); + let joint_claim_eval = poly_evals[0]; + let mut r_joint = challenges; + r_joint.extend(r); + + debug_assert_eq!(joint_poly.evaluate(&r_joint), joint_claim_eval); + (r_joint, joint_claim_eval) + }; + // decommit the joint polynomial at r_joint + eval_joint.append_to_transcript(b"joint_claim_eval", transcript); + let (proof_derefs, _comm_derefs_eval) = PolyEvalProof::prove( + joint_poly, + None, + &r_joint, + &eval_joint, + None, + gens, + transcript, + random_tape, + ); + + proof_derefs + } + + // evalues both 
polynomials at r and produces a joint proof of opening + pub fn prove( + derefs: &Derefs, + eval_row_ops_val_vec: &[Scalar], + eval_col_ops_val_vec: &[Scalar], + r: &[Scalar], + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> Self { + transcript.append_protocol_name(DerefsEvalProof::protocol_name()); + + let evals = { + let mut evals = eval_row_ops_val_vec.to_owned(); + evals.extend(eval_col_ops_val_vec); + evals.resize(evals.len().next_power_of_two(), Scalar::zero()); + evals + }; + let proof_derefs = + DerefsEvalProof::prove_single(&derefs.comb, r, evals, gens, transcript, random_tape); + + DerefsEvalProof { proof_derefs } + } + + fn verify_single( + proof: &PolyEvalProof, + comm: &PolyCommitment, + r: &[Scalar], + evals: Vec, + gens: &PolyCommitmentGens, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + // append the claimed evaluations to transcript + evals.append_to_transcript(b"evals_ops_val", transcript); + + // n-to-1 reduction + let challenges = + transcript.challenge_vector(b"challenge_combine_n_to_one", evals.len().log_2()); + let mut poly_evals = DensePolynomial::new(evals); + for i in (0..challenges.len()).rev() { + poly_evals.bound_poly_var_bot(&challenges[i]); + } + assert_eq!(poly_evals.len(), 1); + let joint_claim_eval = poly_evals[0]; + let mut r_joint = challenges; + r_joint.extend(r); + + // decommit the joint polynomial at r_joint + joint_claim_eval.append_to_transcript(b"joint_claim_eval", transcript); + + proof.verify_plain(gens, transcript, &r_joint, &joint_claim_eval, comm) + } + + // verify evaluations of both polynomials at r + pub fn verify( + &self, + r: &[Scalar], + eval_row_ops_val_vec: &[Scalar], + eval_col_ops_val_vec: &[Scalar], + gens: &PolyCommitmentGens, + comm: &DerefsCommitment, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + transcript.append_protocol_name(DerefsEvalProof::protocol_name()); + let mut evals = eval_row_ops_val_vec.to_owned(); + evals.extend(eval_col_ops_val_vec); + evals.resize(evals.len().next_power_of_two(), Scalar::zero()); + + DerefsEvalProof::verify_single( + &self.proof_derefs, + &comm.comm_ops_val, + r, + evals, + gens, + transcript, + ) + } +} + +impl AppendToTranscript for DerefsCommitment { + fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) { + transcript.append_message(b"derefs_commitment", b"begin_derefs_commitment"); + self.comm_ops_val.append_to_transcript(label, transcript); + transcript.append_message(b"derefs_commitment", b"end_derefs_commitment"); + } +} + +struct AddrTimestamps { + ops_addr_usize: Vec>, + ops_addr: Vec, + read_ts: Vec, + audit_ts: DensePolynomial, +} + +impl AddrTimestamps { + pub fn new(num_cells: usize, num_ops: usize, ops_addr: Vec>) -> Self { + for item in ops_addr.iter() { + assert_eq!(item.len(), num_ops); + } + + let mut audit_ts = vec![0usize; num_cells]; + let mut ops_addr_vec: Vec = Vec::new(); + let mut read_ts_vec: Vec = Vec::new(); + for ops_addr_inst in ops_addr.iter() { + let mut read_ts = vec![0usize; num_ops]; + + // since read timestamps are trustworthy, we can simply increment the r-ts to obtain a w-ts + // this is sufficient to ensure that the write-set, consisting of (addr, val, ts) tuples, is a set + for i in 0..num_ops { + let addr = ops_addr_inst[i]; + assert!(addr < num_cells); + let r_ts = audit_ts[addr]; + read_ts[i] = r_ts; + + let w_ts = r_ts + 1; + audit_ts[addr] = w_ts; + } + + ops_addr_vec.push(DensePolynomial::from_usize(ops_addr_inst)); + 
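+      // Editor's note (not part of the original patch): this loop implements
+      // the offline-memory-checking discipline: every read of an address is
+      // paired with a write of the same value at timestamp read_ts + 1, so
+      //
+      //   init ∪ writes == reads ∪ audit   (as multisets of (addr, val, ts))
+      //
+      // holds iff every read returned the last value written. The product
+      // circuits built later check this multiset equality under a random
+      // hash h(addr, val, ts) = ts * r^2 + val * r + addr.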
read_ts_vec.push(DensePolynomial::from_usize(&read_ts)); + } + + AddrTimestamps { + ops_addr: ops_addr_vec, + ops_addr_usize: ops_addr, + read_ts: read_ts_vec, + audit_ts: DensePolynomial::from_usize(&audit_ts), + } + } + + fn deref_mem(addr: &[usize], mem_val: &[Scalar]) -> DensePolynomial { + DensePolynomial::new( + (0..addr.len()) + .map(|i| { + let a = addr[i]; + mem_val[a] + }) + .collect::>(), + ) + } + + pub fn deref(&self, mem_val: &[Scalar]) -> Vec { + (0..self.ops_addr.len()) + .map(|i| AddrTimestamps::deref_mem(&self.ops_addr_usize[i], mem_val)) + .collect::>() + } +} + +pub struct MultiSparseMatPolynomialAsDense { + batch_size: usize, + val: Vec, + row: AddrTimestamps, + col: AddrTimestamps, + comb_ops: DensePolynomial, + comb_mem: DensePolynomial, +} + +pub struct SparseMatPolyCommitmentGens { + gens_ops: PolyCommitmentGens, + gens_mem: PolyCommitmentGens, + gens_derefs: PolyCommitmentGens, +} + +impl SparseMatPolyCommitmentGens { + pub fn new( + label: &'static [u8], + num_vars_x: usize, + num_vars_y: usize, + num_nz_entries: usize, + batch_size: usize, + ) -> SparseMatPolyCommitmentGens { + let num_vars_ops = + num_nz_entries.next_power_of_two().log_2() + (batch_size * 5).next_power_of_two().log_2(); + let num_vars_mem = if num_vars_x > num_vars_y { + num_vars_x + } else { + num_vars_y + } + 1; + let num_vars_derefs = + num_nz_entries.next_power_of_two().log_2() + (batch_size * 2).next_power_of_two().log_2(); + + let gens_ops = PolyCommitmentGens::new(num_vars_ops, label); + let gens_mem = PolyCommitmentGens::new(num_vars_mem, label); + let gens_derefs = PolyCommitmentGens::new(num_vars_derefs, label); + SparseMatPolyCommitmentGens { + gens_ops, + gens_mem, + gens_derefs, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SparseMatPolyCommitment { + batch_size: usize, + num_ops: usize, + num_mem_cells: usize, + comm_comb_ops: PolyCommitment, + comm_comb_mem: PolyCommitment, +} + +impl AppendToTranscript for SparseMatPolyCommitment { + fn append_to_transcript(&self, _label: &'static [u8], transcript: &mut Transcript) { + transcript.append_u64(b"batch_size", self.batch_size as u64); + transcript.append_u64(b"num_ops", self.num_ops as u64); + transcript.append_u64(b"num_mem_cells", self.num_mem_cells as u64); + self + .comm_comb_ops + .append_to_transcript(b"comm_comb_ops", transcript); + self + .comm_comb_mem + .append_to_transcript(b"comm_comb_mem", transcript); + } +} + +impl SparseMatPolynomial { + pub fn new(num_vars_x: usize, num_vars_y: usize, M: Vec) -> Self { + SparseMatPolynomial { + num_vars_x, + num_vars_y, + M, + } + } + + pub fn get_num_nz_entries(&self) -> usize { + self.M.len().next_power_of_two() + } + + fn sparse_to_dense_vecs(&self, N: usize) -> (Vec, Vec, Vec) { + assert!(N >= self.get_num_nz_entries()); + let mut ops_row: Vec = vec![0; N]; + let mut ops_col: Vec = vec![0; N]; + let mut val: Vec = vec![Scalar::zero(); N]; + + for i in 0..self.M.len() { + ops_row[i] = self.M[i].row; + ops_col[i] = self.M[i].col; + val[i] = self.M[i].val; + } + (ops_row, ops_col, val) + } + + fn multi_sparse_to_dense_rep( + sparse_polys: &[&SparseMatPolynomial], + ) -> MultiSparseMatPolynomialAsDense { + assert!(!sparse_polys.is_empty()); + for i in 1..sparse_polys.len() { + assert_eq!(sparse_polys[i].num_vars_x, sparse_polys[0].num_vars_x); + assert_eq!(sparse_polys[i].num_vars_y, sparse_polys[0].num_vars_y); + } + + let N = (0..sparse_polys.len()) + .map(|i| sparse_polys[i].get_num_nz_entries()) + .max() + .unwrap(); + + let mut ops_row_vec: Vec> = 
Vec::new(); + let mut ops_col_vec: Vec> = Vec::new(); + let mut val_vec: Vec = Vec::new(); + for poly in sparse_polys { + let (ops_row, ops_col, val) = poly.sparse_to_dense_vecs(N); + ops_row_vec.push(ops_row); + ops_col_vec.push(ops_col); + val_vec.push(DensePolynomial::new(val)); + } + + let any_poly = &sparse_polys[0]; + + let num_mem_cells = if any_poly.num_vars_x > any_poly.num_vars_y { + any_poly.num_vars_x.pow2() + } else { + any_poly.num_vars_y.pow2() + }; + + let row = AddrTimestamps::new(num_mem_cells, N, ops_row_vec); + let col = AddrTimestamps::new(num_mem_cells, N, ops_col_vec); + + // combine polynomials into a single polynomial for commitment purposes + let comb_ops = DensePolynomial::merge( + row + .ops_addr + .iter() + .chain(row.read_ts.iter()) + .chain(col.ops_addr.iter()) + .chain(col.read_ts.iter()) + .chain(val_vec.iter()), + ); + let mut comb_mem = row.audit_ts.clone(); + comb_mem.extend(&col.audit_ts); + + MultiSparseMatPolynomialAsDense { + batch_size: sparse_polys.len(), + row, + col, + val: val_vec, + comb_ops, + comb_mem, + } + } + + fn evaluate_with_tables(&self, eval_table_rx: &[Scalar], eval_table_ry: &[Scalar]) -> Scalar { + assert_eq!(self.num_vars_x.pow2(), eval_table_rx.len()); + assert_eq!(self.num_vars_y.pow2(), eval_table_ry.len()); + + (0..self.M.len()) + .map(|i| { + let row = self.M[i].row; + let col = self.M[i].col; + let val = &self.M[i].val; + eval_table_rx[row] * eval_table_ry[col] * val + }) + .sum() + } + + #[cfg(not(feature = "multicore"))] + pub fn multi_evaluate( + polys: &[&SparseMatPolynomial], + rx: &[Scalar], + ry: &[Scalar], + ) -> Vec { + let eval_table_rx = EqPolynomial::new(rx.to_vec()).evals(); + let eval_table_ry = EqPolynomial::new(ry.to_vec()).evals(); + + (0..polys.len()) + .map(|i| polys[i].evaluate_with_tables(&eval_table_rx, &eval_table_ry)) + .collect::>() + } + + #[cfg(feature = "multicore")] + pub fn multi_evaluate( + polys: &[&SparseMatPolynomial], + rx: &[Scalar], + ry: &[Scalar], + ) -> Vec { + let eval_table_rx = EqPolynomial::new(rx.to_vec()).evals(); + let eval_table_ry = EqPolynomial::new(ry.to_vec()).evals(); + + polys.par_iter() + .map(|poly| poly.evaluate_with_tables(&eval_table_rx, &eval_table_ry)) + .collect() + } + + pub fn multiply_vec(&self, num_rows: usize, num_cols: usize, z: &[Scalar]) -> Vec { + assert_eq!(z.len(), num_cols); + + (0..self.M.len()) + .map(|i| { + let row = self.M[i].row; + let col = self.M[i].col; + let val = &self.M[i].val; + (row, val * z[col]) + }) + .fold(vec![Scalar::zero(); num_rows], |mut Mz, (r, v)| { + Mz[r] += v; + Mz + }) + } + + pub fn compute_eval_table_sparse( + &self, + rx: &[Scalar], + num_rows: usize, + num_cols: usize, + ) -> Vec { + assert_eq!(rx.len(), num_rows); + + let mut M_evals: Vec = vec![Scalar::zero(); num_cols]; + + for i in 0..self.M.len() { + let entry = &self.M[i]; + M_evals[entry.col] += rx[entry.row] * entry.val; + } + M_evals + } + + pub fn multi_commit( + sparse_polys: &[&SparseMatPolynomial], + gens: &SparseMatPolyCommitmentGens, + ) -> (SparseMatPolyCommitment, MultiSparseMatPolynomialAsDense) { + let batch_size = sparse_polys.len(); + let dense = SparseMatPolynomial::multi_sparse_to_dense_rep(sparse_polys); + + let (comm_comb_ops, _blinds_comb_ops) = dense.comb_ops.commit(&gens.gens_ops, None); + let (comm_comb_mem, _blinds_comb_mem) = dense.comb_mem.commit(&gens.gens_mem, None); + + ( + SparseMatPolyCommitment { + batch_size, + num_mem_cells: dense.row.audit_ts.len(), + num_ops: dense.row.read_ts[0].len(), + comm_comb_ops, + comm_comb_mem, + }, + 
dense, + ) + } +} + +impl MultiSparseMatPolynomialAsDense { + pub fn deref(&self, row_mem_val: &[Scalar], col_mem_val: &[Scalar]) -> Derefs { + let row_ops_val = self.row.deref(row_mem_val); + let col_ops_val = self.col.deref(col_mem_val); + + Derefs::new(row_ops_val, col_ops_val) + } +} + +#[derive(Debug)] +struct ProductLayer { + init: ProductCircuit, + read_vec: Vec, + write_vec: Vec, + audit: ProductCircuit, +} + +#[derive(Debug)] +struct Layers { + prod_layer: ProductLayer, +} + +impl Layers { + fn build_hash_layer( + eval_table: &[Scalar], + addrs_vec: &[DensePolynomial], + derefs_vec: &[DensePolynomial], + read_ts_vec: &[DensePolynomial], + audit_ts: &DensePolynomial, + r_mem_check: &(Scalar, Scalar), + ) -> ( + DensePolynomial, + Vec, + Vec, + DensePolynomial, + ) { + let (r_hash, r_multiset_check) = r_mem_check; + + //hash(addr, val, ts) = ts * r_hash_sqr + val * r_hash + addr + let r_hash_sqr = r_hash * r_hash; + let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar { + ts * r_hash_sqr + val * r_hash + addr + }; + + // hash init and audit that does not depend on #instances + let num_mem_cells = eval_table.len(); + let poly_init_hashed = DensePolynomial::new( + (0..num_mem_cells) + .map(|i| { + // at init time, addr is given by i, init value is given by eval_table, and ts = 0 + hash_func(&Scalar::from(i as u64), &eval_table[i], &Scalar::zero()) - r_multiset_check + }) + .collect::>(), + ); + let poly_audit_hashed = DensePolynomial::new( + (0..num_mem_cells) + .map(|i| { + // at audit time, addr is given by i, value is given by eval_table, and ts is given by audit_ts + hash_func(&Scalar::from(i as u64), &eval_table[i], &audit_ts[i]) - r_multiset_check + }) + .collect::>(), + ); + + // hash read and write that depends on #instances + let mut poly_read_hashed_vec: Vec = Vec::new(); + let mut poly_write_hashed_vec: Vec = Vec::new(); + for i in 0..addrs_vec.len() { + let (addrs, derefs, read_ts) = (&addrs_vec[i], &derefs_vec[i], &read_ts_vec[i]); + assert_eq!(addrs.len(), derefs.len()); + assert_eq!(addrs.len(), read_ts.len()); + let num_ops = addrs.len(); + let poly_read_hashed = DensePolynomial::new( + (0..num_ops) + .map(|i| { + // at read time, addr is given by addrs, value is given by derefs, and ts is given by read_ts + hash_func(&addrs[i], &derefs[i], &read_ts[i]) - r_multiset_check + }) + .collect::>(), + ); + poly_read_hashed_vec.push(poly_read_hashed); + + let poly_write_hashed = DensePolynomial::new( + (0..num_ops) + .map(|i| { + // at write time, addr is given by addrs, value is given by derefs, and ts is given by write_ts = read_ts + 1 + hash_func(&addrs[i], &derefs[i], &(read_ts[i] + Scalar::one())) - r_multiset_check + }) + .collect::>(), + ); + poly_write_hashed_vec.push(poly_write_hashed); + } + + ( + poly_init_hashed, + poly_read_hashed_vec, + poly_write_hashed_vec, + poly_audit_hashed, + ) + } + + pub fn new( + eval_table: &[Scalar], + addr_timestamps: &AddrTimestamps, + poly_ops_val: &[DensePolynomial], + r_mem_check: &(Scalar, Scalar), + ) -> Self { + let (poly_init_hashed, poly_read_hashed_vec, poly_write_hashed_vec, poly_audit_hashed) = + Layers::build_hash_layer( + eval_table, + &addr_timestamps.ops_addr, + poly_ops_val, + &addr_timestamps.read_ts, + &addr_timestamps.audit_ts, + r_mem_check, + ); + + let prod_init = ProductCircuit::new(&poly_init_hashed); + let prod_read_vec = (0..poly_read_hashed_vec.len()) + .map(|i| ProductCircuit::new(&poly_read_hashed_vec[i])) + .collect::>(); + let prod_write_vec = (0..poly_write_hashed_vec.len()) + .map(|i| 
ProductCircuit::new(&poly_write_hashed_vec[i])) + .collect::>(); + let prod_audit = ProductCircuit::new(&poly_audit_hashed); + + // subset audit check + let hashed_writes: Scalar = (0..prod_write_vec.len()) + .map(|i| prod_write_vec[i].evaluate()) + .product(); + let hashed_write_set: Scalar = prod_init.evaluate() * hashed_writes; + + let hashed_reads: Scalar = (0..prod_read_vec.len()) + .map(|i| prod_read_vec[i].evaluate()) + .product(); + let hashed_read_set: Scalar = hashed_reads * prod_audit.evaluate(); + + //assert_eq!(hashed_read_set, hashed_write_set); + debug_assert_eq!(hashed_read_set, hashed_write_set); + + Layers { + prod_layer: ProductLayer { + init: prod_init, + read_vec: prod_read_vec, + write_vec: prod_write_vec, + audit: prod_audit, + }, + } + } +} + +#[derive(Debug)] +struct PolyEvalNetwork { + row_layers: Layers, + col_layers: Layers, +} + +impl PolyEvalNetwork { + pub fn new( + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + mem_rx: &[Scalar], + mem_ry: &[Scalar], + r_mem_check: &(Scalar, Scalar), + ) -> Self { + let row_layers = Layers::new(mem_rx, &dense.row, &derefs.row_ops_val, r_mem_check); + let col_layers = Layers::new(mem_ry, &dense.col, &derefs.col_ops_val, r_mem_check); + + PolyEvalNetwork { + row_layers, + col_layers, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct HashLayerProof { + eval_row: (Vec, Vec, Scalar), + eval_col: (Vec, Vec, Scalar), + eval_val: Vec, + eval_derefs: (Vec, Vec), + proof_ops: PolyEvalProof, + proof_mem: PolyEvalProof, + proof_derefs: DerefsEvalProof, +} + +impl HashLayerProof { + fn protocol_name() -> &'static [u8] { + b"Sparse polynomial hash layer proof" + } + + fn prove_helper( + rand: (&Vec, &Vec), + addr_timestamps: &AddrTimestamps, + ) -> (Vec, Vec, Scalar) { + let (rand_mem, rand_ops) = rand; + + // decommit ops-addr at rand_ops + let mut eval_ops_addr_vec: Vec = Vec::new(); + for i in 0..addr_timestamps.ops_addr.len() { + let eval_ops_addr = addr_timestamps.ops_addr[i].evaluate(rand_ops); + eval_ops_addr_vec.push(eval_ops_addr); + } + + // decommit read_ts at rand_ops + let mut eval_read_ts_vec: Vec = Vec::new(); + for i in 0..addr_timestamps.read_ts.len() { + let eval_read_ts = addr_timestamps.read_ts[i].evaluate(rand_ops); + eval_read_ts_vec.push(eval_read_ts); + } + + // decommit audit-ts at rand_mem + let eval_audit_ts = addr_timestamps.audit_ts.evaluate(rand_mem); + + (eval_ops_addr_vec, eval_read_ts_vec, eval_audit_ts) + } + + fn prove( + rand: (&Vec, &Vec), + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + gens: &SparseMatPolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> Self { + transcript.append_protocol_name(HashLayerProof::protocol_name()); + + let (rand_mem, rand_ops) = rand; + + // decommit derefs at rand_ops + let eval_row_ops_val = (0..derefs.row_ops_val.len()) + .map(|i| derefs.row_ops_val[i].evaluate(rand_ops)) + .collect::>(); + let eval_col_ops_val = (0..derefs.col_ops_val.len()) + .map(|i| derefs.col_ops_val[i].evaluate(rand_ops)) + .collect::>(); + let proof_derefs = DerefsEvalProof::prove( + derefs, + &eval_row_ops_val, + &eval_col_ops_val, + rand_ops, + &gens.gens_derefs, + transcript, + random_tape, + ); + let eval_derefs = (eval_row_ops_val, eval_col_ops_val); + + // evaluate row_addr, row_read-ts, col_addr, col_read-ts, val at rand_ops + // evaluate row_audit_ts and col_audit_ts at rand_mem + let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = + HashLayerProof::prove_helper((rand_mem, rand_ops), 
&dense.row); + let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = + HashLayerProof::prove_helper((rand_mem, rand_ops), &dense.col); + let eval_val_vec = (0..dense.val.len()) + .map(|i| dense.val[i].evaluate(rand_ops)) + .collect::>(); + + // form a single decommitment using comm_comb_ops + let mut evals_ops: Vec = Vec::new(); + evals_ops.extend(&eval_row_addr_vec); + evals_ops.extend(&eval_row_read_ts_vec); + evals_ops.extend(&eval_col_addr_vec); + evals_ops.extend(&eval_col_read_ts_vec); + evals_ops.extend(&eval_val_vec); + evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero()); + evals_ops.append_to_transcript(b"claim_evals_ops", transcript); + let challenges_ops = + transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log_2()); + + let mut poly_evals_ops = DensePolynomial::new(evals_ops); + for i in (0..challenges_ops.len()).rev() { + poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); + } + assert_eq!(poly_evals_ops.len(), 1); + let joint_claim_eval_ops = poly_evals_ops[0]; + let mut r_joint_ops = challenges_ops; + r_joint_ops.extend(rand_ops); + debug_assert_eq!(dense.comb_ops.evaluate(&r_joint_ops), joint_claim_eval_ops); + joint_claim_eval_ops.append_to_transcript(b"joint_claim_eval_ops", transcript); + let (proof_ops, _comm_ops_eval) = PolyEvalProof::prove( + &dense.comb_ops, + None, + &r_joint_ops, + &joint_claim_eval_ops, + None, + &gens.gens_ops, + transcript, + random_tape, + ); + + // form a single decommitment using comb_comb_mem at rand_mem + let evals_mem: Vec = vec![eval_row_audit_ts, eval_col_audit_ts]; + evals_mem.append_to_transcript(b"claim_evals_mem", transcript); + let challenges_mem = + transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log_2()); + + let mut poly_evals_mem = DensePolynomial::new(evals_mem); + for i in (0..challenges_mem.len()).rev() { + poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); + } + assert_eq!(poly_evals_mem.len(), 1); + let joint_claim_eval_mem = poly_evals_mem[0]; + let mut r_joint_mem = challenges_mem; + r_joint_mem.extend(rand_mem); + debug_assert_eq!(dense.comb_mem.evaluate(&r_joint_mem), joint_claim_eval_mem); + joint_claim_eval_mem.append_to_transcript(b"joint_claim_eval_mem", transcript); + let (proof_mem, _comm_mem_eval) = PolyEvalProof::prove( + &dense.comb_mem, + None, + &r_joint_mem, + &joint_claim_eval_mem, + None, + &gens.gens_mem, + transcript, + random_tape, + ); + + HashLayerProof { + eval_row: (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts), + eval_col: (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts), + eval_val: eval_val_vec, + eval_derefs, + proof_ops, + proof_mem, + proof_derefs, + } + } + + fn verify_helper( + rand: &(&Vec, &Vec), + claims: &(Scalar, Vec, Vec, Scalar), + eval_ops_val: &[Scalar], + eval_ops_addr: &[Scalar], + eval_read_ts: &[Scalar], + eval_audit_ts: &Scalar, + r: &[Scalar], + r_hash: &Scalar, + r_multiset_check: &Scalar, + ) -> Result<(), ProofVerifyError> { + let r_hash_sqr = r_hash * r_hash; + let hash_func = |addr: &Scalar, val: &Scalar, ts: &Scalar| -> Scalar { + ts * r_hash_sqr + val * r_hash + addr + }; + + let (rand_mem, _rand_ops) = rand; + let (claim_init, claim_read, claim_write, claim_audit) = claims; + + // init + let eval_init_addr = IdentityPolynomial::new(rand_mem.len()).evaluate(rand_mem); + let eval_init_val = EqPolynomial::new(r.to_vec()).evaluate(rand_mem); + let hash_init_at_rand_mem = + hash_func(&eval_init_addr, &eval_init_val, &Scalar::zero()) - r_multiset_check; // 
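The prover above combines many claimed evaluations into one joint claim by padding them to a power of two and repeatedly binding a variable to a transcript challenge via bound_poly_var_bot. The sketch below uses the toy field and one fixed bit-order convention (low-order variable bound first, which may differ from the crate's) to show that the folded value is exactly the multilinear extension of the claim vector at the challenge point:

```rust
const P: u128 = (1u128 << 61) - 1;
fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn sub(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

// one bound_poly_var_bot-style step: bind the low-order variable to r
fn fold_bot(z: &[u128], r: u128) -> Vec<u128> {
    (0..z.len() / 2)
        .map(|i| add(mul(sub(1, r), z[2 * i]), mul(r, z[2 * i + 1])))
        .collect()
}

fn main() {
    // four claimed evaluations, padded to a power of two as in the prover
    let evals = vec![11u128, 22, 33, 44];
    let rs = [7u128, 9]; // stand-ins for challenge_vector(.., log2(len))

    // fold down to a single joint claim
    let mut z = evals.clone();
    for &r in &rs {
        z = fold_bot(&z, r);
    }
    assert_eq!(z.len(), 1);

    // the same value, computed directly as the multilinear extension of
    // `evals` with rs[j] bound to the j-th low-order index bit
    let mut direct = 0u128;
    for (i, &v) in evals.iter().enumerate() {
        let mut chi = 1u128;
        for (j, &r) in rs.iter().enumerate() {
            chi = mul(chi, if (i >> j) & 1 == 1 { r } else { sub(1, r) });
        }
        direct = add(direct, mul(chi, v));
    }
    assert_eq!(z[0], direct);
    println!("joint claim equals MLE evaluation: OK");
}
```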
verify the claim_last of init chunk + assert_eq!(&hash_init_at_rand_mem, claim_init); + + // read + for i in 0..eval_ops_addr.len() { + let hash_read_at_rand_ops = + hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_read_ts[i]) - r_multiset_check; // verify the claim_last of init chunk + assert_eq!(&hash_read_at_rand_ops, &claim_read[i]); + } + + // write: shares addr, val component; only decommit write_ts + for i in 0..eval_ops_addr.len() { + let eval_write_ts = eval_read_ts[i] + Scalar::one(); + let hash_write_at_rand_ops = + hash_func(&eval_ops_addr[i], &eval_ops_val[i], &eval_write_ts) - r_multiset_check; // verify the claim_last of init chunk + assert_eq!(&hash_write_at_rand_ops, &claim_write[i]); + } + + // audit: shares addr and val with init + let eval_audit_addr = eval_init_addr; + let eval_audit_val = eval_init_val; + let hash_audit_at_rand_mem = + hash_func(&eval_audit_addr, &eval_audit_val, eval_audit_ts) - r_multiset_check; + assert_eq!(&hash_audit_at_rand_mem, claim_audit); // verify the last step of the sum-check for audit + + Ok(()) + } + + fn verify( + &self, + rand: (&Vec, &Vec), + claims_row: &(Scalar, Vec, Vec, Scalar), + claims_col: &(Scalar, Vec, Vec, Scalar), + claims_dotp: &[Scalar], + comm: &SparseMatPolyCommitment, + gens: &SparseMatPolyCommitmentGens, + comm_derefs: &DerefsCommitment, + rx: &[Scalar], + ry: &[Scalar], + r_hash: &Scalar, + r_multiset_check: &Scalar, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + let timer = Timer::new("verify_hash_proof"); + transcript.append_protocol_name(HashLayerProof::protocol_name()); + + let (rand_mem, rand_ops) = rand; + + // verify derefs at rand_ops + let (eval_row_ops_val, eval_col_ops_val) = &self.eval_derefs; + assert_eq!(eval_row_ops_val.len(), eval_col_ops_val.len()); + self.proof_derefs.verify( + rand_ops, + eval_row_ops_val, + eval_col_ops_val, + &gens.gens_derefs, + comm_derefs, + transcript, + )?; + + // verify the decommitments used in evaluation sum-check + let eval_val_vec = &self.eval_val; + assert_eq!(claims_dotp.len(), 3 * eval_row_ops_val.len()); + for i in 0..claims_dotp.len() / 3 { + let claim_row_ops_val = claims_dotp[3 * i]; + let claim_col_ops_val = claims_dotp[3 * i + 1]; + let claim_val = claims_dotp[3 * i + 2]; + + assert_eq!(claim_row_ops_val, eval_row_ops_val[i]); + assert_eq!(claim_col_ops_val, eval_col_ops_val[i]); + assert_eq!(claim_val, eval_val_vec[i]); + } + + // verify addr-timestamps using comm_comb_ops at rand_ops + let (eval_row_addr_vec, eval_row_read_ts_vec, eval_row_audit_ts) = &self.eval_row; + let (eval_col_addr_vec, eval_col_read_ts_vec, eval_col_audit_ts) = &self.eval_col; + + let mut evals_ops: Vec = Vec::new(); + evals_ops.extend(eval_row_addr_vec); + evals_ops.extend(eval_row_read_ts_vec); + evals_ops.extend(eval_col_addr_vec); + evals_ops.extend(eval_col_read_ts_vec); + evals_ops.extend(eval_val_vec); + evals_ops.resize(evals_ops.len().next_power_of_two(), Scalar::zero()); + evals_ops.append_to_transcript(b"claim_evals_ops", transcript); + let challenges_ops = + transcript.challenge_vector(b"challenge_combine_n_to_one", evals_ops.len().log_2()); + + let mut poly_evals_ops = DensePolynomial::new(evals_ops); + for i in (0..challenges_ops.len()).rev() { + poly_evals_ops.bound_poly_var_bot(&challenges_ops[i]); + } + assert_eq!(poly_evals_ops.len(), 1); + let joint_claim_eval_ops = poly_evals_ops[0]; + let mut r_joint_ops = challenges_ops; + r_joint_ops.extend(rand_ops); + joint_claim_eval_ops.append_to_transcript(b"joint_claim_eval_ops", transcript); + 
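verify_helper reconstructs the init and audit hashes from two closed-form evaluations: the identity polynomial supplies the address component and the eq polynomial supplies the init value. A sketch of both under the usual definitions, eq(r, x) = prod_j (r_j x_j + (1 - r_j)(1 - x_j)) and identity as the big-endian weighted bit sum; the latter is an assumption about IdentityPolynomial's convention, hedged accordingly:

```rust
const P: u128 = (1u128 << 61) - 1;
fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn sub(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

// eq(r, x) = prod_j ( r_j * x_j + (1 - r_j) * (1 - x_j) )
fn eq_eval(r: &[u128], x: &[u128]) -> u128 {
    r.iter().zip(x).fold(1, |acc, (&rj, &xj)| {
        mul(acc, add(mul(rj, xj), mul(sub(1, rj), sub(1, xj))))
    })
}

// assumed form of the identity MLE: i |-> i on the Boolean cube,
// i.e. sum_j 2^(m-1-j) * x_j with big-endian bits (Horner form)
fn id_eval(x: &[u128]) -> u128 {
    x.iter().fold(0, |acc, &xj| add(mul(2, acc), xj))
}

fn main() {
    // on Boolean inputs, id returns the index and eq is the Kronecker delta
    let r = [1u128, 0, 1]; // bits of 5, big-endian
    assert_eq!(id_eval(&r), 5);
    assert_eq!(eq_eval(&r, &[1, 0, 1]), 1);
    assert_eq!(eq_eval(&r, &[1, 1, 1]), 0);
}
```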
self.proof_ops.verify_plain( + &gens.gens_ops, + transcript, + &r_joint_ops, + &joint_claim_eval_ops, + &comm.comm_comb_ops, + )?; + + // verify proof-mem using comm_comb_mem at rand_mem + // form a single decommitment using comb_comb_mem at rand_mem + let evals_mem: Vec = vec![*eval_row_audit_ts, *eval_col_audit_ts]; + evals_mem.append_to_transcript(b"claim_evals_mem", transcript); + let challenges_mem = + transcript.challenge_vector(b"challenge_combine_two_to_one", evals_mem.len().log_2()); + + let mut poly_evals_mem = DensePolynomial::new(evals_mem); + for i in (0..challenges_mem.len()).rev() { + poly_evals_mem.bound_poly_var_bot(&challenges_mem[i]); + } + assert_eq!(poly_evals_mem.len(), 1); + let joint_claim_eval_mem = poly_evals_mem[0]; + let mut r_joint_mem = challenges_mem; + r_joint_mem.extend(rand_mem); + joint_claim_eval_mem.append_to_transcript(b"joint_claim_eval_mem", transcript); + self.proof_mem.verify_plain( + &gens.gens_mem, + transcript, + &r_joint_mem, + &joint_claim_eval_mem, + &comm.comm_comb_mem, + )?; + + // verify the claims from the product layer + let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_row; + HashLayerProof::verify_helper( + &(rand_mem, rand_ops), + claims_row, + eval_row_ops_val, + eval_ops_addr, + eval_read_ts, + eval_audit_ts, + rx, + r_hash, + r_multiset_check, + )?; + + let (eval_ops_addr, eval_read_ts, eval_audit_ts) = &self.eval_col; + HashLayerProof::verify_helper( + &(rand_mem, rand_ops), + claims_col, + eval_col_ops_val, + eval_ops_addr, + eval_read_ts, + eval_audit_ts, + ry, + r_hash, + r_multiset_check, + )?; + + timer.stop(); + Ok(()) + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct ProductLayerProof { + eval_row: (Scalar, Vec, Vec, Scalar), + eval_col: (Scalar, Vec, Vec, Scalar), + eval_val: (Vec, Vec), + proof_mem: ProductCircuitEvalProofBatched, + proof_ops: ProductCircuitEvalProofBatched, +} + +impl ProductLayerProof { + fn protocol_name() -> &'static [u8] { + b"Sparse polynomial product layer proof" + } + + pub fn prove( + row_prod_layer: &mut ProductLayer, + col_prod_layer: &mut ProductLayer, + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + eval: &[Scalar], + transcript: &mut Transcript, + ) -> (Self, Vec, Vec) { + transcript.append_protocol_name(ProductLayerProof::protocol_name()); + + let row_eval_init = row_prod_layer.init.evaluate(); + let row_eval_audit = row_prod_layer.audit.evaluate(); + let row_eval_read = (0..row_prod_layer.read_vec.len()) + .map(|i| row_prod_layer.read_vec[i].evaluate()) + .collect::>(); + let row_eval_write = (0..row_prod_layer.write_vec.len()) + .map(|i| row_prod_layer.write_vec[i].evaluate()) + .collect::>(); + + // subset check + let ws: Scalar = (0..row_eval_write.len()) + .map(|i| row_eval_write[i]) + .product(); + let rs: Scalar = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product(); + assert_eq!(row_eval_init * ws, rs * row_eval_audit); + + row_eval_init.append_to_transcript(b"claim_row_eval_init", transcript); + row_eval_read.append_to_transcript(b"claim_row_eval_read", transcript); + row_eval_write.append_to_transcript(b"claim_row_eval_write", transcript); + row_eval_audit.append_to_transcript(b"claim_row_eval_audit", transcript); + + let col_eval_init = col_prod_layer.init.evaluate(); + let col_eval_audit = col_prod_layer.audit.evaluate(); + let col_eval_read: Vec = (0..col_prod_layer.read_vec.len()) + .map(|i| col_prod_layer.read_vec[i].evaluate()) + .collect(); + let col_eval_write: Vec = (0..col_prod_layer.write_vec.len()) + .map(|i| 
col_prod_layer.write_vec[i].evaluate()) + .collect(); + + // subset check + let ws: Scalar = (0..col_eval_write.len()) + .map(|i| col_eval_write[i]) + .product(); + let rs: Scalar = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product(); + assert_eq!(col_eval_init * ws, rs * col_eval_audit); + + col_eval_init.append_to_transcript(b"claim_col_eval_init", transcript); + col_eval_read.append_to_transcript(b"claim_col_eval_read", transcript); + col_eval_write.append_to_transcript(b"claim_col_eval_write", transcript); + col_eval_audit.append_to_transcript(b"claim_col_eval_audit", transcript); + + // prepare dotproduct circuit for batching then with ops-related product circuits + assert_eq!(eval.len(), derefs.row_ops_val.len()); + assert_eq!(eval.len(), derefs.col_ops_val.len()); + assert_eq!(eval.len(), dense.val.len()); + let mut dotp_circuit_left_vec: Vec = Vec::new(); + let mut dotp_circuit_right_vec: Vec = Vec::new(); + let mut eval_dotp_left_vec: Vec = Vec::new(); + let mut eval_dotp_right_vec: Vec = Vec::new(); + for i in 0..derefs.row_ops_val.len() { + // evaluate sparse polynomial evaluation using two dotp checks + let left = derefs.row_ops_val[i].clone(); + let right = derefs.col_ops_val[i].clone(); + let weights = dense.val[i].clone(); + + // build two dot product circuits to prove evaluation of sparse polynomial + let mut dotp_circuit = DotProductCircuit::new(left, right, weights); + let (dotp_circuit_left, dotp_circuit_right) = dotp_circuit.split(); + + let (eval_dotp_left, eval_dotp_right) = + (dotp_circuit_left.evaluate(), dotp_circuit_right.evaluate()); + + eval_dotp_left.append_to_transcript(b"claim_eval_dotp_left", transcript); + eval_dotp_right.append_to_transcript(b"claim_eval_dotp_right", transcript); + assert_eq!(eval_dotp_left + eval_dotp_right, eval[i]); + eval_dotp_left_vec.push(eval_dotp_left); + eval_dotp_right_vec.push(eval_dotp_right); + + dotp_circuit_left_vec.push(dotp_circuit_left); + dotp_circuit_right_vec.push(dotp_circuit_right); + } + + // The number of operations into the memory encoded by rx and ry are always the same (by design) + // So we can produce a batched product proof for all of them at the same time. 
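Each sparse evaluation claim is a weighted dot product sum_k row_val[k] * col_val[k] * val[k], and split() cuts it into two half-size circuits whose outputs must add back to the claim, which is exactly what the assert on eval_dotp_left + eval_dotp_right enforces. A toy illustration in the same stand-in field:

```rust
const P: u128 = (1u128 << 61) - 1;
fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

// weighted dot product: sum_k left[k] * right[k] * weights[k]
fn dotp(l: &[u128], r: &[u128], w: &[u128]) -> u128 {
    l.iter()
        .zip(r)
        .zip(w)
        .fold(0, |acc, ((&x, &y), &z)| add(acc, mul(mul(x, y), z)))
}

fn main() {
    let left = [3u128, 1, 4, 1, 5, 9, 2, 6];
    let right = [2u128, 7, 1, 8, 2, 8, 1, 8];
    let weight = [1u128, 4, 1, 4, 2, 1, 3, 5];

    let n = left.len() / 2;
    // split() halves each vector; the two sub-circuit evaluations must
    // add back up to the full claim, as asserted in the prover
    let eval_left = dotp(&left[..n], &right[..n], &weight[..n]);
    let eval_right = dotp(&left[n..], &right[n..], &weight[n..]);
    assert_eq!(add(eval_left, eval_right), dotp(&left, &right, &weight));
    println!("dotp split: left + right == total");
}
```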
+ // prove the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write + // TODO: we currently only produce proofs for 3 batched sparse polynomial evaluations + assert_eq!(row_prod_layer.read_vec.len(), 3); + let (row_read_A, row_read_B, row_read_C) = { + let (vec_A, vec_BC) = row_prod_layer.read_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (row_write_A, row_write_B, row_write_C) = { + let (vec_A, vec_BC) = row_prod_layer.write_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (col_read_A, col_read_B, col_read_C) = { + let (vec_A, vec_BC) = col_prod_layer.read_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (col_write_A, col_write_B, col_write_C) = { + let (vec_A, vec_BC) = col_prod_layer.write_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (dotp_left_A, dotp_left_B, dotp_left_C) = { + let (vec_A, vec_BC) = dotp_circuit_left_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (dotp_right_A, dotp_right_B, dotp_right_C) = { + let (vec_A, vec_BC) = dotp_circuit_right_vec.split_at_mut(1); + let (vec_B, vec_C) = vec_BC.split_at_mut(1); + (vec_A, vec_B, vec_C) + }; + + let (proof_ops, rand_ops) = ProductCircuitEvalProofBatched::prove( + &mut vec![ + &mut row_read_A[0], + &mut row_read_B[0], + &mut row_read_C[0], + &mut row_write_A[0], + &mut row_write_B[0], + &mut row_write_C[0], + &mut col_read_A[0], + &mut col_read_B[0], + &mut col_read_C[0], + &mut col_write_A[0], + &mut col_write_B[0], + &mut col_write_C[0], + ], + &mut vec![ + &mut dotp_left_A[0], + &mut dotp_right_A[0], + &mut dotp_left_B[0], + &mut dotp_right_B[0], + &mut dotp_left_C[0], + &mut dotp_right_C[0], + ], + transcript, + ); + + // produce a batched proof of memory-related product circuits + let (proof_mem, rand_mem) = ProductCircuitEvalProofBatched::prove( + &mut vec![ + &mut row_prod_layer.init, + &mut row_prod_layer.audit, + &mut col_prod_layer.init, + &mut col_prod_layer.audit, + ], + &mut Vec::new(), + transcript, + ); + + let product_layer_proof = ProductLayerProof { + eval_row: (row_eval_init, row_eval_read, row_eval_write, row_eval_audit), + eval_col: (col_eval_init, col_eval_read, col_eval_write, col_eval_audit), + eval_val: (eval_dotp_left_vec, eval_dotp_right_vec), + proof_mem, + proof_ops, + }; + + let product_layer_proof_encoded: Vec = bincode::serialize(&product_layer_proof).unwrap(); + let msg = format!( + "len_product_layer_proof {:?}", + product_layer_proof_encoded.len() + ); + Timer::print(&msg); + + (product_layer_proof, rand_mem, rand_ops) + } + + pub fn verify( + &self, + num_ops: usize, + num_cells: usize, + eval: &[Scalar], + transcript: &mut Transcript, + ) -> Result< + ( + Vec, + Vec, + Vec, + Vec, + Vec, + ), + ProofVerifyError, + > { + transcript.append_protocol_name(ProductLayerProof::protocol_name()); + + let timer = Timer::new("verify_prod_proof"); + let num_instances = eval.len(); + + // subset check + let (row_eval_init, row_eval_read, row_eval_write, row_eval_audit) = &self.eval_row; + assert_eq!(row_eval_write.len(), num_instances); + assert_eq!(row_eval_read.len(), num_instances); + let ws: Scalar = (0..row_eval_write.len()) + .map(|i| row_eval_write[i]) + .product(); + let rs: Scalar = (0..row_eval_read.len()).map(|i| row_eval_read[i]).product(); + 
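The split_at_mut blocks below exist only to satisfy the borrow checker: ProductCircuitEvalProofBatched::prove needs simultaneous &mut access to several circuits stored in one Vec, and split_at_mut is the standard way to carve a vector into provably disjoint mutable windows. The pattern in isolation:

```rust
fn main() {
    let mut v = vec![10i64, 20, 30];

    // carve the vector into three disjoint mutable windows, as done for
    // read_vec/write_vec above; the borrow checker accepts this because
    // split_at_mut guarantees the slices cannot alias
    let (a, bc) = v.split_at_mut(1);
    let (b, c) = bc.split_at_mut(1);

    a[0] += 1;
    b[0] += 2;
    c[0] += 3;
    assert_eq!(v, vec![11, 22, 33]);
}
```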
assert_eq!(row_eval_init * ws, rs * row_eval_audit); + + row_eval_init.append_to_transcript(b"claim_row_eval_init", transcript); + row_eval_read.append_to_transcript(b"claim_row_eval_read", transcript); + row_eval_write.append_to_transcript(b"claim_row_eval_write", transcript); + row_eval_audit.append_to_transcript(b"claim_row_eval_audit", transcript); + + // subset check + let (col_eval_init, col_eval_read, col_eval_write, col_eval_audit) = &self.eval_col; + assert_eq!(col_eval_write.len(), num_instances); + assert_eq!(col_eval_read.len(), num_instances); + let ws: Scalar = (0..col_eval_write.len()) + .map(|i| col_eval_write[i]) + .product(); + let rs: Scalar = (0..col_eval_read.len()).map(|i| col_eval_read[i]).product(); + assert_eq!(col_eval_init * ws, rs * col_eval_audit); + + col_eval_init.append_to_transcript(b"claim_col_eval_init", transcript); + col_eval_read.append_to_transcript(b"claim_col_eval_read", transcript); + col_eval_write.append_to_transcript(b"claim_col_eval_write", transcript); + col_eval_audit.append_to_transcript(b"claim_col_eval_audit", transcript); + + // verify the evaluation of the sparse polynomial + let (eval_dotp_left, eval_dotp_right) = &self.eval_val; + assert_eq!(eval_dotp_left.len(), eval_dotp_right.len()); + assert_eq!(eval_dotp_left.len(), num_instances); + let mut claims_dotp_circuit: Vec<Scalar> = Vec::new(); + for i in 0..num_instances { + assert_eq!(eval_dotp_left[i] + eval_dotp_right[i], eval[i]); + eval_dotp_left[i].append_to_transcript(b"claim_eval_dotp_left", transcript); + eval_dotp_right[i].append_to_transcript(b"claim_eval_dotp_right", transcript); + + claims_dotp_circuit.push(eval_dotp_left[i]); + claims_dotp_circuit.push(eval_dotp_right[i]); + } + + // verify the correctness of claim_row_eval_read, claim_row_eval_write, claim_col_eval_read, and claim_col_eval_write + let mut claims_prod_circuit: Vec<Scalar> = Vec::new(); + claims_prod_circuit.extend(row_eval_read); + claims_prod_circuit.extend(row_eval_write); + claims_prod_circuit.extend(col_eval_read); + claims_prod_circuit.extend(col_eval_write); + + let (claims_ops, claims_dotp, rand_ops) = self.proof_ops.verify( + &claims_prod_circuit, + &claims_dotp_circuit, + num_ops, + transcript, + ); + // verify the correctness of claim_row_eval_init and claim_row_eval_audit + let (claims_mem, _claims_mem_dotp, rand_mem) = self.proof_mem.verify( + &[ + *row_eval_init, + *row_eval_audit, + *col_eval_init, + *col_eval_audit, + ], + &Vec::new(), + num_cells, + transcript, + ); + timer.stop(); + + Ok((claims_mem, rand_mem, claims_ops, claims_dotp, rand_ops)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct PolyEvalNetworkProof { + proof_prod_layer: ProductLayerProof, + proof_hash_layer: HashLayerProof, +} + +impl PolyEvalNetworkProof { + fn protocol_name() -> &'static [u8] { + b"Sparse polynomial evaluation proof" + } + + pub fn prove( + network: &mut PolyEvalNetwork, + dense: &MultiSparseMatPolynomialAsDense, + derefs: &Derefs, + evals: &[Scalar], + gens: &SparseMatPolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> Self { + transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name()); + + let (proof_prod_layer, rand_mem, rand_ops) = ProductLayerProof::prove( + &mut network.row_layers.prod_layer, + &mut network.col_layers.prod_layer, + dense, + derefs, + evals, + transcript, + ); + + // proof of hash layer for row and col + let proof_hash_layer = HashLayerProof::prove( + (&rand_mem, &rand_ops), + dense, + derefs, + gens, + transcript, + random_tape, + ); + + 
PolyEvalNetworkProof { + proof_prod_layer, + proof_hash_layer, + } + } + + pub fn verify( + &self, + comm: &SparseMatPolyCommitment, + comm_derefs: &DerefsCommitment, + evals: &[Scalar], + gens: &SparseMatPolyCommitmentGens, + rx: &[Scalar], + ry: &[Scalar], + r_mem_check: &(Scalar, Scalar), + nz: usize, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + let timer = Timer::new("verify_polyeval_proof"); + transcript.append_protocol_name(PolyEvalNetworkProof::protocol_name()); + + let num_instances = evals.len(); + let (r_hash, r_multiset_check) = r_mem_check; + + let num_ops = nz.next_power_of_two(); + let num_cells = rx.len().pow2(); + assert_eq!(rx.len(), ry.len()); + + let (claims_mem, rand_mem, mut claims_ops, claims_dotp, rand_ops) = self + .proof_prod_layer + .verify(num_ops, num_cells, evals, transcript)?; + assert_eq!(claims_mem.len(), 4); + assert_eq!(claims_ops.len(), 4 * num_instances); + assert_eq!(claims_dotp.len(), 3 * num_instances); + + let (claims_ops_row, claims_ops_col) = claims_ops.split_at_mut(2 * num_instances); + let (claims_ops_row_read, claims_ops_row_write) = claims_ops_row.split_at_mut(num_instances); + let (claims_ops_col_read, claims_ops_col_write) = claims_ops_col.split_at_mut(num_instances); + + // verify the proof of hash layer + self.proof_hash_layer.verify( + (&rand_mem, &rand_ops), + &( + claims_mem[0], + claims_ops_row_read.to_vec(), + claims_ops_row_write.to_vec(), + claims_mem[1], + ), + &( + claims_mem[2], + claims_ops_col_read.to_vec(), + claims_ops_col_write.to_vec(), + claims_mem[3], + ), + &claims_dotp, + comm, + gens, + comm_derefs, + rx, + ry, + r_hash, + r_multiset_check, + transcript, + )?; + timer.stop(); + + Ok(()) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct SparseMatPolyEvalProof { + comm_derefs: DerefsCommitment, + poly_eval_network_proof: PolyEvalNetworkProof, +} + +impl SparseMatPolyEvalProof { + fn protocol_name() -> &'static [u8] { + b"Sparse polynomial evaluation proof" + } + + fn equalize(rx: &[Scalar], ry: &[Scalar]) -> (Vec, Vec) { + match rx.len().cmp(&ry.len()) { + Ordering::Less => { + let diff = ry.len() - rx.len(); + let mut rx_ext = vec![Scalar::zero(); diff]; + rx_ext.extend(rx); + (rx_ext, ry.to_vec()) + } + Ordering::Greater => { + let diff = rx.len() - ry.len(); + let mut ry_ext = vec![Scalar::zero(); diff]; + ry_ext.extend(ry); + (rx.to_vec(), ry_ext) + } + Ordering::Equal => (rx.to_vec(), ry.to_vec()), + } + } + + pub fn prove( + dense: &MultiSparseMatPolynomialAsDense, + rx: &[Scalar], // point at which the polynomial is evaluated + ry: &[Scalar], + evals: &[Scalar], // a vector evaluation of \widetilde{M}(r = (rx,ry)) for each M + gens: &SparseMatPolyCommitmentGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> SparseMatPolyEvalProof { + transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); + + // ensure there is one eval for each polynomial in dense + assert_eq!(evals.len(), dense.batch_size); + + let (mem_rx, mem_ry) = { + // equalize the lengths of rx and ry + let (rx_ext, ry_ext) = SparseMatPolyEvalProof::equalize(rx, ry); + let poly_rx = EqPolynomial::new(rx_ext).evals(); + let poly_ry = EqPolynomial::new(ry_ext).evals(); + (poly_rx, poly_ry) + }; + + let derefs = dense.deref(&mem_rx, &mem_ry); + + // commit to non-deterministic choices of the prover + let timer_commit = Timer::new("commit_nondet_witness"); + let comm_derefs = { + let comm = derefs.commit(&gens.gens_derefs); + comm.append_to_transcript(b"comm_poly_row_col_ops_val", 
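equalize pads the shorter of rx/ry with zeros at the front so both points have the same number of variables; binding the new top variables to zero keeps the evaluation pointing at the same entries, assuming the smaller table sits in the low half of the enlarged cube. A stand-alone version with u64 stand-ins for Scalar:

```rust
use std::cmp::Ordering;

// mirrors SparseMatPolyEvalProof::equalize with u64 in place of Scalar
fn equalize(rx: &[u64], ry: &[u64]) -> (Vec<u64>, Vec<u64>) {
    match rx.len().cmp(&ry.len()) {
        Ordering::Less => {
            let mut rx_ext = vec![0; ry.len() - rx.len()]; // zeros in front
            rx_ext.extend(rx);
            (rx_ext, ry.to_vec())
        }
        Ordering::Greater => {
            let mut ry_ext = vec![0; rx.len() - ry.len()];
            ry_ext.extend(ry);
            (rx.to_vec(), ry_ext)
        }
        Ordering::Equal => (rx.to_vec(), ry.to_vec()),
    }
}

fn main() {
    let (rx, ry) = equalize(&[7, 8], &[1, 2, 3, 4]);
    assert_eq!(rx, vec![0, 0, 7, 8]); // new top variables bound to zero
    assert_eq!(ry, vec![1, 2, 3, 4]);
}
```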
transcript); + comm + }; + timer_commit.stop(); + + let poly_eval_network_proof = { + // produce a random element from the transcript for hash function + let r_mem_check = transcript.challenge_vector(b"challenge_r_hash", 2); + + // build a network to evaluate the sparse polynomial + let timer_build_network = Timer::new("build_layered_network"); + let mut net = PolyEvalNetwork::new( + dense, + &derefs, + &mem_rx, + &mem_ry, + &(r_mem_check[0], r_mem_check[1]), + ); + timer_build_network.stop(); + + let timer_eval_network = Timer::new("evalproof_layered_network"); + let poly_eval_network_proof = PolyEvalNetworkProof::prove( + &mut net, + dense, + &derefs, + evals, + gens, + transcript, + random_tape, + ); + timer_eval_network.stop(); + + poly_eval_network_proof + }; + + SparseMatPolyEvalProof { + comm_derefs, + poly_eval_network_proof, + } + } + + pub fn verify( + &self, + comm: &SparseMatPolyCommitment, + rx: &[Scalar], // point at which the polynomial is evaluated + ry: &[Scalar], + evals: &[Scalar], // evaluation of \widetilde{M}(r = (rx,ry)) + gens: &SparseMatPolyCommitmentGens, + transcript: &mut Transcript, + ) -> Result<(), ProofVerifyError> { + transcript.append_protocol_name(SparseMatPolyEvalProof::protocol_name()); + + // equalize the lengths of rx and ry + let (rx_ext, ry_ext) = SparseMatPolyEvalProof::equalize(rx, ry); + + let (nz, num_mem_cells) = (comm.num_ops, comm.num_mem_cells); + assert_eq!(rx_ext.len().pow2(), num_mem_cells); + + // add claims to transcript and obtain challenges for randomized mem-check circuit + self + .comm_derefs + .append_to_transcript(b"comm_poly_row_col_ops_val", transcript); + + // produce a random element from the transcript for hash function + let r_mem_check = transcript.challenge_vector(b"challenge_r_hash", 2); + + self.poly_eval_network_proof.verify( + comm, + &self.comm_derefs, + evals, + gens, + &rx_ext, + &ry_ext, + &(r_mem_check[0], r_mem_check[1]), + nz, + transcript, + ) + } +} + +pub struct SparsePolyEntry { + idx: usize, + val: Scalar, +} + +impl SparsePolyEntry { + pub fn new(idx: usize, val: Scalar) -> Self { + SparsePolyEntry { idx, val } + } +} + +pub struct SparsePolynomial { + num_vars: usize, + Z: Vec, +} + +impl SparsePolynomial { + pub fn new(num_vars: usize, Z: Vec) -> Self { + SparsePolynomial { num_vars, Z } + } + + fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar { + assert_eq!(a.len(), r.len()); + let mut chi_i = Scalar::one(); + for j in 0..r.len() { + if a[j] { + chi_i *= r[j]; + } else { + chi_i *= Scalar::one() - r[j]; + } + } + chi_i + } + + // Takes O(n log n). 
TODO: do this in O(n) where n is the number of entries in Z + pub fn evaluate(&self, r: &[Scalar]) -> Scalar { + assert_eq!(self.num_vars, r.len()); + + (0..self.Z.len()) + .map(|i| { + let bits = self.Z[i].idx.get_bits(r.len()); + SparsePolynomial::compute_chi(&bits, r) * self.Z[i].val + }) + .sum() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::rngs::OsRng; + use rand::RngCore; + #[test] + fn check_sparse_polyeval_proof() { + let mut csprng: OsRng = OsRng; + + let num_nz_entries: usize = 256; + let num_rows: usize = 256; + let num_cols: usize = 256; + let num_vars_x: usize = num_rows.log_2(); + let num_vars_y: usize = num_cols.log_2(); + + let mut M: Vec = Vec::new(); + + for _i in 0..num_nz_entries { + M.push(SparseMatEntry::new( + (csprng.next_u64() % (num_rows as u64)) as usize, + (csprng.next_u64() % (num_cols as u64)) as usize, + Scalar::random(&mut csprng), + )); + } + + let poly_M = SparseMatPolynomial::new(num_vars_x, num_vars_y, M); + let gens = SparseMatPolyCommitmentGens::new( + b"gens_sparse_poly", + num_vars_x, + num_vars_y, + num_nz_entries, + 3, + ); + + // commitment + let (poly_comm, dense) = SparseMatPolynomial::multi_commit(&[&poly_M, &poly_M, &poly_M], &gens); + + // evaluation + let rx: Vec = (0..num_vars_x) + .map(|_i| Scalar::random(&mut csprng)) + .collect::>(); + let ry: Vec = (0..num_vars_y) + .map(|_i| Scalar::random(&mut csprng)) + .collect::>(); + let eval = SparseMatPolynomial::multi_evaluate(&[&poly_M], &rx, &ry); + let evals = vec![eval[0], eval[0], eval[0]]; + + let mut random_tape = RandomTape::new(b"proof"); + let mut prover_transcript = Transcript::new(b"example"); + let proof = SparseMatPolyEvalProof::prove( + &dense, + &rx, + &ry, + &evals, + &gens, + &mut prover_transcript, + &mut random_tape, + ); + + let mut verifier_transcript = Transcript::new(b"example"); + assert!(proof + .verify( + &poly_comm, + &rx, + &ry, + &evals, + &gens, + &mut verifier_transcript, + ) + .is_ok()); + } +} diff --git a/third_party/Dorian/src/sumcheck.rs b/third_party/Dorian/src/sumcheck.rs new file mode 100644 index 000000000..8aa093177 --- /dev/null +++ b/third_party/Dorian/src/sumcheck.rs @@ -0,0 +1,1223 @@ +#![allow(clippy::too_many_arguments)] +#![allow(clippy::type_complexity)] +use super::commitments::{Commitments, MultiCommitGens}; +use super::dense_mlpoly::DensePolynomial; +use super::errors::ProofVerifyError; +use super::group::{CompressedGroup, GroupElement, VartimeMultiscalarMul}; +use super::nizk::DotProductProof; +use super::random::RandomTape; +use super::scalar::Scalar; +use super::transcript::{AppendToTranscript, ProofTranscript}; +use super::unipoly::{CompressedUniPoly, UniPoly}; +use core::iter; +use itertools::izip; +use merlin::Transcript; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "multicore")] +use rayon::prelude::*; + +use crate::Timer; +#[derive(Serialize, Deserialize, Debug)] +pub struct SumcheckInstanceProof { + compressed_polys: Vec, +} + +impl SumcheckInstanceProof { + pub fn new(compressed_polys: Vec) -> SumcheckInstanceProof { + SumcheckInstanceProof { compressed_polys } + } + + pub fn verify( + &self, + claim: Scalar, + num_rounds: usize, + degree_bound: usize, + transcript: &mut Transcript, + ) -> Result<(Scalar, Vec), ProofVerifyError> { + let mut e = claim; + let mut r: Vec = Vec::new(); + + // verify that there is a univariate polynomial for each round + assert_eq!(self.compressed_polys.len(), num_rounds); + for i in 0..self.compressed_polys.len() { + let poly = self.compressed_polys[i].decompress(&e); + + 
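Each round of this verifier enforces the sum-check invariant: the received univariate G_k must satisfy G_k(0) + G_k(1) = e, after which the claim for the next round becomes e = G_k(r_k). A complete toy run for a multilinear summand, in the stand-in field with fixed values in place of transcript challenges:

```rust
const P: u128 = (1u128 << 61) - 1;
fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn sub(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

// evaluate a univariate polynomial given by ascending coefficients
fn poly_eval(coeffs: &[u128], x: u128) -> u128 {
    coeffs.iter().rev().fold(0, |acc, &c| add(mul(acc, x), c))
}

// prover's round message for a multilinear summand: G(X) is linear,
// with G(0) = sum of the low half and G(1) = sum of the high half
fn round_poly(table: &[u128]) -> [u128; 2] {
    let half = table.len() / 2;
    let g0 = table[..half].iter().fold(0, |a, &v| add(a, v));
    let g1 = table[half..].iter().fold(0, |a, &v| add(a, v));
    [g0, sub(g1, g0)] // G(X) = g0 + (g1 - g0) * X
}

// bind the top variable to r: new[i] = low[i] + r * (high[i] - low[i])
fn bind_top(table: &[u128], r: u128) -> Vec<u128> {
    let half = table.len() / 2;
    (0..half)
        .map(|i| add(table[i], mul(r, sub(table[half + i], table[i]))))
        .collect()
}

fn main() {
    let mut table = vec![3u128, 1, 4, 1, 5, 9, 2, 6];
    let mut e: u128 = table.iter().fold(0, |a, &v| add(a, v)); // the claim
    let challenges = [11u128, 22, 33]; // stand-ins for transcript challenges

    for &r in &challenges {
        let g = round_poly(&table);
        // the verifier's round check: G(0) + G(1) must equal the claim
        assert_eq!(add(poly_eval(&g, 0), poly_eval(&g, 1)), e);
        e = poly_eval(&g, r); // new claim for the next round
        table = bind_top(&table, r);
    }
    assert_eq!(table.len(), 1);
    assert_eq!(e, table[0]); // final claim = MLE at the challenge point
    println!("sum-check rounds verified");
}
```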
// verify degree bound + assert_eq!(poly.degree(), degree_bound); + + // check if G_k(0) + G_k(1) = e + assert_eq!(poly.eval_at_zero() + poly.eval_at_one(), e); + + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + + //derive the verifier's challenge for the next round + let r_i = transcript.challenge_scalar(b"challenge_nextround"); + + r.push(r_i); + + // evaluate the claimed degree-ell polynomial at r_i + e = poly.evaluate(&r_i); + } + + Ok((e, r)) + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct ZKSumcheckInstanceProof { + comm_polys: Vec, + comm_evals: Vec, + proofs: Vec, +} + +impl ZKSumcheckInstanceProof { + pub fn new( + comm_polys: Vec, + comm_evals: Vec, + proofs: Vec, + ) -> Self { + ZKSumcheckInstanceProof { + comm_polys, + comm_evals, + proofs, + } + } + + pub fn verify( + &self, + comm_claim: &CompressedGroup, + num_rounds: usize, + degree_bound: usize, + gens_1: &MultiCommitGens, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + ) -> Result<(CompressedGroup, Vec), ProofVerifyError> { + // verify degree bound + assert_eq!(gens_n.n, degree_bound + 1); + + // verify that there is a univariate polynomial for each round + assert_eq!(self.comm_polys.len(), num_rounds); + assert_eq!(self.comm_evals.len(), num_rounds); + + let mut r: Vec = Vec::new(); + for i in 0..self.comm_polys.len() { + let comm_poly = &self.comm_polys[i]; + + // append the prover's polynomial to the transcript + comm_poly.append_to_transcript(b"comm_poly", transcript); + + //derive the verifier's challenge for the next round + let r_i = transcript.challenge_scalar(b"challenge_nextround"); + + // verify the proof of sum-check and evals + let res = { + let comm_claim_per_round = if i == 0 { + comm_claim + } else { + &self.comm_evals[i - 1] + }; + let comm_eval = &self.comm_evals[i]; + + // add two claims to transcript + comm_claim_per_round.append_to_transcript(b"comm_claim_per_round", transcript); + comm_eval.append_to_transcript(b"comm_eval", transcript); + + // produce two weights + let w = transcript.challenge_vector(b"combine_two_claims_to_one", 2); + + // compute a weighted sum of the RHS + let comm_target = GroupElement::vartime_multiscalar_mul( + w.iter(), + iter::once(&comm_claim_per_round) + .chain(iter::once(&comm_eval)) + .map(|pt| pt.decompress().unwrap()) + .collect::>(), + ) + .compress(); + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![Scalar::one(); degree_bound + 1]; + a[0] += Scalar::one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![Scalar::one(); degree_bound + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_i; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + self.proofs[i] + .verify( + gens_1, + gens_n, + transcript, + &a, + &self.comm_polys[i], + &comm_target, + ) + .is_ok() + }; + if !res { + return Err(ProofVerifyError::InternalError); + } + + r.push(r_i); + } + + Ok((self.comm_evals[self.comm_evals.len() - 1], r)) + } +} + +impl SumcheckInstanceProof { + pub fn prove_cubic( + claim: &Scalar, + num_rounds: usize, + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + comb_func: F, + transcript: &mut Transcript, + ) -> (Self, Vec, Vec) + where + F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar, + { + let mut e = *claim; + let mut 
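The w challenges above let the verifier fold two dot-product statements into one by linearity: <poly, a_sc> = claim (the sum-check test, with a_sc = (2, 1, ..., 1)) and <poly, a_eval> = G(r) (with a_eval = (1, r, r^2, ...)), so a single dot-product proof covers both. A numeric sketch in the same toy field, with arbitrary stand-in weights:

```rust
const P: u128 = (1u128 << 61) - 1;
fn add(a: u128, b: u128) -> u128 { (a + b) % P }
fn mul(a: u128, b: u128) -> u128 { (a * b) % P }

fn dot(a: &[u128], b: &[u128]) -> u128 {
    a.iter().zip(b).fold(0, |acc, (&x, &y)| add(acc, mul(x, y)))
}

fn main() {
    // coefficients of a degree-2 round polynomial G
    let poly = [5u128, 3, 2];
    let r = 10u128;

    let a_sc = [2u128, 1, 1];               // <poly, a_sc>  = G(0) + G(1)
    let a_eval = [1u128, r, mul(r, r)];     // <poly, a_eval> = G(r)
    let claim = dot(&poly, &a_sc);
    let eval = dot(&poly, &a_eval);

    // fold both checks into one with random weights w (stand-in values);
    // by linearity one dot-product relation certifies both statements
    let w = [17u128, 29];
    let a: Vec<u128> = (0..3)
        .map(|i| add(mul(w[0], a_sc[i]), mul(w[1], a_eval[i])))
        .collect();
    let target = add(mul(w[0], claim), mul(w[1], eval));
    assert_eq!(dot(&poly, &a), target);
}
```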
r: Vec = Vec::new(); + let mut cubic_polys: Vec = Vec::new(); + for _j in 0..num_rounds { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + let mut eval_point_3 = Scalar::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + + let evals = vec![eval_point_0, e - eval_point_0, eval_point_2, eval_point_3]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); + // bound all tables to the verifier's challenege + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + e = poly.evaluate(&r_j); + cubic_polys.push(poly.compress()); + } + + ( + SumcheckInstanceProof::new(cubic_polys), + r, + vec![poly_A[0], poly_B[0], poly_C[0]], + ) + } + + pub fn prove_cubic_batched( + claim: &Scalar, + num_rounds: usize, + poly_vec_par: ( + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut DensePolynomial, + ), + poly_vec_seq: ( + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + ), + coeffs: &[Scalar], + comb_func: F, + transcript: &mut Transcript, + ) -> ( + Self, + Vec, + (Vec, Vec, Scalar), + (Vec, Vec, Vec), + ) + where + F: Fn(&Scalar, &Scalar, &Scalar) -> Scalar, + { + let (poly_A_vec_par, poly_B_vec_par, poly_C_par) = poly_vec_par; + let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; + + //let (poly_A_vec_seq, poly_B_vec_seq, poly_C_vec_seq) = poly_vec_seq; + let mut e = *claim; + let mut r: Vec = Vec::new(); + let mut cubic_polys: Vec = Vec::new(); + + for _j in 0..num_rounds { + let mut evals: Vec<(Scalar, Scalar, Scalar)> = Vec::new(); + + for (poly_A, poly_B) in poly_A_vec_par.iter().zip(poly_B_vec_par.iter()) { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + let mut eval_point_3 = Scalar::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C_par[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_par[len + i] + poly_C_par[len + i] - poly_C_par[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + + // eval 3: bound_func is 
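The inner loop never interpolates explicitly: each table entry is linear in the round variable across a (low, high) pair, so the evaluations at 2 and 3 come from repeated addition of (high - low). The identity in isolation, over plain integers since it holds in any ring:

```rust
fn main() {
    // for a hypercube pair (low, high) = (A[i], A[len + i]), the entry is
    // the linear function L(t) = low + t * (high - low) of the round variable
    let (low, high) = (7i64, 19);
    let l = |t: i64| low + t * (high - low);

    let at2 = high + high - low;  // poly_A_bound_point for eval point 2
    let at3 = at2 + (high - low); // incremental step for eval point 3
    assert_eq!(at2, l(2));
    assert_eq!(at3, l(3));
    // {G(0), G(2), G(3)} plus G(1) = e - G(0) pin down the cubic round poly
}
```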
-2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C_par[len + i] - poly_C_par[i]; + + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + + evals.push((eval_point_0, eval_point_2, eval_point_3)); + } + + for (poly_A, poly_B, poly_C) in izip!( + poly_A_vec_seq.iter(), + poly_B_vec_seq.iter(), + poly_C_vec_seq.iter() + ) { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + let mut eval_point_3 = Scalar::zero(); + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i]); + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + ); + } + evals.push((eval_point_0, eval_point_2, eval_point_3)); + } + + let evals_combined_0 = (0..evals.len()).map(|i| evals[i].0 * coeffs[i]).sum(); + let evals_combined_2 = (0..evals.len()).map(|i| evals[i].1 * coeffs[i]).sum(); + let evals_combined_3 = (0..evals.len()).map(|i| evals[i].2 * coeffs[i]).sum(); + + let evals = vec![ + evals_combined_0, + e - evals_combined_0, + evals_combined_2, + evals_combined_3, + ]; + let poly = UniPoly::from_evals(&evals); + + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); + + // bound all tables to the verifier's challenege + for (poly_A, poly_B) in poly_A_vec_par.iter_mut().zip(poly_B_vec_par.iter_mut()) { + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + } + poly_C_par.bound_poly_var_top(&r_j); + + for (poly_A, poly_B, poly_C) in izip!( + poly_A_vec_seq.iter_mut(), + poly_B_vec_seq.iter_mut(), + poly_C_vec_seq.iter_mut() + ) { + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + } + + e = poly.evaluate(&r_j); + cubic_polys.push(poly.compress()); + } + + let poly_A_par_final = (0..poly_A_vec_par.len()) + .map(|i| poly_A_vec_par[i][0]) + .collect(); + let poly_B_par_final = (0..poly_B_vec_par.len()) + .map(|i| poly_B_vec_par[i][0]) + .collect(); + let claims_prod = (poly_A_par_final, poly_B_par_final, poly_C_par[0]); + + let poly_A_seq_final = (0..poly_A_vec_seq.len()) + .map(|i| poly_A_vec_seq[i][0]) + .collect(); + let poly_B_seq_final = (0..poly_B_vec_seq.len()) + .map(|i| poly_B_vec_seq[i][0]) + .collect(); + let poly_C_seq_final = (0..poly_C_vec_seq.len()) + .map(|i| poly_C_vec_seq[i][0]) + .collect(); + let claims_dotp = 
(poly_A_seq_final, poly_B_seq_final, poly_C_seq_final); + + ( + SumcheckInstanceProof::new(cubic_polys), + r, + claims_prod, + claims_dotp, + ) + } +} + +impl ZKSumcheckInstanceProof { + pub fn prove_quad( + claim: &Scalar, + blind_claim: &Scalar, + num_rounds: usize, + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + comb_func: F, + gens_1: &MultiCommitGens, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (Self, Vec, Vec, Scalar) + where + F: Fn(&Scalar, &Scalar) -> Scalar, + { + let (blinds_poly, blinds_evals) = ( + random_tape.random_vector(b"blinds_poly", num_rounds), + random_tape.random_vector(b"blinds_evals", num_rounds), + ); + let mut claim_per_round = *claim; + let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); + + let mut r: Vec = Vec::new(); + let mut comm_polys: Vec = Vec::new(); + let mut comm_evals: Vec = Vec::new(); + let mut proofs: Vec = Vec::new(); + + for j in 0..num_rounds { + let (poly, comm_poly) = { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + + let len = poly_A.len() / 2; + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point); + } + + let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2]; + let poly = UniPoly::from_evals(&evals); + let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); + (poly, comm_poly) + }; + + // append the prover's message to the transcript + comm_poly.append_to_transcript(b"comm_poly", transcript); + comm_polys.push(comm_poly); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b"challenge_nextround"); + + // bound all tables to the verifier's challenege + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + + // produce a proof of sum-check and of evaluation + let (proof, claim_next_round, comm_claim_next_round) = { + let eval = poly.evaluate(&r_j); + let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); + + // we need to prove the following under homomorphic commitments: + // (1) poly(0) + poly(1) = claim_per_round + // (2) poly(r_j) = eval + + // Our technique is to leverage dot product proofs: + // (1) we can prove: = claim_per_round + // (2) we can prove: >(), + ) + .compress(); + + let blind = { + let blind_sc = if j == 0 { + blind_claim + } else { + &blinds_evals[j - 1] + }; + + let blind_eval = &blinds_evals[j]; + + w[0] * blind_sc + w[1] * blind_eval + }; + assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + a[0] += Scalar::one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_j; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( + gens_1, + gens_n, + transcript, + random_tape, + 
&poly.as_vec(), + &blinds_poly[j], + &a, + &target, + &blind, + ); + + (proof, eval, comm_eval) + }; + + claim_per_round = claim_next_round; + comm_claim_per_round = comm_claim_next_round; + + proofs.push(proof); + r.push(r_j); + comm_evals.push(comm_claim_per_round); + } + + ( + ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), + r, + vec![poly_A[0], poly_B[0]], + blinds_evals[num_rounds - 1], + ) + } + + fn bound_four_polynomial ( + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + r_j: &Scalar, + ) { + poly_A.bound_poly_var_top(r_j); + poly_B.bound_poly_var_top(r_j); + poly_C.bound_poly_var_top(r_j); + poly_D.bound_poly_var_top(r_j); + } + + #[cfg(feature = "multicore")] + fn bound_four_polynomial_parallel( + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + r_j: &Scalar, + ) { + rayon::join( + || poly_A.bound_poly_var_top(r_j), + || rayon::join( + || poly_B.bound_poly_var_top(r_j), + || rayon::join( + || poly_C.bound_poly_var_top(r_j), + || poly_D.bound_poly_var_top(r_j), + ) + ) + ); + } + + pub fn prove_cubic_with_additive_term( + claim: &Scalar, + blind_claim: &Scalar, + num_rounds: usize, + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + comb_func: F, + gens_1: &MultiCommitGens, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (Self, Vec, Vec, Scalar) + where + F: Fn(&Scalar, &Scalar, &Scalar, &Scalar) -> Scalar, + { + let (blinds_poly, blinds_evals) = ( + random_tape.random_vector(b"blinds_poly", num_rounds), + random_tape.random_vector(b"blinds_evals", num_rounds), + ); + + let mut claim_per_round = *claim; + let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); + + let mut r: Vec = Vec::new(); + let mut comm_polys: Vec = Vec::new(); + let mut comm_evals: Vec = Vec::new(); + let mut proofs: Vec = Vec::new(); + + + for j in 0..num_rounds { + + let (poly, comm_poly) = { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + let mut eval_point_3 = Scalar::zero(); + + let len = poly_A.len() / 2; + // let timer = Timer::new("cubic_with_additive_term_inner"); + + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_point, + &poly_C_bound_point, + &poly_D_bound_point, + ); + } + // timer.stop(); + + let evals = vec![ + 
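bound_four_polynomial_parallel fans the four independent bind operations out with nested rayon::join calls. A runnable miniature of the same shape; it assumes rayon as a dependency, and `halve` is a hypothetical stand-in for bound_poly_var_top:

```rust
// requires rayon = "1" in Cargo.toml
use rayon::join;

// stand-in for bound_poly_var_top: fold the table to half its size
fn halve(v: &mut Vec<i64>) {
    let half = v.len() / 2;
    for i in 0..half {
        v[i] = (v[i] + v[half + i]) / 2;
    }
    v.truncate(half);
}

fn main() {
    let (mut a, mut b, mut c, mut d) =
        (vec![1i64, 3], vec![5, 7], vec![9, 11], vec![13, 15]);

    // rayon::join runs both closures, potentially in parallel; nesting
    // three joins fans the four independent binds out to the thread pool
    join(
        || halve(&mut a),
        || join(|| halve(&mut b), || join(|| halve(&mut c), || halve(&mut d))),
    );
    assert_eq!((a[0], b[0], c[0], d[0]), (2, 6, 10, 14));
}
```

The closures are accepted because each captures a distinct &mut, exactly as the four distinct DensePolynomial arguments do above.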
eval_point_0, + claim_per_round - eval_point_0, + eval_point_2, + eval_point_3, + ]; + let poly = UniPoly::from_evals(&evals); + let comm_poly = poly.commit(gens_n, &blinds_poly[j]).compress(); + (poly, comm_poly) + }; + + // append the prover's message to the transcript + comm_poly.append_to_transcript(b"comm_poly", transcript); + comm_polys.push(comm_poly); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b"challenge_nextround"); + + // #[cfg(not(feature = "multicore"))] + // Self::bound_four_polynomial(poly_A, poly_B, poly_C, poly_D, &r_j); + // #[cfg(feature = "multicore")] + // Self::bound_four_polynomial_parallel(poly_A, poly_B, poly_C, poly_D, &r_j); + + // bound all tables to the verifier's challenege + poly_A.bound_poly_var_top(&r_j); + poly_B.bound_poly_var_top(&r_j); + poly_C.bound_poly_var_top(&r_j); + poly_D.bound_poly_var_top(&r_j); + + // produce a proof of sum-check and of evaluation + let (proof, claim_next_round, comm_claim_next_round) = { + let eval = poly.evaluate(&r_j); + let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); + + // we need to prove the following under homomorphic commitments: + // (1) poly(0) + poly(1) = claim_per_round + // (2) poly(r_j) = eval + + // Our technique is to leverage dot product proofs: + // (1) we can prove: = claim_per_round + // (2) we can prove: >(), + ) + .compress(); + + let blind = { + let blind_sc = if j == 0 { + blind_claim + } else { + &blinds_evals[j - 1] + }; + + let blind_eval = &blinds_evals[j]; + + w[0] * blind_sc + w[1] * blind_eval + }; + + assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + a[0] += Scalar::one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_j; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( + gens_1, + gens_n, + transcript, + random_tape, + &poly.as_vec(), + &blinds_poly[j], + &a, + &target, + &blind, + ); + + (proof, eval, comm_eval) + }; + + proofs.push(proof); + claim_per_round = claim_next_round; + comm_claim_per_round = comm_claim_next_round; + r.push(r_j); + comm_evals.push(comm_claim_per_round); + + } + + ( + ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), + r, + vec![poly_A[0], poly_B[0], poly_C[0], poly_D[0]], + blinds_evals[num_rounds - 1], + ) + } + + fn bound_five_polynomial( + poly_A: &mut DensePolynomial, + poly_B0: &mut DensePolynomial, + poly_B1: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + r_j: &Scalar, + ) { + poly_A.bound_poly_var_top(r_j); + poly_B0.bound_poly_var_top(r_j); + poly_B1.bound_poly_var_top(r_j); + // for poly_B in polys_B.iter_mut() { + // poly_B.bound_poly_var_top(r_j); + // } + poly_C.bound_poly_var_top(r_j); + poly_D.bound_poly_var_top(r_j); + } + + #[cfg(feature = "multicore")] + fn bound_five_polynomial_parallel( + poly_A: &mut DensePolynomial, + poly_B0: &mut DensePolynomial, + poly_B1: &mut DensePolynomial, + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + r_j: &Scalar, + ) { + rayon::join( + || poly_A.bound_poly_var_top(r_j), + || rayon::join( + 
|| poly_B0.bound_poly_var_top(r_j), + || rayon::join( + || poly_B1.bound_poly_var_top(r_j), + || rayon::join( + || poly_C.bound_poly_var_top(r_j), + || poly_D.bound_poly_var_top(r_j) + ) + ) + ) + ); + } + + fn prove_cubic_with_four_terms_inner( + poly_A: &DensePolynomial, + // polys_B: &[DensePolynomial; 2], + poly_B0: &DensePolynomial, + poly_B1: &DensePolynomial, + poly_C: &DensePolynomial, + poly_D: &DensePolynomial, + comb_func: F, + len: usize, + ) -> (Scalar, Scalar, Scalar) + where + F: Fn(&Scalar, &Scalar, &Scalar, &Scalar, &Scalar) -> Scalar + Sync, + { + let mut eval_point_0 = Scalar::zero(); + let mut eval_point_2 = Scalar::zero(); + let mut eval_point_3 = Scalar::zero(); + for i in 0..len { + // eval 0: bound_func is A(low) + eval_point_0 += comb_func(&poly_A[i], + &poly_B0[i], + &poly_B1[i], + &poly_C[i], + &poly_D[i], + ); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + // let poly_B_bound_points = { + // let mut poly_B_bound_points = [Scalar::zero(); 2]; + // for k in 0..2 { + // poly_B_bound_points[k] = polys_B[k][len + i] + polys_B[k][len + i] - polys_B[k][i]; + // } + // poly_B_bound_points + // }; + let poly_B_bound_points = [ + poly_B0[len + i] + poly_B0[len + i] - poly_B0[i], + poly_B1[len + i] + poly_B1[len + i] - poly_B1[i], + ]; + + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + + let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; + eval_point_2 += comb_func( + &poly_A_bound_point, + &poly_B_bound_points[0], + &poly_B_bound_points[1], + &poly_C_bound_point, + &poly_D_bound_point, + ); + + // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + // let poly_B_bound_points = { + // let mut cur_points = [Scalar::zero(); 2]; + // for k in 0..2 { + // cur_points[k] = poly_B_bound_points[k] + polys_B[k][len + i] - polys_B[k][i]; + // } + // cur_points + // }; + let poly_B_bound_points = [ + poly_B_bound_points[0] + poly_B0[len + i] - poly_B0[i], + poly_B_bound_points[1] + poly_B1[len + i] - poly_B1[i], + ]; + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + + + // println!("sum: {:?}", poly_C_bound_points[0]+poly_C_bound_points[1]); + let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; + eval_point_3 += comb_func( + &poly_A_bound_point, + &poly_B_bound_points[0], + &poly_B_bound_points[1], + &poly_C_bound_point, + &poly_D_bound_point, + ); + } + (eval_point_0, eval_point_2, eval_point_3) + } + + #[cfg(feature = "multicore")] + fn prove_cubic_with_four_terms_inner_parallel( + poly_A: &DensePolynomial, + // polys_B: &[DensePolynomial; 2], + // polys_B: (&DensePolynomial, &DensePolynomial), + poly_B0: &DensePolynomial, + poly_B1: &DensePolynomial, + poly_C: &DensePolynomial, + poly_D: &DensePolynomial, + comb_func: F, + len: usize, + ) -> (Scalar, Scalar, Scalar) + where + F: Fn(&Scalar, &Scalar, &Scalar, &Scalar, &Scalar) -> Scalar + Sync, + { + (0..len).into_par_iter().map(|i| { + let eval_0 = comb_func( + &poly_A[i], + &poly_B0[i], + &poly_B1[i], + &poly_C[i], + &poly_D[i], + ); + + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_points = [ + poly_B0[len + i] + poly_B0[len + i] - poly_B0[i], + poly_B1[len + i] + poly_B1[len + i] - poly_B1[i], + ]; + // let poly_B_bound_points: [Scalar; 2] = { + // let mut poly_B_bound_points = 
[Scalar::zero(); 2]; + // for k in 0..2 { + // poly_B_bound_points[k] = polys_B[k][len + i] + polys_B[k][len + i] - polys_B[k][i]; + // } + // poly_B_bound_points + // }; + + let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; + let eval_2 = comb_func( + &poly_A_bound_point, + &poly_B_bound_points[0], + &poly_B_bound_points[1], + &poly_C_bound_point, + &poly_D_bound_point, + ); + + let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; + // let poly_B_bound_points: [Scalar; 2] = { + // let mut cur_points = [Scalar::zero(); 2]; + // for k in 0..2 { + // cur_points[k] = poly_B_bound_points[k] + polys_B[k][len + i] - polys_B[k][i]; + // } + // cur_points + // }; + let poly_B_bound_points = [ + poly_B_bound_points[0] + poly_B0[len + i] - poly_B0[i], + poly_B_bound_points[1] + poly_B1[len + i] - poly_B1[i], + ]; + + let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; + let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; + let eval_3 = comb_func( + &poly_A_bound_point, + &poly_B_bound_points[0], + &poly_B_bound_points[1], + &poly_C_bound_point, + &poly_D_bound_point, + ); + + (eval_0, eval_2, eval_3) + }).reduce( + || (Scalar::zero(), Scalar::zero(), Scalar::zero()), + |(acc0, acc2, acc3), (eval_0, eval_2, eval_3)| (acc0 + eval_0, acc2 + eval_2, acc3 + eval_3), + ) + } + // polys_B[0]*polys_C[0] + polys_B[1]*polys_C[1] + poly_A + pub fn prove_cubic_with_four_terms( + claim: &Scalar, + blind_claim: &Scalar, + num_rounds: usize, + poly_A: &mut DensePolynomial, + // polys_B: &mut [DensePolynomial; 2], + polys_B: (&mut DensePolynomial, &mut DensePolynomial), + poly_C: &mut DensePolynomial, + poly_D: &mut DensePolynomial, + comb_func: F, + gens_1: &MultiCommitGens, + gens_n: &MultiCommitGens, + transcript: &mut Transcript, + random_tape: &mut RandomTape, + ) -> (Self, Vec, Vec, Scalar) + where + F: Fn(&Scalar, &Scalar, &Scalar, &Scalar, &Scalar) -> Scalar + Sync, + { + let (blinds_poly, blinds_evals) = ( + random_tape.random_vector(b"blinds_poly", num_rounds), + random_tape.random_vector(b"blinds_evals", num_rounds), + ); + + let mut claim_per_round = *claim; + let mut comm_claim_per_round = claim_per_round.commit(blind_claim, gens_1).compress(); + + let mut r: Vec = Vec::new(); + // let mut comm_polys: Vec = Vec::new(); + // let mut comm_evals: Vec = Vec::new(); + // let mut proofs: Vec = Vec::new(); + + let mut comm_polys: Vec = Vec::with_capacity(num_rounds); + let mut comm_evals: Vec = Vec::with_capacity(num_rounds); + let mut proofs: Vec = Vec::with_capacity(num_rounds); + + // let timer_sc_inner_outside = Timer::new("prove_sc_phase_two_inner_outside_forloop"); + + for j in 0..num_rounds { + let (poly, comm_poly) = { + + let len = poly_A.len() / 2; + #[cfg(not(feature = "multicore"))] + let (eval_point_0, eval_point_2, eval_point_3) = + Self::prove_cubic_with_four_terms_inner(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &comb_func, len); + + #[cfg(feature = "multicore")] + let (eval_point_0, eval_point_2, eval_point_3) = + if j > 2 { + Self::prove_cubic_with_four_terms_inner(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &comb_func, len) + } + else { + Self::prove_cubic_with_four_terms_inner_parallel(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &comb_func, len) + }; + + let evals = vec![ + eval_point_0, + claim_per_round - eval_point_0, + eval_point_2, + eval_point_3, + ]; + let poly = UniPoly::from_evals(&evals); + let comm_poly = 
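prove_cubic_with_four_terms_inner_parallel accumulates the three round evaluations with a parallel map over indices followed by a reduce that starts from the zero triple and adds componentwise. The skeleton of that pattern; it assumes rayon, and the per-index triple here is arbitrary:

```rust
// requires rayon = "1" in Cargo.toml
use rayon::prelude::*;

fn main() {
    let n = 1_000u64;
    // each index contributes a triple; reduce starts from the zero triple
    // and adds componentwise, like the (eval_0, eval_2, eval_3) sums above
    let (s0, s2, s3) = (0..n)
        .into_par_iter()
        .map(|i| (i, 2 * i, 3 * i))
        .reduce(
            || (0, 0, 0),
            |(a0, a2, a3), (e0, e2, e3)| (a0 + e0, a2 + e2, a3 + e3),
        );

    let base: u64 = (0..n).sum();
    assert_eq!((s0, s2, s3), (base, 2 * base, 3 * base));
}
```

Note the `j > 2` branch in prove_cubic_with_four_terms: the parallel path is used only in the first few rounds, when the tables are still large enough for the fan-out to beat rayon's scheduling overhead.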
poly.commit(gens_n, &blinds_poly[j]).compress(); + + (poly, comm_poly) + }; + + // append the prover's message to the transcript + comm_poly.append_to_transcript(b"comm_poly", transcript); + comm_polys.push(comm_poly); + + //derive the verifier's challenge for the next round + let r_j = transcript.challenge_scalar(b"challenge_nextround"); + + // let timer = Timer::new("cubic_with_cubic_term_inner"); + + // bound all tables to the verifier's challenege + #[cfg(not(feature = "multicore"))] + Self::bound_five_polynomial(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &r_j); + #[cfg(feature = "multicore")] + { + // if j > 2 { + // Self::bound_five_polynomial(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &r_j); + // } else { + Self::bound_five_polynomial_parallel(poly_A, polys_B.0, polys_B.1, poly_C, poly_D, &r_j); + // } + } + // let timer_sc_inner2 = Timer::new("prove_sc_phase_two_inner_forloop2"); + // timer.stop(); + // produce a proof of sum-check and of evaluation + let (proof, claim_next_round, comm_claim_next_round) = { + let eval = poly.evaluate(&r_j); + let comm_eval = eval.commit(&blinds_evals[j], gens_1).compress(); + // we need to prove the following under homomorphic commitments: + // (1) poly(0) + poly(1) = claim_per_round + // (2) poly(r_j) = eval + + // Our technique is to leverage dot product proofs: + // (1) we can prove: = claim_per_round + // (2) we can prove: >(), + ) + .compress(); + + let blind = { + let blind_sc = if j == 0 { + blind_claim + } else { + &blinds_evals[j - 1] + }; + + let blind_eval = &blinds_evals[j]; + + w[0] * blind_sc + w[1] * blind_eval + }; + + assert_eq!(target.commit(&blind, gens_1).compress(), comm_target); + + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + a[0] += Scalar::one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![Scalar::one(); poly.degree() + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_j; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + let (proof, _comm_poly, _comm_sc_eval) = DotProductProof::prove( + gens_1, + gens_n, + transcript, + random_tape, + &poly.as_vec(), + &blinds_poly[j], + &a, + &target, + &blind, + ); + + (proof, eval, comm_eval) + }; + + // timer_sc_inner2.stop(); + + proofs.push(proof); + claim_per_round = claim_next_round; + comm_claim_per_round = comm_claim_next_round; + r.push(r_j); + comm_evals.push(comm_claim_per_round); + } + // timer_sc_inner_outside.stop(); + + let mut polys_vec = vec![poly_A[0]]; + polys_vec.push(polys_B.0[0]); + polys_vec.push(polys_B.1[0]); + // for poly_B in polys_B.iter() { + // polys_vec.push(poly_B[0]); + // } + polys_vec.push(poly_C[0]); + // polys_vec.push(Scalar::one()-poly_C[0]); + polys_vec.push(poly_D[0]); + ( + ZKSumcheckInstanceProof::new(comm_polys, comm_evals, proofs), + r, + polys_vec, + blinds_evals[num_rounds - 1], + ) + } +} diff --git a/third_party/Dorian/src/timer.rs b/third_party/Dorian/src/timer.rs new file mode 100644 index 000000000..8356a35d8 --- /dev/null +++ b/third_party/Dorian/src/timer.rs @@ -0,0 +1,88 @@ +#[cfg(feature = "profile")] +use colored::Colorize; +#[cfg(feature = "profile")] +use core::sync::atomic::AtomicUsize; +#[cfg(feature = "profile")] +use core::sync::atomic::Ordering; +#[cfg(feature = "profile")] +use std::time::Instant; + +#[cfg(feature = "profile")] +pub 
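A note on the round polynomial above: it is cubic, so four evaluations determine it. The prover computes s(0), s(2), s(3) directly from the bound tables, and s(1) is recovered for free from the sum-check identity s(0) + s(1) = claim (the `claim_per_round - eval_point_0` entry in `evals`). A minimal sketch of the evaluation-point arithmetic, using plain i64 values as stand-ins for field Scalars (everything here is illustrative, not the crate's API):

// Illustrative only: a two-entry table (low = value at X=0, high = value at X=1).
// The restriction A(t) = (1 - t)*A(low) + t*A(high), so
//   A(2) = 2*A(high) - A(low)   and   A(3) = A(2) + (A(high) - A(low)),
// which is exactly the incremental update the inner loops above perform.
fn main() {
    let (low, high) = (5i64, 9i64);
    let at = |t: i64| (1 - t) * low + t * high; // direct evaluation

    let a2 = high + high - low; // -A(low) + 2*A(high)
    let a3 = a2 + high - low;   // incremental step from t = 2 to t = 3
    assert_eq!(a2, at(2));
    assert_eq!(a3, at(3));
    println!("A(2) = {a2}, A(3) = {a3}");
}

The incremental step from t = 2 to t = 3 is why the loops add (high - low) once more rather than recomputing from scratch.
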
diff --git a/third_party/Dorian/src/timer.rs b/third_party/Dorian/src/timer.rs
new file mode 100644
index 000000000..8356a35d8
--- /dev/null
+++ b/third_party/Dorian/src/timer.rs
@@ -0,0 +1,88 @@
+#[cfg(feature = "profile")]
+use colored::Colorize;
+#[cfg(feature = "profile")]
+use core::sync::atomic::AtomicUsize;
+#[cfg(feature = "profile")]
+use core::sync::atomic::Ordering;
+#[cfg(feature = "profile")]
+use std::time::Instant;
+
+#[cfg(feature = "profile")]
+pub static CALL_DEPTH: AtomicUsize = AtomicUsize::new(0);
+
+#[cfg(feature = "profile")]
+pub struct Timer {
+  label: String,
+  timer: Instant,
+}
+
+#[cfg(feature = "profile")]
+impl Timer {
+  #[inline(always)]
+  pub fn new(label: &str) -> Self {
+    let timer = Instant::now();
+    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
+    let star = "* ";
+    println!(
+      "{:indent$}{}{}",
+      "",
+      star,
+      label.yellow().bold(),
+      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
+    );
+    Self {
+      label: label.to_string(),
+      timer,
+    }
+  }
+
+  #[inline(always)]
+  pub fn stop(&self) {
+    let duration = self.timer.elapsed();
+    let star = "* ";
+    println!(
+      "{:indent$}{}{} {:?}",
+      "",
+      star,
+      self.label.blue().bold(),
+      duration,
+      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
+    );
+    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
+  }
+
+  #[inline(always)]
+  pub fn print(msg: &str) {
+    CALL_DEPTH.fetch_add(1, Ordering::Relaxed);
+    let star = "* ";
+    println!(
+      "{:indent$}{}{}",
+      "",
+      star,
+      msg.to_string().green().bold(),
+      indent = 2 * CALL_DEPTH.load(Ordering::Relaxed)
+    );
+    CALL_DEPTH.fetch_sub(1, Ordering::Relaxed);
+  }
+}
+
+#[cfg(not(feature = "profile"))]
+pub struct Timer {
+  _label: String,
+}
+
+#[cfg(not(feature = "profile"))]
+impl Timer {
+  #[inline(always)]
+  pub fn new(label: &str) -> Self {
+    Self {
+      _label: label.to_string(),
+    }
+  }
+
+  #[inline(always)]
+  pub fn stop(&self) {}
+
+  #[inline(always)]
+  pub fn print(_msg: &str) {}
+}
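Timer is scope-based but not RAII: `new` increments the shared CALL_DEPTH counter and only `stop` decrements it, so every timer must be stopped exactly once or the printed indentation drifts. A hypothetical call site (`prove_phase` and the labels are made up for illustration):

// Nested timers print an indented trace when the "profile" feature is on,
// and compile down to no-ops otherwise.
fn prove_phase() {
    let outer = Timer::new("prove_sc_phase_two");
    let inner = Timer::new("round_polynomial");
    // ... work ...
    inner.stop();
    Timer::print("rounds complete");
    outer.stop();
}
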
diff --git a/third_party/Dorian/src/transcript.rs b/third_party/Dorian/src/transcript.rs
new file mode 100644
index 000000000..a57f15071
--- /dev/null
+++ b/third_party/Dorian/src/transcript.rs
@@ -0,0 +1,63 @@
+use super::group::CompressedGroup;
+use super::scalar::Scalar;
+use merlin::Transcript;
+
+pub trait ProofTranscript {
+  fn append_protocol_name(&mut self, protocol_name: &'static [u8]);
+  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar);
+  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup);
+  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar;
+  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar>;
+}
+
+impl ProofTranscript for Transcript {
+  fn append_protocol_name(&mut self, protocol_name: &'static [u8]) {
+    self.append_message(b"protocol-name", protocol_name);
+  }
+
+  fn append_scalar(&mut self, label: &'static [u8], scalar: &Scalar) {
+    self.append_message(label, &scalar.to_bytes());
+  }
+
+  fn append_point(&mut self, label: &'static [u8], point: &CompressedGroup) {
+    self.append_message(label, point.as_bytes());
+  }
+
+  fn challenge_scalar(&mut self, label: &'static [u8]) -> Scalar {
+    let mut buf = [0u8; 64];
+    self.challenge_bytes(label, &mut buf);
+    Scalar::from_bytes_wide(&buf)
+  }
+
+  fn challenge_vector(&mut self, label: &'static [u8], len: usize) -> Vec<Scalar> {
+    (0..len)
+      .map(|_i| self.challenge_scalar(label))
+      .collect::<Vec<Scalar>>()
+  }
+}
+
+pub trait AppendToTranscript {
+  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript);
+}
+
+impl AppendToTranscript for Scalar {
+  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
+    transcript.append_scalar(label, self);
+  }
+}
+
+impl AppendToTranscript for [Scalar] {
+  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
+    transcript.append_message(label, b"begin_append_vector");
+    for item in self {
+      transcript.append_scalar(label, item);
+    }
+    transcript.append_message(label, b"end_append_vector");
+  }
+}
+
+impl AppendToTranscript for CompressedGroup {
+  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
+    transcript.append_point(label, self);
+  }
+}
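This trait wraps the usual Fiat-Shamir pattern: prover and verifier feed the same labeled messages into a merlin transcript, so they derive identical challenges; `challenge_scalar` then reduces 64 uniform bytes to a field element via `from_bytes_wide`. A self-contained sketch of that pattern using merlin directly (labels and payload bytes are illustrative):

// Identical transcripts yield identical challenge bytes.
use merlin::Transcript;

fn main() {
    let mut prover = Transcript::new(b"example protocol");
    let mut verifier = Transcript::new(b"example protocol");

    for t in [&mut prover, &mut verifier] {
        t.append_message(b"comm_poly", b"...serialized commitment bytes...");
    }

    let mut c_p = [0u8; 64];
    let mut c_v = [0u8; 64];
    prover.challenge_bytes(b"challenge_nextround", &mut c_p);
    verifier.challenge_bytes(b"challenge_nextround", &mut c_v);
    assert_eq!(c_p, c_v); // same messages, same challenge
}
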
diff --git a/third_party/Dorian/src/unipoly.rs b/third_party/Dorian/src/unipoly.rs
new file mode 100644
index 000000000..dcc391854
--- /dev/null
+++ b/third_party/Dorian/src/unipoly.rs
@@ -0,0 +1,182 @@
+use super::commitments::{Commitments, MultiCommitGens};
+use super::group::GroupElement;
+use super::scalar::{Scalar, ScalarFromPrimitives};
+use super::transcript::{AppendToTranscript, ProofTranscript};
+use merlin::Transcript;
+use serde::{Deserialize, Serialize};
+
+// ax^2 + bx + c stored as vec![c, b, a]
+// ax^3 + bx^2 + cx + d stored as vec![d, c, b, a]
+#[derive(Debug)]
+pub struct UniPoly {
+  coeffs: Vec<Scalar>,
+}
+
+// ax^2 + bx + c stored as vec![c, a]
+// ax^3 + bx^2 + cx + d stored as vec![d, b, a]
+#[derive(Serialize, Deserialize, Debug)]
+pub struct CompressedUniPoly {
+  coeffs_except_linear_term: Vec<Scalar>,
+}
+
+impl UniPoly {
+  pub fn from_evals(evals: &[Scalar]) -> Self {
+    // we only support degree-2 or degree-3 univariate polynomials
+    assert!(evals.len() == 3 || evals.len() == 4);
+    let coeffs = if evals.len() == 3 {
+      // ax^2 + bx + c
+      let two_inv = (2_usize).to_scalar().invert().unwrap();
+
+      let c = evals[0];
+      let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
+      let b = evals[1] - c - a;
+      vec![c, b, a]
+    } else {
+      // ax^3 + bx^2 + cx + d
+      let two_inv = (2_usize).to_scalar().invert().unwrap();
+      let six_inv = (6_usize).to_scalar().invert().unwrap();
+
+      let d = evals[0];
+      let a = six_inv
+        * (evals[3] - evals[2] - evals[2] - evals[2] + evals[1] + evals[1] + evals[1] - evals[0]);
+      let b = two_inv
+        * (evals[0] + evals[0] - evals[1] - evals[1] - evals[1] - evals[1] - evals[1]
+          + evals[2]
+          + evals[2]
+          + evals[2]
+          + evals[2]
+          - evals[3]);
+      let c = evals[1] - d - a - b;
+      vec![d, c, b, a]
+    };
+
+    UniPoly { coeffs }
+  }
+
+  pub fn degree(&self) -> usize {
+    self.coeffs.len() - 1
+  }
+
+  pub fn as_vec(&self) -> Vec<Scalar> {
+    self.coeffs.clone()
+  }
+
+  pub fn eval_at_zero(&self) -> Scalar {
+    self.coeffs[0]
+  }
+
+  pub fn eval_at_one(&self) -> Scalar {
+    (0..self.coeffs.len()).map(|i| self.coeffs[i]).sum()
+  }
+
+  pub fn evaluate(&self, r: &Scalar) -> Scalar {
+    let mut eval = self.coeffs[0];
+    let mut power = *r;
+    for i in 1..self.coeffs.len() {
+      eval += power * self.coeffs[i];
+      power *= r;
+    }
+    eval
+  }
+
+  pub fn compress(&self) -> CompressedUniPoly {
+    let coeffs_except_linear_term = [&self.coeffs[..1], &self.coeffs[2..]].concat();
+    assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
+    CompressedUniPoly {
+      coeffs_except_linear_term,
+    }
+  }
+
+  pub fn commit(&self, gens: &MultiCommitGens, blind: &Scalar) -> GroupElement {
+    self.coeffs.commit(blind, gens)
+  }
+}
+
+impl CompressedUniPoly {
+  // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
+  // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
+  pub fn decompress(&self, hint: &Scalar) -> UniPoly {
+    let mut linear_term =
+      hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
+    for i in 1..self.coeffs_except_linear_term.len() {
+      linear_term -= self.coeffs_except_linear_term[i];
+    }
+
+    let mut coeffs = vec![self.coeffs_except_linear_term[0], linear_term];
+    coeffs.extend(&self.coeffs_except_linear_term[1..]);
+    assert_eq!(self.coeffs_except_linear_term.len() + 1, coeffs.len());
+    UniPoly { coeffs }
+  }
+}
+
+impl AppendToTranscript for UniPoly {
+  fn append_to_transcript(&self, label: &'static [u8], transcript: &mut Transcript) {
+    transcript.append_message(label, b"UniPoly_begin");
+    for i in 0..self.coeffs.len() {
+      transcript.append_scalar(b"coeff", &self.coeffs[i]);
+    }
+    transcript.append_message(label, b"UniPoly_end");
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+
+  #[test]
+  fn test_from_evals_quad() {
+    // polynomial is 2x^2 + 3x + 1
+    let e0 = Scalar::one();
+    let e1 = (6_usize).to_scalar();
+    let e2 = (15_usize).to_scalar();
+    let evals = vec![e0, e1, e2];
+    let poly = UniPoly::from_evals(&evals);
+
+    assert_eq!(poly.eval_at_zero(), e0);
+    assert_eq!(poly.eval_at_one(), e1);
+    assert_eq!(poly.coeffs.len(), 3);
+    assert_eq!(poly.coeffs[0], Scalar::one());
+    assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
+    assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
+
+    let hint = e0 + e1;
+    let compressed_poly = poly.compress();
+    let decompressed_poly = compressed_poly.decompress(&hint);
+    for i in 0..decompressed_poly.coeffs.len() {
+      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
+    }
+
+    let e3 = (28_usize).to_scalar();
+    assert_eq!(poly.evaluate(&(3_usize).to_scalar()), e3);
+  }
+
+  #[test]
+  fn test_from_evals_cubic() {
+    // polynomial is x^3 + 2x^2 + 3x + 1
+    let e0 = Scalar::one();
+    let e1 = (7_usize).to_scalar();
+    let e2 = (23_usize).to_scalar();
+    let e3 = (55_usize).to_scalar();
+    let evals = vec![e0, e1, e2, e3];
+    let poly = UniPoly::from_evals(&evals);
+
+    assert_eq!(poly.eval_at_zero(), e0);
+    assert_eq!(poly.eval_at_one(), e1);
+    assert_eq!(poly.coeffs.len(), 4);
+    assert_eq!(poly.coeffs[0], Scalar::one());
+    assert_eq!(poly.coeffs[1], (3_usize).to_scalar());
+    assert_eq!(poly.coeffs[2], (2_usize).to_scalar());
+    assert_eq!(poly.coeffs[3], (1_usize).to_scalar());
+
+    let hint = e0 + e1;
+    let compressed_poly = poly.compress();
+    let decompressed_poly = compressed_poly.decompress(&hint);
+    for i in 0..decompressed_poly.coeffs.len() {
+      assert_eq!(decompressed_poly.coeffs[i], poly.coeffs[i]);
+    }
+
+    let e4 = (109_usize).to_scalar();
+    assert_eq!(poly.evaluate(&(4_usize).to_scalar()), e4);
+  }
+}
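`from_evals` interpolates coefficients from evaluations at 0, 1, 2 (and 3 in the cubic case), and `decompress` recovers the dropped linear term from the sum-check identity s(0) + s(1) = hint. A worked check of the quadratic case with i64 stand-ins for field Scalars (the division by 2 happens to be exact for this example, whereas the real code multiplies by a field inverse):

// For s(x) = 2x^2 + 3x + 1: s(0) = 1, s(1) = 6, s(2) = 15.
fn main() {
    let (e0, e1, e2) = (1i64, 6, 15);

    let c = e0;
    let a = (e2 - e1 - e1 + c) / 2; // second difference / 2
    let b = e1 - c - a;
    assert_eq!((c, b, a), (1, 3, 2)); // coeffs stored as vec![c, b, a]

    // decompress: with hint = s(0) + s(1), the dropped linear term is
    // hint - 2*constant - (quadratic term), as in the comment above.
    let hint = e0 + e1;
    let linear = hint - 2 * c - a;
    assert_eq!(linear, b);
}
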
diff --git a/third_party/ZoKrates/zokrates_parser/src/lib.rs b/third_party/ZoKrates/zokrates_parser/src/lib.rs
index 277c611f0..be78717e3 100644
--- a/third_party/ZoKrates/zokrates_parser/src/lib.rs
+++ b/third_party/ZoKrates/zokrates_parser/src/lib.rs
@@ -12,7 +12,7 @@ use pest::Parser;
 #[grammar = "zokrates.pest"]
 struct ZoKratesParser;
 
-pub fn parse(input: &str) -> Result<Pairs<Rule>, Error<Rule>> {
+pub fn parse(input: &str) -> Result<Pairs<'_, Rule>, Error<Rule>> {
     ZoKratesParser::parse(Rule::file, input)
 }
diff --git a/third_party/ZoKrates/zokrates_pest_ast/src/lib.rs b/third_party/ZoKrates/zokrates_pest_ast/src/lib.rs
index a5b49cc33..2dc7168f2 100644
--- a/third_party/ZoKrates/zokrates_pest_ast/src/lib.rs
+++ b/third_party/ZoKrates/zokrates_pest_ast/src/lib.rs
@@ -1124,7 +1124,7 @@ impl fmt::Display for Error {
     }
 }
 
-pub fn generate_ast(input: &str) -> Result<Prog, Error> {
+pub fn generate_ast(input: &str) -> Result<Prog<'_>, Error> {
     let parse_tree = parse(input).map_err(Error)?;
     Ok(Prog::from(parse_tree).0)
 }
diff --git a/third_party/ZoKratesCurly/zokrates_parser/src/lib.rs b/third_party/ZoKratesCurly/zokrates_parser/src/lib.rs
index eb61ce2d8..8e93fb970 100644
--- a/third_party/ZoKratesCurly/zokrates_parser/src/lib.rs
+++ b/third_party/ZoKratesCurly/zokrates_parser/src/lib.rs
@@ -13,7 +13,7 @@ use pest::Parser;
 struct ZoKratesParser;
 
 #[allow(clippy::result_large_err)]
-pub fn parse(input: &str) -> Result<Pairs<Rule>, Error<Rule>> {
+pub fn parse(input: &str) -> Result<Pairs<'_, Rule>, Error<Rule>> {
     ZoKratesParser::parse(Rule::file, input)
 }
diff --git a/third_party/ZoKratesCurly/zokrates_pest_ast/src/lib.rs b/third_party/ZoKratesCurly/zokrates_pest_ast/src/lib.rs
index 9f58f574a..cb21fe539 100644
--- a/third_party/ZoKratesCurly/zokrates_pest_ast/src/lib.rs
+++ b/third_party/ZoKratesCurly/zokrates_pest_ast/src/lib.rs
@@ -1194,7 +1194,7 @@ impl fmt::Display for Error {
     }
 }
 
 #[allow(clippy::result_large_err)]
-pub fn generate_ast(input: &str) -> Result<Prog, Error> {
+pub fn generate_ast(input: &str) -> Result<Prog<'_>, Error> {
     let parse_tree = parse(input).map_err(Error)?;
     Ok(Prog::from(parse_tree).0)
 }
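The return types above borrow from the input string, which is the usual pest pattern: the parse tree and the AST built from it hold spans of the source, so their lifetimes are tied to `input`, and `'_` only makes that elided borrow explicit in the signature. A hypothetical minimal example of the pattern (`Ident` and `first_ident` are invented for illustration and are not part of the ZoKrates crates):

// An AST node that borrows a slice of the input.
struct Ident<'ast> {
    name: &'ast str,
}

fn first_ident(input: &str) -> Ident<'_> {
    Ident {
        name: input.split_whitespace().next().unwrap_or(""),
    }
}

fn main() {
    let src = String::from("main field x");
    let id = first_ident(&src);
    println!("{}", id.name); // prints "main"
    // dropping `src` while `id` is still live would be rejected by the borrow checker
}
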