-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathwitness.rs
More file actions
375 lines (348 loc) · 13.4 KB
/
witness.rs
File metadata and controls
375 lines (348 loc) · 13.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
use neo_ccs::matrix::Mat;
use neo_ccs::relations::{CcsClaim, CcsWitness};
use neo_math::K;
use neo_reductions::error::PiCcsError;
use serde::{Deserialize, Serialize};
use std::marker::PhantomData;
use std::ops::Range;
use crate::mem_init::MemInit;
use crate::riscv::lookups::RiscvOpcode;
use crate::AffineWordAddressRemap;
/// Serde default helper: yields `1` for lane-count fields (`lanes`) that were
/// absent in older serialized forms.
fn default_one_usize() -> usize {
    1_usize
}
/// Time-domain column payload for shard-local proving.
///
/// `cpu_cols` and `mem_cols` are stored in column-major form over time:
/// `cols[col_id][j]` where `j in [0, t)`.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TimeColumns<F> {
    /// Number of time steps; each column below indexes `j in [0, t)`.
    pub t: usize,
    /// CPU trace columns, column-major over time: `cpu_cols[col][j]`.
    pub cpu_cols: Vec<Vec<F>>,
    /// Memory trace columns, column-major over time: `mem_cols[col][j]`.
    pub mem_cols: Vec<Vec<F>>,
    /// Chunk-padding activity mask over time (`1` for real step rows, `0` for padded rows).
    /// This is not the semantic CPU trace `active` column.
    pub active_col: Vec<F>,
    /// Logical column ids in the same order as `cpu_cols || mem_cols`.
    pub col_ids: Vec<usize>,
}
impl<F> Default for TimeColumns<F> {
fn default() -> Self {
Self {
t: 0,
cpu_cols: Vec::new(),
mem_cols: Vec::new(),
active_col: Vec::new(),
col_ids: Vec::new(),
}
}
}
/// Descriptor for a Shout lookup table that is defined implicitly (virtually)
/// rather than fully materialized in `LutInstance::table`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum LutTableSpec {
    /// A "virtual" lookup table defined by the RISC-V opcode semantics over
    /// an interleaved operand address space.
    ///
    /// Addressing convention:
    /// - `n_side = 2`, `ell = 1`
    /// - `d = 2*xlen` (one bit per dimension)
    /// - Address bits are little-endian and correspond to `interleave_bits(rs1, rs2)`.
    RiscvOpcode { opcode: RiscvOpcode, xlen: usize },
    /// A packed-key (non-bit-addressed) variant of `RiscvOpcode`, intended for "no width bloat"
    /// Shout/ALU proofs that do **not** commit to `ell_addr=64` addr-bit columns.
    ///
    /// Current implementation status:
    /// - Supported: `xlen = 32` for selected RV32 ops, including:
    ///   - bitwise: `And | Andn | Or | Xor`
    ///   - arithmetic: `Add | Sub`
    ///   - compares: `Eq | Neq | Slt | Sltu`
    ///   - shifts: `Sll | Srl | Sra`
    ///   - RV32M: `Mul | Mulh | Mulhu | Mulhsu | Div | Divu | Rem | Remu`
    /// - Witness convention: the Shout lane's `addr_bits` slice is repurposed as packed columns.
    ///   The exact layout depends on `opcode`; the suffix columns are always `[has_lookup, val_u32]`.
    ///   Examples:
    ///   - `Add/Sub` (d=3): `[lhs_u32, rhs_u32, aux_bit]` (carry for `Add`, borrow for `Sub`)
    ///   - `Eq/Neq` (d=35): `[lhs_u32, rhs_u32, borrow_bit, diff_bits[0..32]]` where `val_u32` is the out bit
    ///   - `Mul` (d=34): `[lhs_u32, rhs_u32, hi_bits[0..32]]` where `val_u32` is the low 32 bits
    ///   - `Mulhu` (d=34): `[lhs_u32, rhs_u32, lo_bits[0..32]]` where `val_u32` is the high 32 bits
    ///   - `Sltu` (d=35): `[lhs_u32, rhs_u32, diff_u32, diff_bits[0..32]]` where `val_u32` is the out bit
    ///   - `Sll/Srl/Sra` (d=38): `[lhs_u32, shamt_bits[0..5], ...]`
    ///
    /// For packed-key instances, Route-A enforces correctness directly via time-domain constraints
    /// (claimed sum forced to 0); table MLE evaluation is not used.
    RiscvOpcodePacked { opcode: RiscvOpcode, xlen: usize },
    /// An "event table" packed-key variant of `RiscvOpcodePacked` for RV32.
    ///
    /// Instead of storing one Shout lane over time (one row per cycle), the witness stores only
    /// the executed lookup events (one row per lookup). Each event row carries:
    /// - a prefix of `time_bits` boolean columns encoding the original Route-A time index `t`
    ///   (little-endian), and
    /// - the same packed columns as `RiscvOpcodePacked` for `opcode`.
    ///
    /// The Route-A protocol then links the event table back to the CPU trace via a "scatter"
    /// check at a random time point `r_cycle` (Jolt-ish): roughly,
    /// Σ_events hash(event)·χ_{r_cycle}(t_event) == Σ_t hash(trace[t])·χ_{r_cycle}(t).
    ///
    /// Notes:
    /// - This is RV32-only (`xlen = 32`), `n_side = 2`, `ell = 1`.
    /// - `time_bits` must match the Route-A `ell_n` used for the time domain.
    RiscvOpcodeEventTablePacked {
        opcode: RiscvOpcode,
        xlen: usize,
        time_bits: usize,
    },
    /// Implicit identity table over 32-bit addresses: `table[addr] = addr`.
    ///
    /// Addressing convention:
    /// - `n_side = 2`, `ell = 1`
    /// - `d = 32` (one bit per dimension)
    /// - Address bits are little-endian
    IdentityU32,
}
impl LutTableSpec {
    /// Evaluate this table's multilinear extension at the point `r_addr`.
    ///
    /// Only bit-addressed specs support evaluation; the packed-key variants
    /// return `PiCcsError::InvalidInput` because they have no MLE over
    /// address bits.
    ///
    /// # Errors
    /// - Packed variants (`RiscvOpcodePacked`, `RiscvOpcodeEventTablePacked`)
    ///   always fail.
    /// - `IdentityU32` fails unless `r_addr.len() == 32`.
    pub fn eval_table_mle(&self, r_addr: &[K]) -> Result<K, PiCcsError> {
        match self {
            Self::RiscvOpcode { opcode, xlen } => {
                let v = crate::riscv::lookups::evaluate_opcode_mle(*opcode, r_addr, *xlen);
                Ok(v)
            }
            Self::RiscvOpcodePacked { .. } => Err(PiCcsError::InvalidInput(
                "RiscvOpcodePacked does not support eval_table_mle (not bit-addressed)".into(),
            )),
            Self::RiscvOpcodeEventTablePacked { .. } => Err(PiCcsError::InvalidInput(
                "RiscvOpcodeEventTablePacked does not support eval_table_mle (not bit-addressed)".into(),
            )),
            Self::IdentityU32 => {
                let got = r_addr.len();
                if got != 32 {
                    Err(PiCcsError::InvalidInput(format!(
                        "IdentityU32: expected r_addr.len()=32, got {}",
                        got
                    )))
                } else {
                    Ok(crate::identity::eval_identity_mle_le(r_addr))
                }
            }
        }
    }
}
/// Public (instance-side) description of one Twist memory.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemInstance<C, F> {
    /// Logical memory instance identifier (e.g. RISC-V `PROG_ID/REG_ID/RAM_ID`).
    ///
    /// This is used by higher-level protocols to link Twist instances to CPU trace columns
    /// without relying on a fixed instance ordering.
    pub mem_id: u32,
    /// Commitments to the witness matrices in the matching `MemWitness`
    /// — presumably one per matrix; verify against the prover.
    pub comms: Vec<C>,
    /// Number of memory cells; `init` covers cells `[0..k)`.
    pub k: usize,
    /// Number of address dimensions (each addressed with `ell` bits).
    pub d: usize,
    /// Size of each address dimension; see `ell = ceil(log2(n_side))` below.
    pub n_side: usize,
    /// Number of time steps this instance spans — TODO confirm against prover usage.
    pub steps: usize,
    /// Number of access lanes per VM step for this Twist instance.
    ///
    /// Each lane carries the canonical Twist bus slice:
    /// `[ra_bits, wa_bits, has_read, has_write, wv, rv, inc]`.
    #[serde(default = "default_one_usize")]
    pub lanes: usize,
    /// Bits per address dimension: ell = ceil(log2(n_side))
    /// With index-bit addressing, we commit d*ell bit-columns instead of d*n_side one-hot columns.
    pub ell: usize,
    /// Public initial memory state for cells [0..k).
    pub init: MemInit<F>,
    /// Optional precomputed digest for transcript binding fast paths (prover-only optimization).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub init_digest: Option<[u8; 32]>,
    /// Optional guest->logical address remap metadata for memory-side linkage checks.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub guest_addr_remap: Option<AffineWordAddressRemap>,
}
/// Prover-side witness backing a `MemInstance`'s commitments.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MemWitness<F> {
    /// Witness matrices — presumably aligned one-to-one with
    /// `MemInstance::comms`; TODO confirm against the committing code.
    pub mats: Vec<Mat<F>>,
}
/// Column offsets of a single Twist access lane within the witness row,
/// following the canonical bus slice
/// `[ra_bits, wa_bits, has_read, has_write, wv, rv, inc]`.
#[derive(Clone, Debug)]
pub struct TwistWitnessLaneLayout {
    /// Total address bit-columns per address (`d * ell`).
    pub ell_addr: usize,
    /// Column range of the read-address bits.
    pub ra_bits: Range<usize>,
    /// Column range of the write-address bits.
    pub wa_bits: Range<usize>,
    /// Column index of the read-selector flag.
    pub has_read: usize,
    /// Column index of the write-selector flag.
    pub has_write: usize,
    /// Column index of the write value.
    pub wv: usize,
    /// Column index of the read value.
    pub rv: usize,
    /// Column index of the increment at the write address (`inc`).
    pub inc_at_write_addr: usize,
}
/// Full Twist witness column layout: `lanes.len()` lanes of `lane_len`
/// consecutive columns each.
#[derive(Clone, Debug)]
pub struct TwistWitnessLayout {
    /// Columns occupied by each lane (`2 * ell_addr + 5`).
    pub lane_len: usize,
    /// Per-lane offsets, in lane order.
    pub lanes: Vec<TwistWitnessLaneLayout>,
}
impl TwistWitnessLayout {
    /// Total number of witness columns across all lanes
    /// (`lane_len * lanes.len()`).
    ///
    /// # Panics
    /// Panics if the product overflows `usize`.
    pub fn expected_len(&self) -> usize {
        let n_lanes = self.lanes.len();
        self.lane_len
            .checked_mul(n_lanes)
            .expect("TwistWitnessLayout: lane_len*lanes overflow")
    }
}
impl<C, F> MemInstance<C, F> {
    /// Build the per-lane witness column layout for this Twist instance.
    ///
    /// Each lane occupies `2*ell_addr + 5` consecutive columns, laid out as
    /// `[ra_bits, wa_bits, has_read, has_write, wv, rv, inc]`, where
    /// `ell_addr = d * ell`. At least one lane is always produced, even if
    /// `lanes` is zero.
    pub fn twist_layout(&self) -> TwistWitnessLayout {
        let ell_addr = self.d * self.ell;
        let lane_len = 2 * ell_addr + 5;
        let n_lanes = self.lanes.max(1);
        let lanes = (0..n_lanes)
            .map(|lane| {
                let base = lane * lane_len;
                // First scalar column after the two address-bit ranges.
                let scalars = base + 2 * ell_addr;
                TwistWitnessLaneLayout {
                    ell_addr,
                    ra_bits: base..base + ell_addr,
                    wa_bits: base + ell_addr..scalars,
                    has_read: scalars,
                    has_write: scalars + 1,
                    wv: scalars + 2,
                    rv: scalars + 3,
                    inc_at_write_addr: scalars + 4,
                }
            })
            .collect();
        TwistWitnessLayout { lane_len, lanes }
    }
}
/// Public (instance-side) description of one Shout lookup table.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LutInstance<C, F> {
    /// Logical shout table identifier.
    pub table_id: u32,
    /// Commitments to the witness matrices in the matching `LutWitness`
    /// — presumably one per matrix; verify against the prover.
    pub comms: Vec<C>,
    /// Table size — presumably `n_side^d` entries; TODO confirm.
    pub k: usize,
    /// Number of address dimensions (each addressed with `ell` bits).
    pub d: usize,
    /// Size of each address dimension; see `ell = ceil(log2(n_side))` below.
    pub n_side: usize,
    /// Number of time steps this instance spans — TODO confirm against prover usage.
    pub steps: usize,
    /// Number of lookup lanes per VM step for this Shout instance.
    ///
    /// Each lane carries the canonical Shout bus slice:
    /// `[addr_bits, has_lookup, val]`.
    #[serde(default = "default_one_usize")]
    pub lanes: usize,
    /// Bits per address dimension: ell = ceil(log2(n_side))
    pub ell: usize,
    /// Optional virtual table descriptor (when the full table is not materialized).
    #[serde(default)]
    pub table_spec: Option<LutTableSpec>,
    /// Materialized table values; may be empty when `table_spec` describes
    /// the table implicitly — TODO confirm.
    pub table: Vec<F>,
    /// Optional precomputed digest for transcript binding fast paths (prover-only optimization).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub table_digest: Option<[u8; 32]>,
    /// Optional address-sharing group id for shared-bus column layout.
    #[serde(default)]
    pub addr_group: Option<u64>,
    /// Optional selector-sharing group id for shared-bus column layout.
    #[serde(default)]
    pub selector_group: Option<u64>,
}
/// Prover-side witness backing a `LutInstance`'s commitments.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LutWitness<F> {
    /// Witness matrices — presumably aligned one-to-one with
    /// `LutInstance::comms`; TODO confirm against the committing code.
    pub mats: Vec<Mat<F>>,
}
/// Column layout of one Shout lane, following the canonical bus slice
/// `[addr_bits, has_lookup, val]`.
#[derive(Clone, Debug)]
pub struct ShoutWitnessLayout {
    /// Total address bit-columns (`d * ell`).
    pub ell_addr: usize,
    /// Column range of the address bits.
    pub addr_bits: Range<usize>,
    /// Column index of the lookup-selector flag.
    pub has_lookup: usize,
    /// Column index of the looked-up value.
    pub val: usize,
}
impl ShoutWitnessLayout {
    /// Total witness columns for one Shout lane: the `ell_addr` address bits
    /// plus the `has_lookup` and `val` columns.
    pub fn expected_len(&self) -> usize {
        2 + self.ell_addr
    }
}
impl<C, F> LutInstance<C, F> {
    /// Build the single-lane witness column layout for this Shout instance:
    /// `d * ell` address-bit columns followed by `has_lookup` and `val`.
    pub fn shout_layout(&self) -> ShoutWitnessLayout {
        let bits = self.ell * self.d;
        ShoutWitnessLayout {
            ell_addr: bits,
            addr_bits: 0..bits,
            has_lookup: bits,
            val: bits + 1,
        }
    }
}
/// Per-step bundle that carries CPU + memory witnesses for a single folding step.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StepWitnessBundle<Cmt, F, K> {
    /// The step's CCS claim together with its witness.
    pub mcs: (CcsClaim<Cmt, F>, CcsWitness<F>),
    /// Shout lookup instances paired with their witnesses.
    pub lut_instances: Vec<(LutInstance<Cmt, F>, LutWitness<F>)>,
    /// Twist memory instances paired with their witnesses.
    pub mem_instances: Vec<(MemInstance<Cmt, F>, MemWitness<F>)>,
    /// Time-domain column payload; defaults to empty when absent in the
    /// serialized form.
    #[serde(default)]
    pub time_columns: TimeColumns<F>,
    /// Zero-sized marker for the otherwise-unused type parameter `K`.
    #[serde(skip)]
    pub _phantom: PhantomData<K>,
}
impl<Cmt, F, K> From<(CcsClaim<Cmt, F>, CcsWitness<F>)> for StepWitnessBundle<Cmt, F, K> {
fn from(mcs: (CcsClaim<Cmt, F>, CcsWitness<F>)) -> Self {
Self {
mcs,
lut_instances: Vec::new(),
mem_instances: Vec::new(),
time_columns: TimeColumns::default(),
_phantom: PhantomData,
}
}
}
/// Per-step bundle that carries *only public instances* (no witnesses).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StepInstanceBundle<Cmt, F, K> {
    /// Public CCS claim for the step.
    pub mcs_inst: CcsClaim<Cmt, F>,
    /// Public Shout lookup instances.
    pub lut_insts: Vec<LutInstance<Cmt, F>>,
    /// Public Twist memory instances.
    pub mem_insts: Vec<MemInstance<Cmt, F>>,
    // Time columns are verifier-local execution context and should not be serialized
    // as part of public step instances/proofs.
    #[serde(skip, default)]
    pub time_columns: TimeColumns<F>,
    /// Zero-sized marker for the otherwise-unused type parameter `K`.
    #[serde(skip)]
    pub _phantom: PhantomData<K>,
}
impl<Cmt, F, K> From<CcsClaim<Cmt, F>> for StepInstanceBundle<Cmt, F, K> {
fn from(mcs_inst: CcsClaim<Cmt, F>) -> Self {
Self {
mcs_inst,
lut_insts: Vec::new(),
mem_insts: Vec::new(),
time_columns: TimeColumns::default(),
_phantom: PhantomData,
}
}
}
impl<Cmt: Clone, F: Clone, K> From<&StepWitnessBundle<Cmt, F, K>> for StepInstanceBundle<Cmt, F, K> {
fn from(step: &StepWitnessBundle<Cmt, F, K>) -> Self {
Self {
mcs_inst: step.mcs.0.clone(),
lut_insts: step
.lut_instances
.iter()
.map(|(inst, _)| inst.clone())
.collect(),
mem_insts: step
.mem_instances
.iter()
.map(|(inst, _)| inst.clone())
.collect(),
// Canonical time columns are verifier-local context (serde-skip on instance),
// and are required by Route-A named-opening verification.
time_columns: step.time_columns.clone(),
_phantom: PhantomData,
}
}
}
impl<Cmt, F, K> From<StepWitnessBundle<Cmt, F, K>> for StepInstanceBundle<Cmt, F, K> {
fn from(step: StepWitnessBundle<Cmt, F, K>) -> Self {
let StepWitnessBundle {
mcs: (mcs_inst, _mcs_wit),
lut_instances,
mem_instances,
time_columns,
_phantom: _,
} = step;
Self {
mcs_inst,
lut_insts: lut_instances.into_iter().map(|(inst, _)| inst).collect(),
mem_insts: mem_instances.into_iter().map(|(inst, _)| inst).collect(),
// Canonical time columns are verifier-local context (serde-skip on instance),
// and are required by Route-A named-opening verification.
time_columns,
_phantom: PhantomData,
}
}
}