@@ -139,4 +139,251 @@ static int secp256k1_bulletproofs_commit(
     }
     return 1;
 }
+
+typedef struct ec_mult_x_cb_data {
+    const secp256k1_scalar *n;
+    const secp256k1_ge *g;
+    const secp256k1_scalar *l;
+    const secp256k1_scalar *r;
+    const secp256k1_scalar *r_inv;
+    size_t G_GENS_LEN; /* Figure out initialization syntax so that this can also be const */
+    size_t n_len;
+} ec_mult_x_cb_data;
+
+static int ec_mult_x_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ec_mult_x_cb_data *data = (ec_mult_x_cb_data*) cbdata;
+    if (idx < data->n_len) {
+        /* Pair each odd-indexed n value (scaled by r) with the even G generator
+           before it, and each even-indexed n value (scaled by r_inv) with the
+           odd G generator after it. */
+        if (idx % 2 == 0) {
+            secp256k1_scalar_mul(sc, &data->n[idx + 1], data->r);
+            *pt = data->g[idx];
+        } else {
+            secp256k1_scalar_mul(sc, &data->n[idx - 1], data->r_inv);
+            *pt = data->g[idx];
+        }
+    } else {
+        idx -= data->n_len;
+        /* Cross terms of l with the H generators, which start at offset
+           G_GENS_LEN: l_odd with H_even and l_even with H_odd. */
+        if (idx % 2 == 0) {
+            *sc = data->l[idx + 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        } else {
+            *sc = data->l[idx - 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        }
+    }
+    return 1;
+}
+
+typedef struct ec_mult_r_cb_data {
+    const secp256k1_scalar *n1;
+    const secp256k1_ge *g1;
+    const secp256k1_scalar *l1;
+    size_t G_GENS_LEN;
+    size_t n_len;
+} ec_mult_r_cb_data;
+
+static int ec_mult_r_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ec_mult_r_cb_data *data = (ec_mult_r_cb_data*) cbdata;
+    /* Only the odd-indexed entries contribute to R: n_odd paired with G_odd,
+       and l_odd paired with H_odd. */
+    if (idx < data->n_len) {
+        *sc = data->n1[2 * idx + 1];
+        *pt = data->g1[2 * idx + 1];
+    } else {
+        idx -= data->n_len;
+        *sc = data->l1[2 * idx + 1];
+        *pt = data->g1[data->G_GENS_LEN + 2 * idx + 1];
+    }
+    return 1;
+}
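+
+/* In outline, together with the scalar that secp256k1_ecmult_multi_var applies
+ * to the standard generator G, the callbacks above assemble the per-round
+ * commitments (even/odd subscripts denote the even- and odd-indexed halves of
+ * a vector):
+ *   X = x_v*G + r*<n_odd, G_even> + r_inv*<n_even, G_odd> + <l_odd, H_even> + <l_even, H_odd>
+ *   R = r_v*G + <n_odd, G_odd> + <l_odd, H_odd>
+ * where x_v and r_v are the scalars computed in the main loop below.
+ */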
+
+/* Recursively compute the norm argument proof satisfying the relation
+ * <n_vec, n_vec>_q + <c_vec, l_vec> = v for some commitment
+ * C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. Here <x, x>_q is the weighted
+ * inner product of x with itself, where the weights are the first n powers
+ * of q:
+ *   <x, x>_q = q*x_1^2 + q^2*x_2^2 + q^3*x_3^2 + ... + q^n*x_n^2
+ *
+ * The norm argument is not zero knowledge and does not operate on any secret
+ * data. Thus the following code uses variable-time operations while computing
+ * the proof.
+ */
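+/* Proof layout (see the size check below): each of the n_rounds halving rounds
+ * contributes 65 bytes (one parity byte followed by the 32-byte x-coordinates
+ * of X and R), and the proof ends with the 64-byte serialization of the final
+ * folded scalars n[0] and l[0].
+ */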
+int secp256k1_bulletproofs_pp_rangeproof_norm_product_prove(
+    const secp256k1_context *ctx,
+    secp256k1_scratch_space *scratch,
+    unsigned char *proof,
+    size_t *proof_len,
+    unsigned char *transcript_hash32, /* Transcript hash of the parent protocol */
+    const secp256k1_scalar *r_ch,
+    const secp256k1_bulletproofs_generators *g_vec,
+    const secp256k1_scalar *n_vec,
+    size_t n_vec_len,
+    const secp256k1_scalar *l_vec,
+    size_t l_vec_len,
+    const secp256k1_scalar *c_vec,
+    size_t c_vec_len,
+    const secp256k1_ge *commit
+) {
+    secp256k1_scalar q, r = *r_ch;
+    secp256k1_sha256 sha256;
+    unsigned char ser_commit[33];
+    size_t start_idx = 0;
+    ec_mult_x_cb_data x_cb_data;
+    ec_mult_r_cb_data r_cb_data;
+    size_t g_len = n_vec_len, h_len = l_vec_len;
+    const size_t G_GENS_LEN = g_len;
+    /* The pointers handed to the callbacks remain the same throughout; only
+     * the values they point to and the lengths are mutated in the loop. */
+    secp256k1_scalar *ns, *ls, *cs;
+    secp256k1_ge *gs, comm = *commit;
+    size_t scratch_checkpoint;
+    size_t log_n = secp256k1_bulletproofs_pp_log2(g_len), log_m = secp256k1_bulletproofs_pp_log2(h_len);
+    size_t n_rounds = log_n > log_m ? log_n : log_m;
+
+    /* Check that the proof buffer is large enough. */
+    if (*proof_len < 65 * n_rounds + 64) {
+        return 0;
+    }
+
+    if (g_vec->n != (n_vec_len + l_vec_len) || l_vec_len != c_vec_len) {
+        return 0;
+    }
+
+    if (!secp256k1_check_power_of_two(n_vec_len) || !secp256k1_check_power_of_two(c_vec_len)) {
+        return 0;
+    }
+    /* We could get away with allocating only half-sized arrays by unrolling
+       the first iteration of the loop, but that would increase code complexity
+       and can be done as an optimization in a future PR. */
+    scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
+    ns = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_len * sizeof(secp256k1_scalar));
+    ls = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
+    cs = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
+    gs = (secp256k1_ge*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, (g_len + h_len) * sizeof(secp256k1_ge));
+    if (ns == NULL || ls == NULL || cs == NULL || gs == NULL) {
+        secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+        return 0;
+    }
+    memcpy(ns, n_vec, g_len * sizeof(secp256k1_scalar));
+    memcpy(ls, l_vec, h_len * sizeof(secp256k1_scalar));
+    memcpy(cs, c_vec, h_len * sizeof(secp256k1_scalar));
+    memcpy(gs, g_vec->gens, (g_len + h_len) * sizeof(secp256k1_ge));
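+    /* These working copies are folded in place, halving their lengths each
+       round, so the caller's inputs remain untouched. */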
+
+    x_cb_data.n = ns;
+    x_cb_data.g = gs;
+    x_cb_data.l = ls;
+    x_cb_data.G_GENS_LEN = G_GENS_LEN;
+
+    r_cb_data.n1 = ns;
+    r_cb_data.g1 = gs;
+    r_cb_data.l1 = ls;
+    r_cb_data.G_GENS_LEN = G_GENS_LEN;
+    /* q = r^2 is the initial weight of the weighted inner product. */
+    secp256k1_scalar_sqr(&q, &r);
+
+    /* Commit C to the transcript by absorbing its 33-byte compressed
+       serialization into the running transcript hash. */
+    secp256k1_sha256_initialize(&sha256);
+    secp256k1_sha256_write(&sha256, transcript_hash32, 32);
+    secp256k1_fe_normalize_var(&comm.x);
+    secp256k1_fe_normalize_var(&comm.y);
+    ser_commit[0] = 0x02 | secp256k1_fe_is_odd(&comm.y);
+    secp256k1_fe_get_b32(&ser_commit[1], &comm.x);
+    secp256k1_sha256_write(&sha256, ser_commit, 33);
+    secp256k1_sha256_finalize(&sha256, transcript_hash32);
+
+    while (g_len > 1 || h_len > 1) {
+        size_t i, num_points;
+        secp256k1_scalar q_sq, r_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
+        secp256k1_gej rj, xj;
+        secp256k1_ge ge;
+        int overflow;
+        secp256k1_scalar e;
+
+        secp256k1_scalar_inverse_var(&r_inv, &r);
+        secp256k1_scalar_sqr(&q_sq, &q);
+
+        /* Compute the scalar for the X commitment:
+           x_v = 2*r_inv*<n_even, n_odd>_(q^2) + <c_even, l_odd> + <c_odd, l_even> */
+        secp256k1_scalar_inner_product(&c0_l1, cs, 0, ls, 1, 2, h_len/2);
+        secp256k1_scalar_inner_product(&c1_l0, cs, 1, ls, 0, 2, h_len/2);
+        secp256k1_weighted_scalar_inner_product(&x_v, ns, 0, ns, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_mul(&x_v, &x_v, &r_inv);
+        secp256k1_scalar_add(&x_v, &x_v, &x_v);
+        secp256k1_scalar_add(&x_v, &x_v, &c0_l1);
+        secp256k1_scalar_add(&x_v, &x_v, &c1_l0);
+
+        x_cb_data.r = &r;
+        x_cb_data.r_inv = &r_inv;
+        /* If g_len or h_len has already shrunk to 1, that half contributes no
+           points to the multi-exponentiation this round. */
+        x_cb_data.n_len = g_len >= 2 ? g_len : 0;
+        num_points = x_cb_data.n_len + (h_len >= 2 ? h_len : 0);
+
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &xj, &x_v, ec_mult_x_cb, (void *) &x_cb_data, num_points)) {
+            secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+            return 0;
+        }
+
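+        /* Compute the scalar for the R commitment:
+           r_v = <n_odd, n_odd>_(q^2) + <c_odd, l_odd> */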
+        secp256k1_weighted_scalar_inner_product(&r_v, ns, 1, ns, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_inner_product(&c1_l1, cs, 1, ls, 1, 2, h_len/2);
+        secp256k1_scalar_add(&r_v, &r_v, &c1_l1);
+
+        r_cb_data.n_len = g_len/2;
+        num_points = r_cb_data.n_len + h_len/2;
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &rj, &r_v, ec_mult_r_cb, (void *) &r_cb_data, num_points)) {
+            secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+            return 0;
+        }
+
+        /* Serialize X and R into 65 proof bytes: byte 0 packs the two y-parity
+           bits (X in bit 1, R in bit 0), followed by the 32-byte x-coordinates
+           of X and R. */
+        secp256k1_ge_set_gej_var(&ge, &xj);
+        secp256k1_fe_normalize_var(&ge.x);
+        secp256k1_fe_normalize_var(&ge.y);
+        proof[start_idx] = secp256k1_fe_is_odd(&ge.y) << 1;
+        secp256k1_fe_get_b32(&proof[start_idx + 1], &ge.x);
+        secp256k1_ge_set_gej_var(&ge, &rj);
+        secp256k1_fe_normalize_var(&ge.x);
+        secp256k1_fe_normalize_var(&ge.y);
+        proof[start_idx] |= secp256k1_fe_is_odd(&ge.y);
+        secp256k1_fe_get_b32(&proof[start_idx + 33], &ge.x);
+
+        /* The round challenge e covers the transcript so far plus the 65 bytes
+           of X and R just written to the proof. */
+        secp256k1_sha256_initialize(&sha256);
+        secp256k1_sha256_write(&sha256, transcript_hash32, 32);
+        secp256k1_sha256_write(&sha256, &proof[start_idx], 65);
+        secp256k1_sha256_finalize(&sha256, transcript_hash32);
+        secp256k1_scalar_set_b32(&e, transcript_hash32, &overflow); /* Ignore overflow: the reduction biases e only negligibly. */
+
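+        /* Fold the n half: n' = r_inv*n_even + e*n_odd, with the generators
+           folded as G' = r*G_even + e*G_odd. */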
+        if (g_len > 1) {
+            for (i = 0; i < g_len; i = i + 2) {
+                secp256k1_scalar nl, nr;
+                secp256k1_gej gl, gr;
+                secp256k1_scalar_mul(&nl, &ns[i], &r_inv);
+                secp256k1_scalar_mul(&nr, &ns[i + 1], &e);
+                secp256k1_scalar_add(&ns[i/2], &nl, &nr);
+
+                secp256k1_gej_set_ge(&gl, &gs[i]);
+                secp256k1_ecmult(&gl, &gl, &r, NULL);
+                secp256k1_gej_set_ge(&gr, &gs[i + 1]);
+                secp256k1_ecmult(&gr, &gr, &e, NULL);
+                secp256k1_gej_add_var(&gl, &gl, &gr, NULL);
+                secp256k1_ge_set_gej_var(&gs[i/2], &gl);
+            }
+        }
+
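+        /* Fold the l half: c' = c_even + e*c_odd, l' = l_even + e*l_odd, and
+           H' = H_even + e*H_odd. */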
+        if (h_len > 1) {
+            for (i = 0; i < h_len; i = i + 2) {
+                secp256k1_scalar temp1;
+                secp256k1_gej grj;
+                secp256k1_scalar_mul(&temp1, &cs[i + 1], &e);
+                secp256k1_scalar_add(&cs[i/2], &cs[i], &temp1);
+
+                secp256k1_scalar_mul(&temp1, &ls[i + 1], &e);
+                secp256k1_scalar_add(&ls[i/2], &ls[i], &temp1);
+
+                secp256k1_gej_set_ge(&grj, &gs[G_GENS_LEN + i + 1]);
+                secp256k1_ecmult(&grj, &grj, &e, NULL);
+                secp256k1_gej_add_ge_var(&grj, &grj, &gs[G_GENS_LEN + i], NULL);
+                secp256k1_ge_set_gej_var(&gs[G_GENS_LEN + i/2], &grj);
+            }
+        }
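+
+        /* Set up the next round: the folded vectors satisfy a norm relation
+           whose weight is q^2, with q taking over the role of the challenge r
+           (hence r = q, q = q_sq below). */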
+        g_len = g_len / 2;
+        h_len = h_len / 2;
+        r = q;
+        q = q_sq;
+        start_idx += 65;
+    }
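+
+    /* Only the fully folded scalars ns[0] and ls[0] remain; serializing them
+       completes the proof. */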
+    secp256k1_scalar_get_b32(&proof[start_idx], &ns[0]);
+    secp256k1_scalar_get_b32(&proof[start_idx + 32], &ls[0]);
+    *proof_len = start_idx + 64;
+    secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+    return 1;
+}
 #endif