
Commit 436281a

Move secp256k1_fe_inv{_var} to per-impl files
This temporarily duplicates the inversion code across the 5x52 and 10x26 implementations. Those implementations will be replaced in a later commit.
1 parent aa404d5 commit 436281a

3 files changed: +254 −127 lines

src/field_10x26_impl.h

Lines changed: 127 additions & 0 deletions
@@ -1164,4 +1164,131 @@ static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const se
 #endif
 }

+static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
+    secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
+    int j;
+
+    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
+     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
+     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
+     */
+
+    secp256k1_fe_sqr(&x2, a);
+    secp256k1_fe_mul(&x2, &x2, a);
+
+    secp256k1_fe_sqr(&x3, &x2);
+    secp256k1_fe_mul(&x3, &x3, a);
+
+    x6 = x3;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x6, &x6);
+    }
+    secp256k1_fe_mul(&x6, &x6, &x3);
+
+    x9 = x6;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x9, &x9);
+    }
+    secp256k1_fe_mul(&x9, &x9, &x3);
+
+    x11 = x9;
+    for (j=0; j<2; j++) {
+        secp256k1_fe_sqr(&x11, &x11);
+    }
+    secp256k1_fe_mul(&x11, &x11, &x2);
+
+    x22 = x11;
+    for (j=0; j<11; j++) {
+        secp256k1_fe_sqr(&x22, &x22);
+    }
+    secp256k1_fe_mul(&x22, &x22, &x11);
+
+    x44 = x22;
+    for (j=0; j<22; j++) {
+        secp256k1_fe_sqr(&x44, &x44);
+    }
+    secp256k1_fe_mul(&x44, &x44, &x22);
+
+    x88 = x44;
+    for (j=0; j<44; j++) {
+        secp256k1_fe_sqr(&x88, &x88);
+    }
+    secp256k1_fe_mul(&x88, &x88, &x44);
+
+    x176 = x88;
+    for (j=0; j<88; j++) {
+        secp256k1_fe_sqr(&x176, &x176);
+    }
+    secp256k1_fe_mul(&x176, &x176, &x88);
+
+    x220 = x176;
+    for (j=0; j<44; j++) {
+        secp256k1_fe_sqr(&x220, &x220);
+    }
+    secp256k1_fe_mul(&x220, &x220, &x44);
+
+    x223 = x220;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x223, &x223);
+    }
+    secp256k1_fe_mul(&x223, &x223, &x3);
+
+    /* The final result is then assembled using a sliding window over the blocks. */
+
+    t1 = x223;
+    for (j=0; j<23; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, &x22);
+    for (j=0; j<5; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, a);
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, &x2);
+    for (j=0; j<2; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(r, a, &t1);
+}
+
+static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
+#if defined(USE_FIELD_INV_BUILTIN)
+    secp256k1_fe_inv(r, a);
+#elif defined(USE_FIELD_INV_NUM)
+    secp256k1_num n, m;
+    static const secp256k1_fe negone = SECP256K1_FE_CONST(
+        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
+        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
+    );
+    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+    static const unsigned char prime[32] = {
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
+    };
+    unsigned char b[32];
+    int res;
+    secp256k1_fe c = *a;
+    secp256k1_fe_normalize_var(&c);
+    secp256k1_fe_get_b32(b, &c);
+    secp256k1_num_set_bin(&n, b, 32);
+    secp256k1_num_set_bin(&m, prime, 32);
+    secp256k1_num_mod_inverse(&n, &n, &m);
+    secp256k1_num_get_bin(b, 32, &n);
+    res = secp256k1_fe_set_b32(r, b);
+    (void)res;
+    VERIFY_CHECK(res);
+    /* Verify the result is the (unique) valid inverse using non-GMP code. */
+    secp256k1_fe_mul(&c, &c, r);
+    secp256k1_fe_add(&c, &negone);
+    CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
+#else
+#error "Please select field inverse implementation"
+#endif
+}
+
 #endif /* SECP256K1_FIELD_REPR_IMPL_H */
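As background for the comment inside secp256k1_fe_inv (not something this commit adds): the inverse is computed as a^(p-2) via Fermat's little theorem, and the block decomposition the comment refers to is

\[
p = 2^{256} - 2^{32} - 977, \qquad a^{-1} \equiv a^{p-2} \pmod{p} \quad (a \not\equiv 0),
\]
\[
p - 2 = \underbrace{1\cdots1}_{223}\,0\,\underbrace{1\cdots1}_{22}\,0000\,1\,0\,11\,0\,1 \;\text{(binary)}.
\]

The precomputed powers x223, x22 and x2 supply the 223-, 22- and 2-bit runs of ones, the two multiplications by a supply the single-bit runs, and the squaring counts 23, 5, 3 and 2 in the final assembly account for the zero gaps; by this count the ladder performs 255 squarings and 15 multiplications.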

src/field_5x52_impl.h

Lines changed: 127 additions & 0 deletions
@@ -498,4 +498,131 @@ static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const se
 #endif
 }

+static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
+    secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
+    int j;
+
+    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
+     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
+     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
+     */
+
+    secp256k1_fe_sqr(&x2, a);
+    secp256k1_fe_mul(&x2, &x2, a);
+
+    secp256k1_fe_sqr(&x3, &x2);
+    secp256k1_fe_mul(&x3, &x3, a);
+
+    x6 = x3;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x6, &x6);
+    }
+    secp256k1_fe_mul(&x6, &x6, &x3);
+
+    x9 = x6;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x9, &x9);
+    }
+    secp256k1_fe_mul(&x9, &x9, &x3);
+
+    x11 = x9;
+    for (j=0; j<2; j++) {
+        secp256k1_fe_sqr(&x11, &x11);
+    }
+    secp256k1_fe_mul(&x11, &x11, &x2);
+
+    x22 = x11;
+    for (j=0; j<11; j++) {
+        secp256k1_fe_sqr(&x22, &x22);
+    }
+    secp256k1_fe_mul(&x22, &x22, &x11);
+
+    x44 = x22;
+    for (j=0; j<22; j++) {
+        secp256k1_fe_sqr(&x44, &x44);
+    }
+    secp256k1_fe_mul(&x44, &x44, &x22);
+
+    x88 = x44;
+    for (j=0; j<44; j++) {
+        secp256k1_fe_sqr(&x88, &x88);
+    }
+    secp256k1_fe_mul(&x88, &x88, &x44);
+
+    x176 = x88;
+    for (j=0; j<88; j++) {
+        secp256k1_fe_sqr(&x176, &x176);
+    }
+    secp256k1_fe_mul(&x176, &x176, &x88);
+
+    x220 = x176;
+    for (j=0; j<44; j++) {
+        secp256k1_fe_sqr(&x220, &x220);
+    }
+    secp256k1_fe_mul(&x220, &x220, &x44);
+
+    x223 = x220;
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&x223, &x223);
+    }
+    secp256k1_fe_mul(&x223, &x223, &x3);
+
+    /* The final result is then assembled using a sliding window over the blocks. */
+
+    t1 = x223;
+    for (j=0; j<23; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, &x22);
+    for (j=0; j<5; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, a);
+    for (j=0; j<3; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(&t1, &t1, &x2);
+    for (j=0; j<2; j++) {
+        secp256k1_fe_sqr(&t1, &t1);
+    }
+    secp256k1_fe_mul(r, a, &t1);
+}
+
+static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
+#if defined(USE_FIELD_INV_BUILTIN)
+    secp256k1_fe_inv(r, a);
+#elif defined(USE_FIELD_INV_NUM)
+    secp256k1_num n, m;
+    static const secp256k1_fe negone = SECP256K1_FE_CONST(
+        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
+        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
+    );
+    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+    static const unsigned char prime[32] = {
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
+    };
+    unsigned char b[32];
+    int res;
+    secp256k1_fe c = *a;
+    secp256k1_fe_normalize_var(&c);
+    secp256k1_fe_get_b32(b, &c);
+    secp256k1_num_set_bin(&n, b, 32);
+    secp256k1_num_set_bin(&m, prime, 32);
+    secp256k1_num_mod_inverse(&n, &n, &m);
+    secp256k1_num_get_bin(b, 32, &n);
+    res = secp256k1_fe_set_b32(r, b);
+    (void)res;
+    VERIFY_CHECK(res);
+    /* Verify the result is the (unique) valid inverse using non-GMP code. */
+    secp256k1_fe_mul(&c, &c, r);
+    secp256k1_fe_add(&c, &negone);
+    CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
+#else
+#error "Please select field inverse implementation"
+#endif
+}
+
 #endif /* SECP256K1_FIELD_REPR_IMPL_H */
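The USE_FIELD_INV_NUM branch above cross-checks its GMP-derived result by multiplying it with the normalized input, adding negone (the field element -1) and requiring the sum to normalize to zero. The same idea can be used to sanity-check the constant-time path; the sketch below is illustrative only, uses nothing beyond the functions visible in this diff, the helper name sanity_check_fe_inv is made up, and the check only holds for nonzero a.

static void sanity_check_fe_inv(const secp256k1_fe *a) {
    /* -1 mod p, the same constant as negone in secp256k1_fe_inv_var above. */
    static const secp256k1_fe negone = SECP256K1_FE_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
    );
    secp256k1_fe ai, t;
    secp256k1_fe_inv(&ai, a);        /* ai = a^(p-2) = a^-1 (constant-time path) */
    secp256k1_fe_mul(&t, a, &ai);    /* t = a * a^-1, expected to be 1 */
    secp256k1_fe_add(&t, &negone);   /* t = a * a^-1 - 1 */
    VERIFY_CHECK(secp256k1_fe_normalizes_to_zero_var(&t));
}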

src/field_impl.h

Lines changed: 0 additions & 127 deletions
@@ -136,133 +136,6 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) {
     return secp256k1_fe_equal(&t1, a);
 }

-static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) {
-    secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
-    int j;
-
-    /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in
-     *  { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
-     *  [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223]
-     */
-
-    secp256k1_fe_sqr(&x2, a);
-    secp256k1_fe_mul(&x2, &x2, a);
-
-    secp256k1_fe_sqr(&x3, &x2);
-    secp256k1_fe_mul(&x3, &x3, a);
-
-    x6 = x3;
-    for (j=0; j<3; j++) {
-        secp256k1_fe_sqr(&x6, &x6);
-    }
-    secp256k1_fe_mul(&x6, &x6, &x3);
-
-    x9 = x6;
-    for (j=0; j<3; j++) {
-        secp256k1_fe_sqr(&x9, &x9);
-    }
-    secp256k1_fe_mul(&x9, &x9, &x3);
-
-    x11 = x9;
-    for (j=0; j<2; j++) {
-        secp256k1_fe_sqr(&x11, &x11);
-    }
-    secp256k1_fe_mul(&x11, &x11, &x2);
-
-    x22 = x11;
-    for (j=0; j<11; j++) {
-        secp256k1_fe_sqr(&x22, &x22);
-    }
-    secp256k1_fe_mul(&x22, &x22, &x11);
-
-    x44 = x22;
-    for (j=0; j<22; j++) {
-        secp256k1_fe_sqr(&x44, &x44);
-    }
-    secp256k1_fe_mul(&x44, &x44, &x22);
-
-    x88 = x44;
-    for (j=0; j<44; j++) {
-        secp256k1_fe_sqr(&x88, &x88);
-    }
-    secp256k1_fe_mul(&x88, &x88, &x44);
-
-    x176 = x88;
-    for (j=0; j<88; j++) {
-        secp256k1_fe_sqr(&x176, &x176);
-    }
-    secp256k1_fe_mul(&x176, &x176, &x88);
-
-    x220 = x176;
-    for (j=0; j<44; j++) {
-        secp256k1_fe_sqr(&x220, &x220);
-    }
-    secp256k1_fe_mul(&x220, &x220, &x44);
-
-    x223 = x220;
-    for (j=0; j<3; j++) {
-        secp256k1_fe_sqr(&x223, &x223);
-    }
-    secp256k1_fe_mul(&x223, &x223, &x3);
-
-    /* The final result is then assembled using a sliding window over the blocks. */
-
-    t1 = x223;
-    for (j=0; j<23; j++) {
-        secp256k1_fe_sqr(&t1, &t1);
-    }
-    secp256k1_fe_mul(&t1, &t1, &x22);
-    for (j=0; j<5; j++) {
-        secp256k1_fe_sqr(&t1, &t1);
-    }
-    secp256k1_fe_mul(&t1, &t1, a);
-    for (j=0; j<3; j++) {
-        secp256k1_fe_sqr(&t1, &t1);
-    }
-    secp256k1_fe_mul(&t1, &t1, &x2);
-    for (j=0; j<2; j++) {
-        secp256k1_fe_sqr(&t1, &t1);
-    }
-    secp256k1_fe_mul(r, a, &t1);
-}
-
-static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
-#if defined(USE_FIELD_INV_BUILTIN)
-    secp256k1_fe_inv(r, a);
-#elif defined(USE_FIELD_INV_NUM)
-    secp256k1_num n, m;
-    static const secp256k1_fe negone = SECP256K1_FE_CONST(
-        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
-        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
-    );
-    /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
-    static const unsigned char prime[32] = {
-        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-        0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F
-    };
-    unsigned char b[32];
-    int res;
-    secp256k1_fe c = *a;
-    secp256k1_fe_normalize_var(&c);
-    secp256k1_fe_get_b32(b, &c);
-    secp256k1_num_set_bin(&n, b, 32);
-    secp256k1_num_set_bin(&m, prime, 32);
-    secp256k1_num_mod_inverse(&n, &n, &m);
-    secp256k1_num_get_bin(b, 32, &n);
-    res = secp256k1_fe_set_b32(r, b);
-    (void)res;
-    VERIFY_CHECK(res);
-    /* Verify the result is the (unique) valid inverse using non-GMP code. */
-    secp256k1_fe_mul(&c, &c, r);
-    secp256k1_fe_add(&c, &negone);
-    CHECK(secp256k1_fe_normalizes_to_zero_var(&c));
-#else
-#error "Please select field inverse implementation"
-#endif
-}
-
 static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) {
 #ifndef USE_NUM_NONE
     unsigned char b[32];
