@@ -495,7 +495,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
 }
 
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
-    /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
     secp256k1_gej_verify(a);
     secp256k1_ge_verify(b);
@@ -513,11 +513,11 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
     }
 
     secp256k1_fe_sqr(&z12, &a->z);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, 6); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
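Background on the bound change above: the third argument to secp256k1_fe_negate is a caller-declared upper bound on the input's magnitude. With the normalize_weak call gone, u1 is a plain copy of a->x and keeps its magnitude, so the bound rises from 1 to 6; the exact value 6 is an assumption read off this commit's updated annotations for the gej x invariant, not stated anywhere in the hunk. A minimal runnable sketch of just this bookkeeping, using the magnitude rules documented in field.h (negate with bound m requires magnitude <= m and yields m + 1; add sums magnitudes; mul and sqr outputs always have magnitude 1):

#include <assert.h>
#include <stdio.h>

/* Model a field element by its magnitude alone. */
typedef struct { int magnitude; } fe_model;

/* negate: the input's magnitude must not exceed the declared bound m;
 * the result has magnitude m + 1. */
static fe_model fe_negate_model(fe_model a, int m) {
    fe_model r;
    assert(a.magnitude <= m);   /* the contract behind the third argument */
    r.magnitude = m + 1;
    return r;
}

/* add: magnitudes accumulate. */
static fe_model fe_add_model(fe_model a, fe_model b) {
    fe_model r;
    r.magnitude = a.magnitude + b.magnitude;
    return r;
}

int main(void) {
    fe_model u1 = { 6 };   /* a->x: no longer normalized, bound of 6 assumed */
    fe_model u2 = { 1 };   /* fresh fe_mul output */
    fe_model h = fe_add_model(fe_negate_model(u1, 6), u2);
    printf("h magnitude: %d\n", h.magnitude);   /* prints 8 */
    return 0;
}

The resulting magnitude of 8 still satisfies the input limit that fe_mul and fe_sqr impose, which is why the per-addition normalize_weak calls were pure overhead.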
@@ -556,7 +556,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
 }
 
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
-    /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
 
     secp256k1_gej_verify(a);
@@ -589,11 +589,11 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
     secp256k1_fe_mul(&az, &a->z, bzinv);
 
     secp256k1_fe_sqr(&z12, &az);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, 6); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
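Note that in both functions the companion call secp256k1_fe_negate(&i, &s2, 1) keeps its bound of 1: s2 comes straight out of secp256k1_fe_mul, whose result always has magnitude 1, so no wider bound is needed there. Likewise secp256k1_fe_normalizes_to_zero_var decides whether its argument represents zero mod p at any valid magnitude, which is why h can be tested without normalizing first. Continuing the sketch above for the i chain, reusing fe_model, fe_negate_model and fe_add_model (the y bound of 4 is again an assumption taken from the annotations in the gej_add_ge hunks below):

/* The i = S1 - S2 chain: s1 = a->y keeps the assumed gej y bound of 4,
 * s2 is a fresh mul output of magnitude 1. */
static int i_chain_magnitude(void) {
    fe_model s1 = { 4 };   /* a->y, no longer normalized */
    fe_model s2 = { 1 };   /* fe_mul output */
    fe_model i = fe_add_model(fe_negate_model(s2, 1), s1);
    return i.magnitude;    /* 2 + 4 = 6, comfortably below the limit of 8 */
}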
@@ -626,7 +626,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 
 
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
-    /* Operations: 7 mul, 5 sqr, 24 add/cmov/half/mul_int/negate/normalize_weak/normalizes_to_zero */
+    /* Operations: 7 mul, 5 sqr, 21 add/cmov/half/mul_int/negate/normalizes_to_zero */
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int degenerate;
@@ -686,17 +686,17 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      */
 
     secp256k1_fe_sqr(&zz, &a->z);                       /* z = Z1^2 */
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
+    u1 = a->x;                                          /* u1 = U1 = X1*Z2^2 (6) */
     secp256k1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
+    s1 = a->y;                                          /* s1 = S1 = Y1*Z2^3 (4) */
     secp256k1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
     secp256k1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
-    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
-    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
+    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (7) */
+    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (5) */
     secp256k1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
-    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
-    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
-    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
+    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 (2) */
+    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (1) */
+    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (2) */
     /* If lambda = R/M = R/0 we have a problem (except in the "trivial"
      * case that Z = z1z2 = 0, and this is special-cased later on). */
     degenerate = secp256k1_fe_normalizes_to_zero(&m);
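The parenthesized numbers in these comments record the magnitude of the assigned variable after each line, and the commit updates them for the now-unnormalized inputs. Two more rules from field.h make them mechanically checkable: secp256k1_fe_mul_int multiplies the magnitude by its integer argument, and fe_mul/fe_sqr require every input magnitude to be at most 8. A small self-contained replay of a few annotated lines (magnitudes only, input bounds assumed as before):

#include <assert.h>
#include <stdio.h>

int main(void) {
    int u1 = 6, u2 = 1, s1 = 4, s2 = 1;   /* magnitudes of the four inputs */
    int t = u1 + u2;                      /* t = T = U1+U2: 6 + 1 -> (7) */
    int m = s1 + s2;                      /* m = M = S1+S2: 4 + 1 -> (5) */
    int m_alt = 1 + 1;                    /* negate(u2, bound 1) -> (2) */
    m_alt += u1;                          /* Malt = X1*Z2^2 - X2*Z1^2 -> (8) */
    assert(t <= 8 && m_alt <= 8);         /* both remain valid mul/sqr inputs */
    printf("t=%d m=%d m_alt=%d\n", t, m, m_alt);   /* t=7 m=5 m_alt=8 */
    return 0;
}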
@@ -706,34 +706,34 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
      * so we set R/M equal to this. */
     rr_alt = s1;
-    secp256k1_fe_mul_int(&rr_alt, 2);                   /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
-    secp256k1_fe_add(&m_alt, &u1);                      /* Malt = X1*Z2^2 - X2*Z1^2 */
+    secp256k1_fe_mul_int(&rr_alt, 2);                   /* rr_alt = Y1*Z2^3 - Y2*Z1^3 (8) */
+    secp256k1_fe_add(&m_alt, &u1);                      /* Malt = X1*Z2^2 - X2*Z1^2 (8) */
 
-    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
-    secp256k1_fe_cmov(&m_alt, &m, !degenerate);
+    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);       /* rr_alt (8) */
+    secp256k1_fe_cmov(&m_alt, &m, !degenerate);         /* m_alt (8) */
     /* Now Ralt / Malt = lambda and is guaranteed not to be Ralt / 0.
      * From here on out Ralt and Malt represent the numerator
      * and denominator of lambda; R and M represent the explicit
      * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
     secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
-    secp256k1_fe_negate(&q, &t, 2);                     /* q = -T (3) */
+    secp256k1_fe_negate(&q, &t, 7);                     /* q = -T (8) */
     secp256k1_fe_mul(&q, &q, &n);                       /* q = Q = -T*Malt^2 (1) */
     /* These two lines use the observation that either M == Malt or M == 0,
      * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
      * zero (which is "computed" by cmov). So the cost is one squaring
      * versus two multiplications. */
-    secp256k1_fe_sqr(&n, &n);
-    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
+    secp256k1_fe_sqr(&n, &n);                           /* n = Malt^4 (1) */
+    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (5) */
     secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
     secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Z3 = Malt*Z (1) */
     secp256k1_fe_add(&t, &q);                           /* t = Ralt^2 + Q (2) */
     r->x = t;                                           /* r->x = X3 = Ralt^2 + Q (2) */
     secp256k1_fe_mul_int(&t, 2);                        /* t = 2*X3 (4) */
     secp256k1_fe_add(&t, &q);                           /* t = 2*X3 + Q (5) */
     secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*X3 + Q) (1) */
-    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*X3 + Q) + M^3*Malt (3) */
-    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (4) */
-    secp256k1_fe_half(&r->y);                           /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (3) */
+    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*X3 + Q) + M^3*Malt (6) */
+    secp256k1_fe_negate(&r->y, &t, 6);                  /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (7) */
+    secp256k1_fe_half(&r->y);                           /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (4) */
 
     /* In case a->infinity == 1, replace r with (b->x, b->y, 1). */
     secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
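One more assumed rule from field.h closes the books on the r->y chain: secp256k1_fe_half takes a field element of magnitude m to one of magnitude floor(m/2) + 1, which is how (7) becomes (4) on the final annotated line. Note also how tight the new bounds run: q = -T reaches magnitude 8 and is immediately fed to secp256k1_fe_mul, exactly at the documented input limit. Replaying the tail of the computation in the magnitude model:

#include <stdio.h>

int main(void) {
    int t = 1 + 5;             /* Ralt*(2*X3 + Q) (1) + M^3*Malt (5) -> (6) */
    int y = 6 + 1;             /* negate with bound 6 -> magnitude 7 */
    y = (y >> 1) + 1;          /* fe_half: floor(m/2) + 1 -> (4) */
    printf("t=%d Y3 magnitude=%d\n", t, y);   /* prints t=6 Y3 magnitude=4 */
    return 0;
}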