@@ -69,8 +69,9 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsigned int overflow) {
+    uint128_t t;
     VERIFY_CHECK(overflow <= 1);
-    uint128_t t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
+    t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
     r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
     r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
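
Hoisting uint128_t t; ahead of the first statement is the pattern this whole diff applies: C90 rejects declarations that follow statements, so each mixed declaration becomes a declaration at the top of the block plus a plain assignment. The reduction itself exploits the small complement of the group order. Below is a toy two-limb sketch (illustrative names, not library code, using the same unsigned __int128 extension the 4x64 backend assumes):

    #include <stdint.h>

    typedef unsigned __int128 uint128_t;

    /* For a modulus n just below 2^128, the complement nc = 2^128 - n is
     * small, so "subtract n once" becomes "add nc and discard the carry
     * out of the top limb". overflow is 0 or 1, as in the real code. */
    static void reduce_128(uint64_t d[2], uint64_t nc0, uint64_t nc1,
                           unsigned int overflow) {
        uint128_t t = (uint128_t)d[0] + (uint64_t)overflow * nc0;
        d[0] = (uint64_t)t; t >>= 64;
        t += (uint128_t)d[1] + (uint64_t)overflow * nc1;
        d[1] = (uint64_t)t;  /* final carry discarded: x + nc - 2^128 = x - n */
    }
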
@@ -82,6 +83,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsig
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+    int overflow;
     uint128_t t = (uint128_t)a->d[0] + b->d[0];
     r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[1] + b->d[1];
@@ -90,15 +92,16 @@ static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
     r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[3] + b->d[3];
     r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
-    int overflow = t + secp256k1_scalar_check_overflow(r);
+    overflow = t + secp256k1_scalar_check_overflow(r);
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
     return overflow;
 }
 
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
+    uint128_t t;
     VERIFY_CHECK(bit < 256);
-    uint128_t t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
+    t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
     r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
     r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
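
In secp256k1_scalar_add_bit, the term (((uint64_t)((bit >> 6) == k)) << (bit & 0x3F)) equals 2^(bit mod 64) when the bit falls in limb k and 0 otherwise, so all four limbs run the same straight-line code with no data-dependent branch. A standalone demonstration (not library code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        unsigned int bit = 130;  /* bit 130 lives in limb 2, position 2 */
        unsigned int k;
        for (k = 0; k < 4; k++) {
            uint64_t term = ((uint64_t)((bit >> 6) == k)) << (bit & 0x3F);
            printf("limb %u: 0x%016llx\n", k, (unsigned long long)term);
        }
        return 0;  /* prints 0 for limbs 0, 1, 3 and 0x4 for limb 2 */
    }
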
@@ -113,11 +116,12 @@ static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) {
+    int over;
     r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
     r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
     r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
     r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
-    int over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
     if (overflow) {
         *overflow = over;
     }
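
The four long lines above are plain big-endian loads: d[0], the least significant limb, comes from the last eight bytes of the 32-byte input. A hypothetical helper making that structure explicit (the library writes it out inline):

    #include <stdint.h>

    /* Read one 64-bit limb from 8 big-endian bytes. */
    static uint64_t load_be64(const unsigned char *p) {
        return (uint64_t)p[7]       | (uint64_t)p[6] << 8  |
               (uint64_t)p[5] << 16 | (uint64_t)p[4] << 24 |
               (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
               (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
    }

    /* Equivalent to the inline version (r and b32 as in set_b32):
     *   r->d[0] = load_be64(b32 + 24);  r->d[1] = load_be64(b32 + 16);
     *   r->d[2] = load_be64(b32 + 8);   r->d[3] = load_be64(b32);     */
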
@@ -195,16 +199,16 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
 
 /** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
 #define muladd2(a,b) { \
-    uint64_t tl, th; \
+    uint64_t tl, th, th2, tl2; \
     { \
         uint128_t t = (uint128_t)a * b; \
         th = t >> 64;               /* at most 0xFFFFFFFFFFFFFFFE */ \
         tl = t; \
     } \
-    uint64_t th2 = th + th;         /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
+    th2 = th + th;                  /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
     c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
     VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
-    uint64_t tl2 = tl + tl;         /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
+    tl2 = tl + tl;                  /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
     th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFFFFFFFFFF */ \
     c0 += tl2;                      /* overflow is handled on the next line */ \
     th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
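
muladd2 doubles the 128-bit product 2*a*b one 64-bit half at a time, recovering each possible carry with a "sum < addend" comparison instead of a wider accumulator. A self-contained sketch of just that doubling step (illustrative names, not library API):

    #include <stdint.h>

    typedef unsigned __int128 uint128_t;

    /* Compute 2*a*b as carry*2^128 + hi2*2^64 + lo2. */
    static void double_mul(uint64_t a, uint64_t b,
                           uint64_t *lo2, uint64_t *hi2, uint64_t *carry) {
        uint128_t t = (uint128_t)a * b;
        uint64_t th = (uint64_t)(t >> 64);  /* at most 0xFFFFFFFFFFFFFFFE */
        uint64_t tl = (uint64_t)t;
        uint64_t th2 = th + th;             /* may wrap */
        uint64_t tl2 = tl + tl;             /* may wrap */
        *carry = (th2 < th) ? 1 : 0;        /* carry out of bit 127 */
        th2 += (tl2 < tl) ? 1 : 0;          /* 2*th is even, so no wrap here */
        *lo2 = tl2;
        *hi2 = th2;
    }
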
@@ -217,8 +221,9 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
 
 /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
 #define sumadd(a) { \
+    unsigned int over; \
     c0 += (a);                  /* overflow is handled on the next line */ \
-    unsigned int over = (c0 < (a)) ? 1 : 0; \
+    over = (c0 < (a)) ? 1 : 0; \
     c1 += over;                 /* overflow is handled on the next line */ \
     c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
 }
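
sumadd is the same carry idiom in its simplest form. A function version of the macro after this change (hypothetical name, written out for readability):

    #include <stdint.h>

    /* Add a into the 192-bit accumulator (c0,c1,c2). */
    static void sumadd_fn(uint64_t a, uint64_t *c0, uint64_t *c1, uint64_t *c2) {
        unsigned int over;
        *c0 += a;                     /* may wrap */
        over = (*c0 < a) ? 1 : 0;     /* exactly the carry bit out of c0 */
        *c1 += over;                  /* wraps only if c1 was all ones */
        *c2 += (*c1 < over) ? 1 : 0;  /* never overflows by the contract */
    }
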
@@ -248,7 +253,12 @@ static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
 }
 
 static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l) {
+    uint128_t c;
     uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
+    uint64_t m0, m1, m2, m3, m4, m5;
+    uint32_t m6;
+    uint64_t p0, p1, p2, p3;
+    uint32_t p4;
 
     /* 160 bit accumulator. */
     uint64_t c0, c1;
@@ -258,53 +268,53 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l
     /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
     c0 = l[0]; c1 = 0; c2 = 0;
     muladd_fast(n0, SECP256K1_N_C_0);
-    uint64_t m0; extract_fast(m0);
+    extract_fast(m0);
     sumadd_fast(l[1]);
     muladd(n1, SECP256K1_N_C_0);
     muladd(n0, SECP256K1_N_C_1);
-    uint64_t m1; extract(m1);
+    extract(m1);
     sumadd(l[2]);
     muladd(n2, SECP256K1_N_C_0);
     muladd(n1, SECP256K1_N_C_1);
     sumadd(n0);
-    uint64_t m2; extract(m2);
+    extract(m2);
     sumadd(l[3]);
     muladd(n3, SECP256K1_N_C_0);
     muladd(n2, SECP256K1_N_C_1);
     sumadd(n1);
-    uint64_t m3; extract(m3);
+    extract(m3);
     muladd(n3, SECP256K1_N_C_1);
     sumadd(n2);
-    uint64_t m4; extract(m4);
+    extract(m4);
     sumadd_fast(n3);
-    uint64_t m5; extract_fast(m5);
+    extract_fast(m5);
     VERIFY_CHECK(c0 <= 1);
-    uint32_t m6 = c0;
+    m6 = c0;
 
     /* Reduce 385 bits into 258. */
     /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
     c0 = m0; c1 = 0; c2 = 0;
     muladd_fast(m4, SECP256K1_N_C_0);
-    uint64_t p0; extract_fast(p0);
+    extract_fast(p0);
     sumadd_fast(m1);
     muladd(m5, SECP256K1_N_C_0);
     muladd(m4, SECP256K1_N_C_1);
-    uint64_t p1; extract(p1);
+    extract(p1);
     sumadd(m2);
     muladd(m6, SECP256K1_N_C_0);
     muladd(m5, SECP256K1_N_C_1);
     sumadd(m4);
-    uint64_t p2; extract(p2);
+    extract(p2);
     sumadd_fast(m3);
     muladd_fast(m6, SECP256K1_N_C_1);
     sumadd_fast(m5);
-    uint64_t p3; extract_fast(p3);
-    uint32_t p4 = c0 + m6;
+    extract_fast(p3);
+    p4 = c0 + m6;
     VERIFY_CHECK(p4 <= 2);
 
     /* Reduce 258 bits into 256. */
     /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
-    uint128_t c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
+    c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
     r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
     c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
     r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
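
Each pass of secp256k1_scalar_reduce_512 uses the congruence 2^256 = N_C (mod n): splitting the operand as hi*2^256 + lo and replacing it with hi*N_C + lo shrinks it from 512 to 385 to 258 bits, after which a final conditional subtraction brings the result below n. A toy model of one such fold, scaled down to a 64-bit word size (illustrative, not library code):

    #include <stdint.h>

    typedef unsigned __int128 uint128_t;

    /* One folding pass for n = 2^64 - nc:
     * hi*2^64 + lo is congruent to hi*nc + lo (mod n). */
    static uint128_t fold_once(uint128_t x, uint64_t nc) {
        uint64_t hi = (uint64_t)(x >> 64);
        uint64_t lo = (uint64_t)x;
        return (uint128_t)hi * nc + lo;  /* congruent to x, much smaller */
    }
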
@@ -413,12 +423,15 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, con
 }
 
 SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
-    VERIFY_CHECK(shift >= 256);
     uint64_t l[8];
+    unsigned int shiftlimbs;
+    unsigned int shiftlow;
+    unsigned int shifthigh;
+    VERIFY_CHECK(shift >= 256);
     secp256k1_scalar_mul_512(l, a, b);
-    unsigned int shiftlimbs = shift >> 6;
-    unsigned int shiftlow = shift & 0x3F;
-    unsigned int shifthigh = 64 - shiftlow;
+    shiftlimbs = shift >> 6;
+    shiftlow = shift & 0x3F;
+    shifthigh = 64 - shiftlow;
     r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
     r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
     r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
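
The guards in these expressions do double duty: the "shift < ..." comparisons keep the limb index within l[0..7], and the "shiftlow ?" test avoids shifting by shifthigh = 64 when shift is a multiple of 64, which would be undefined behaviour in C. A standalone helper expressing the same extraction over an arbitrary limb array (illustrative, not the library's API):

    #include <stdint.h>

    /* Read 64 bits starting at bit offset shift of a little-endian
     * limb array l[0..nlimbs-1]; bits past the end read as zero. */
    static uint64_t window64(const uint64_t *l, unsigned int nlimbs,
                             unsigned int shift) {
        unsigned int limbs = shift >> 6;  /* whole-limb offset */
        unsigned int low = shift & 0x3F;  /* bit offset inside the limb */
        uint64_t r = (limbs < nlimbs) ? l[limbs] >> low : 0;
        if (low && limbs + 1 < nlimbs) {
            r |= l[limbs + 1] << (64 - low);  /* bits from the next limb */
        }
        return r;
    }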