8 #ifndef SECP256K1_ECDSA_IMPL_H 9 #define SECP256K1_ECDSA_IMPL_H 32 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
33 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL
46 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL
/** Read a DER length field (X.690-0207 section 8.1.3) from *sigp.
 *
 *  On success, advances *sigp past the length octets and returns the decoded
 *  length (>= 0). Returns -1 on any violation of strict DER:
 *   - truncated input,
 *   - the reserved first octet 0xFF (8.1.3.5.c),
 *   - indefinite length 0x80 (not allowed in DER),
 *   - non-minimal long-form encodings,
 *   - a length that exceeds the remaining buffer [*sigp, sigend).
 *
 *  sigp   in/out: cursor into the signature buffer.
 *  sigend        : one past the last readable byte.
 */
static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned char *sigend) {
    int lenleft, b1;
    size_t ret = 0;
    if (*sigp >= sigend) {
        return -1;
    }
    b1 = *((*sigp)++);
    if (b1 == 0xFF) {
        /* X.690-0207 8.1.3.5.c: the value 0xFF shall not be used. */
        return -1;
    }
    if ((b1 & 0x80) == 0) {
        /* X.690-0207 8.1.3.4: short form length octets. */
        return b1;
    }
    if (b1 == 0x80) {
        /* Indefinite length is not allowed in DER. */
        return -1;
    }
    /* X.690-0207 8.1.3.5: long form length octets. */
    lenleft = b1 & 0x7F;
    if (lenleft > sigend - *sigp) {
        /* The length octets themselves must fit in the buffer. */
        return -1;
    }
    if (**sigp == 0) {
        /* Leading zero length octet: not the shortest possible encoding. */
        return -1;
    }
    if ((size_t)lenleft > sizeof(size_t)) {
        /* The resulting length would exceed the range of a size_t, so it is
         * certainly longer than the passed array. */
        return -1;
    }
    while (lenleft > 0) {
        ret = (ret << 8) | **sigp;
        if (ret + lenleft > (size_t)(sigend - *sigp)) {
            /* Result exceeds the length of the passed array. */
            return -1;
        }
        (*sigp)++;
        lenleft--;
    }
    if (ret < 128) {
        /* Values below 128 must use the short form: not minimal DER. */
        return -1;
    }
    /* ret is bounded by the buffer length checked above, so the cast is safe. */
    return (int)ret;
}
99 static int secp256k1_der_parse_integer(
secp256k1_scalar *r,
const unsigned char **sig,
const unsigned char *sigend) {
101 unsigned char ra[32] = {0};
104 if (*sig == sigend || **sig != 0x02) {
109 rlen = secp256k1_der_read_len(sig, sigend);
110 if (rlen <= 0 || (*sig) + rlen > sigend) {
114 if (**sig == 0x00 && rlen > 1 && (((*sig)[1]) & 0x80) == 0x00) {
118 if (**sig == 0xFF && rlen > 1 && (((*sig)[1]) & 0x80) == 0x80) {
122 if ((**sig & 0x80) == 0x80) {
126 while (rlen > 0 && **sig == 0) {
135 memcpy(ra + 32 - rlen, *sig, rlen);
136 secp256k1_scalar_set_b32(r, ra, &overflow);
139 secp256k1_scalar_set_int(r, 0);
146 const unsigned char *sigend = sig +
size;
148 if (sig == sigend || *(sig++) != 0x30) {
152 rlen = secp256k1_der_read_len(&sig, sigend);
153 if (rlen < 0 || sig + rlen > sigend) {
157 if (sig + rlen != sigend) {
162 if (!secp256k1_der_parse_integer(rr, &sig, sigend)) {
165 if (!secp256k1_der_parse_integer(rs, &sig, sigend)) {
178 unsigned char r[33] = {0}, s[33] = {0};
179 unsigned char *rp = r, *sp = s;
180 size_t lenR = 33, lenS = 33;
181 secp256k1_scalar_get_b32(&r[1], ar);
182 secp256k1_scalar_get_b32(&s[1], as);
183 while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
184 while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
185 if (*size < 6+lenS+lenR) {
186 *size = 6 + lenS + lenR;
189 *size = 6 + lenS + lenR;
191 sig[1] = 4 + lenS + lenR;
197 memcpy(sig+lenR+6, sp, lenS);
204 #if !defined(EXHAUSTIVE_TEST_ORDER) 210 if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
214 secp256k1_scalar_inverse_var(&sn, sigs);
215 secp256k1_scalar_mul(&u1, &sn, message);
216 secp256k1_scalar_mul(&u2, &sn, sigr);
217 secp256k1_gej_set_ge(&pubkeyj, pubkey);
218 secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1);
219 if (secp256k1_gej_is_infinity(&pr)) {
223 #if defined(EXHAUSTIVE_TEST_ORDER) 227 secp256k1_ge_set_gej(&pr_ge, &pr);
228 secp256k1_fe_normalize(&pr_ge.
x);
230 secp256k1_fe_get_b32(c, &pr_ge.
x);
231 secp256k1_scalar_set_b32(&computed_r, c, NULL);
232 return secp256k1_scalar_eq(sigr, &computed_r);
235 secp256k1_scalar_get_b32(c, sigr);
236 secp256k1_fe_set_b32(&xr, c);
254 if (secp256k1_gej_eq_x_var(&xr, &pr)) {
258 if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
262 secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe);
263 if (secp256k1_gej_eq_x_var(&xr, &pr)) {
278 secp256k1_ecmult_gen(ctx, &rp, nonce);
279 secp256k1_ge_set_gej(&r, &rp);
280 secp256k1_fe_normalize(&r.
x);
281 secp256k1_fe_normalize(&r.
y);
282 secp256k1_fe_get_b32(b, &r.
x);
283 secp256k1_scalar_set_b32(sigr, b, &overflow);
292 *recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.
y) ? 1 : 0);
294 secp256k1_scalar_mul(&n, sigr, seckey);
295 secp256k1_scalar_add(&n, &n, message);
296 secp256k1_scalar_inverse(sigs, nonce);
297 secp256k1_scalar_mul(sigs, sigs, &n);
298 secp256k1_scalar_clear(&n);
299 secp256k1_gej_clear(&rp);
300 secp256k1_ge_clear(&r);
301 if (secp256k1_scalar_is_zero(sigs)) {
304 if (secp256k1_scalar_is_high(sigs)) {
305 secp256k1_scalar_negate(sigs, sigs);
/* Cross-references (from documentation extraction) for names used above:
 *  - VERIFY_CHECK(cond): internal assertion macro.
 *  - secp256k1_gej: group element of the secp256k1 curve, Jacobian coordinates.
 *  - SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0): field element constant.
 *  - secp256k1_ge: group element of the secp256k1 curve, affine coordinates.
 *  - secp256k1_scalar: scalar modulo the group order of the secp256k1 curve.
 *  - memcpy(void *a, const void *b, size_t c): standard C byte copy.
 */