|
Lines 4-9
Link Here
|
| 4 |
|
4 |
|
| 5 |
Copyright (C) 2001, 2015 Niels Möller |
5 |
Copyright (C) 2001, 2015 Niels Möller |
| 6 |
Copyright (C) 2012 Nikos Mavrogiannopoulos |
6 |
Copyright (C) 2012 Nikos Mavrogiannopoulos |
|
|
7 |
Copyright (C) 2018 Red Hat Inc. |
| 7 |
|
8 |
|
| 8 |
This file is part of GNU Nettle. |
9 |
This file is part of GNU Nettle. |
| 9 |
|
10 |
|
|
Lines 36-43
Link Here
|
| 36 |
# include "config.h" |
37 |
# include "config.h" |
| 37 |
#endif |
38 |
#endif |
| 38 |
|
39 |
|
|
|
40 |
#include <assert.h> |
| 41 |
|
| 42 |
#include "gmp-glue.h" |
| 39 |
#include "rsa.h" |
43 |
#include "rsa.h" |
|
|
44 |
#include "rsa-internal.h" |
| 45 |
|
| 46 |
#define MAX(a, b) ((a) > (b) ? (a) : (b)) |
| 40 |
|
47 |
|
|
|
48 |
#if NETTLE_USE_MINI_GMP |
| 41 |
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also |
49 |
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also |
| 42 |
returns the inverse (ri), for use by rsa_unblind. */ |
50 |
returns the inverse (ri), for use by rsa_unblind. */ |
| 43 |
static void |
51 |
static void |
|
Lines 118-120
Link Here
|
| 118 |
|
126 |
|
| 119 |
return res; |
127 |
return res; |
| 120 |
} |
128 |
} |
|
|
129 |
|
/* mini-GMP build: the mpn_sec_* side-channel-silent primitives are not
 * available, so convert the limb-array arguments to mpz and delegate to
 * rsa_compute_root_tr, which uses mpz arithmetic.
 *
 * pub/key:     RSA key pair (key must match pub).
 * random_ctx/random: entropy source used for blinding by the callee.
 * x:           output root, mpz_size(pub->n) limbs, written only on success.
 * m/mn:        input message as a limb array of mn limbs.
 * Returns 1 on success, 0 on failure (x untouched on failure). */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m, size_t mn)
{
  mpz_t mz;
  mpz_t xz;
  int res;

  mpz_init(mz);
  mpz_init(xz);

  /* Load the mn input limbs into mz. */
  mpn_copyi(mpz_limbs_write(mz, mn), m, mn);
  mpz_limbs_finish(mz, mn);

  res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz);

  if (res)
    /* mpz_limbs_copy: presumably a gmp-glue helper that zero-pads xz out
       to mpz_size(pub->n) limbs in x — confirm against gmp-glue.h. */
    mpz_limbs_copy(x, xz, mpz_size(pub->n));

  mpz_clear(mz);
  mpz_clear(xz);
  return res;
}
| 155 |
#else |
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
   returns the inverse (ri), for use by rsa_unblind.
 
   All arithmetic uses the mpn_sec_* constant-time GMP primitives, so the
   work done is independent of the values of m and r.
 
   c:  output, mpz_size(pub->n) limbs.
   ri: output, r^-1 mod n, mpz_size(pub->n) limbs.
   m:  input message, mn limbs; mn may be smaller than the modulus size
       (see comment below). */
static void
rsa_sec_blind (const struct rsa_public_key *pub,
               void *random_ctx, nettle_random_func *random,
               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m,
               mp_size_t mn)
{
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
  mp_size_t nn = mpz_size (pub->n);
  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL (tp, mp_limb_t);
  TMP_GMP_DECL (rp, mp_limb_t);
  TMP_GMP_DECL (r, uint8_t);

  TMP_GMP_ALLOC (rp, nn);
  /* Raw random bytes: one full limb's worth per limb of n. */
  TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));

  /* c = m*(r^e) mod n */
  /* One scratch area sized for the largest of the four mpn_sec_*
     operations used below. */
  itch = mpn_sec_powm_itch(nn, ebn, nn);
  i2 = mpn_sec_mul_itch(nn, mn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_div_r_itch(nn + mn, nn);
  itch = MAX(itch, i2);
  i2 = mpn_sec_invert_itch(nn);
  itch = MAX(itch, i2);

  /* tp doubles as the nn+mn-limb product buffer and, beyond that, as
     mpn_sec_* scratch space. */
  TMP_GMP_ALLOC (tp, nn + mn + itch);
  scratch = tp + nn + mn;

  /* ri = r^(-1) */
  /* Loop until we draw an r that is invertible mod n (gcd(r, n) == 1).
     mpn_sec_invert destroys its input, hence the copy of rp into tp. */
  do
    {
      random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
      mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
      mpn_copyi(tp, rp, nn);
      /* invert r */
    }
  while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));

  mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
  /* normally mn == nn, but m can be smaller in some cases */
  mpn_sec_mul (tp, c, nn, m, mn, scratch);
  mpn_sec_div_r (tp, nn + mn, np, nn, scratch);
  mpn_copyi(c, tp, nn);

  TMP_GMP_FREE (r);
  TMP_GMP_FREE (rp);
  TMP_GMP_FREE (tp);
}
| 210 |
|
/* m = c ri mod n */
/* Removes the blinding applied by rsa_sec_blind: x = c * ri mod n,
   using only constant-time mpn_sec_* operations.
   x, ri and c are all mpz_size(pub->n) limbs. */
static void
rsa_sec_unblind (const struct rsa_public_key *pub,
                 mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
{
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  mp_size_t nn = mpz_size (pub->n);

  size_t itch;
  size_t i2;
  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  /* Single scratch area sized for the larger of the two operations. */
  itch = mpn_sec_mul_itch(nn, nn);
  i2 = mpn_sec_div_r_itch(nn + nn, nn);
  itch = MAX(itch, i2);

  /* tp holds the 2*nn-limb product, followed by scratch space. */
  TMP_GMP_ALLOC (tp, nn + nn + itch);
  scratch = tp + nn + nn;

  mpn_sec_mul (tp, c, nn, ri, nn, scratch);
  mpn_sec_div_r (tp, nn + nn, np, nn, scratch);
  mpn_copyi(x, tp, nn);

  TMP_GMP_FREE (tp);
}
| 237 |
|
| 238 |
/* Constant-time equality check of two limb arrays: always scans all
   `limbs` limbs and accumulates differences with XOR/OR, so the running
   time does not depend on where (or whether) the arrays differ.
   Returns 1 if equal, 0 otherwise. */
static int
sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs)
{
  /* volatile discourages the compiler from short-circuiting the loop. */
  volatile mp_limb_t z = 0;

  for (size_t i = 0; i < limbs; i++)
    {
      z |= (a[i] ^ b[i]);
    }

  /* FIXME: Might compile to a branch instruction on some platforms. */
  return z == 0;
}
| 251 |
|
| 252 |
/* Verifies a candidate private-key result x by re-applying the public
   operation: checks that x^e mod n == m, in constant time. This detects
   faults (hardware glitches or arithmetic bugs) in the CRT computation.
   x and m are mpz_size(pub->n) limbs.
   Returns 1 if the root checks out, 0 otherwise. */
static int
rsa_sec_check_root(const struct rsa_public_key *pub,
                   const mp_limb_t *x, const mp_limb_t *m)
{
  mp_size_t nn = mpz_size (pub->n);
  mp_size_t ebn = mpz_sizeinbase (pub->e, 2);
  const mp_limb_t *np = mpz_limbs_read (pub->n);
  const mp_limb_t *ep = mpz_limbs_read (pub->e);
  int ret;

  mp_size_t itch;

  mp_limb_t *scratch;
  TMP_GMP_DECL(tp, mp_limb_t);

  /* tp holds the nn-limb power, followed by mpn_sec_powm scratch. */
  itch = mpn_sec_powm_itch (nn, ebn, nn);
  TMP_GMP_ALLOC (tp, nn + itch);
  scratch = tp + nn;

  mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch);
  /* Constant-time comparison; see sec_equal. */
  ret = sec_equal(tp, m, nn);

  TMP_GMP_FREE (tp);
  return ret;
}
| 277 |
|
| 278 |
/* Conditionally zeroes the n-limb array rp without branching on cnd:
   when cnd != 0 the mask is all-ones (rp unchanged), when cnd == 0 the
   mask is zero (rp cleared). volatile keeps the compiler from turning
   the masking into a conditional store. */
static void
cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n)
{
  volatile mp_limb_t c;
  /* cnd - 1: 0 -> ~0 would be wrong way round, so: cnd=1 gives mask 0,
     cnd=0 gives mask ~0 — i.e. the array is zeroed when cnd is 0?
     No: (mp_limb_t)1 - 1 == 0 zeroes on cnd==1. NOTE(review): caller
     passes `1 - ret`, so the net effect is "zero rp when ret == 0". */
  volatile mp_limb_t mask = (mp_limb_t) cnd - 1;

  while (--n >= 0)
    {
      c = rp[n];
      c &= mask;
      rp[n] = c;
    }
}
| 291 |
|
| 292 |
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is side-channel silent even in case of error,
 * the destination buffer is always overwritten */
/* Flow: blind m -> private-key exponentiation (CRT) -> verify result
 * against the public key -> unblind -> conditionally zero x on failure.
 * Every step after validation is branch-free on secret data.
 *
 * x: output, NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size) limbs, always
 *    written (zeroed on failure).
 * m/mn: input message limbs, mn <= key limb size (asserted).
 * Returns 1 on success, 0 on failure. */
int
_rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
			 const struct rsa_private_key *key,
			 void *random_ctx, nettle_random_func *random,
			 mp_limb_t *x, const mp_limb_t *m, size_t mn)
{
  TMP_GMP_DECL (c, mp_limb_t);
  TMP_GMP_DECL (ri, mp_limb_t);
  TMP_GMP_DECL (scratch, mp_limb_t);
  size_t key_limb_size;
  int ret;

  key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);

  /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
     key is invalid and rejected by rsa_private_key_prepare. However,
     some applications, notably gnutls, don't use this function, and
     we don't want an invalid key to lead to a crash down inside
     mpz_powm_sec. So do an additional check here. */
  if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
    {
      /* Still overwrite x, keeping the "always written" contract. */
      mpn_zero(x, key_limb_size);
      return 0;
    }

  assert(mpz_size(pub->n) == key_limb_size);
  assert(mn <= key_limb_size);

  TMP_GMP_ALLOC (c, key_limb_size);
  TMP_GMP_ALLOC (ri, key_limb_size);
  TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));

  /* x = m * r^e mod n; ri = r^-1 mod n. */
  rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);

  /* c = (blinded m)^d mod n via the private key (CRT). */
  _rsa_sec_compute_root(key, c, x, scratch);

  /* Fault check: c^e mod n must equal the blinded input x. */
  ret = rsa_sec_check_root(pub, c, x);

  /* x = c * ri mod n — unblind regardless of the check outcome, so the
     work done is independent of success/failure. */
  rsa_sec_unblind(pub, x, ri, c);

  /* Branch-free: zeroes x when ret == 0, leaves it intact when ret == 1. */
  cnd_mpn_zero(1 - ret, x, key_limb_size);

  TMP_GMP_FREE (scratch);
  TMP_GMP_FREE (ri);
  TMP_GMP_FREE (c);
  return ret;
}
| 344 |
|
| 345 |
/* Checks for any errors done in the RSA computation. That avoids
 * attacks which rely on faults on hardware, or even software MPI
 * implementation.
 * This version is maintained for API compatibility reasons. It
 * is not completely side-channel silent. There are conditionals
 * in buffer copying both in case of success or error.
 */
/* mpz-level wrapper around _rsa_sec_compute_root_tr: copies the mpz
 * input/output to/from fixed-size limb buffers.
 * x is written only on success; returns 1 on success, 0 on failure. */
int
rsa_compute_root_tr(const struct rsa_public_key *pub,
		    const struct rsa_private_key *key,
		    void *random_ctx, nettle_random_func *random,
		    mpz_t x, const mpz_t m)
{
  TMP_GMP_DECL (l, mp_limb_t);
  int res;

  mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
  TMP_GMP_ALLOC (l, l_size);

  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l,
				  mpz_limbs_read(m), mpz_size(m));
  if (res) {
    /* Copy all l_size limbs into x; mpz_limbs_finish normalizes the
       size (strips high zero limbs). */
    mp_limb_t *xp = mpz_limbs_write (x, l_size);
    mpn_copyi (xp, l, l_size);
    mpz_limbs_finish (x, l_size);
  }

  TMP_GMP_FREE (l);
  return res;
}
| 375 |
#endif |