/* (Extraction artifacts removed: file-size banner and concatenated line
 * numbers from the rendered view; they are not part of the source.) */
  1. /* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
  2. Licensed under the Apache 2.0 License. */
  3. /* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
  4. * KreMLin invocation: /mnt/e/everest/verify/kremlin/krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -fbuiltin-uint128 -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -I /mnt/e/everest/verify/hacl-star/code/lib/kremlin -I /mnt/e/everest/verify/kremlin/kremlib/compat -I /mnt/e/everest/verify/hacl-star/specs -I /mnt/e/everest/verify/hacl-star/specs/old -I . -ccopt -march=native -verbose -ldopt -flto -tmpdir x25519-c -I ../bignum -bundle Hacl.Curve25519=* -minimal -add-include "kremlib.h" -skip-compilation x25519-c/out.krml -o x25519-c/Hacl_Curve25519.c
  5. * F* version: 059db0c8
  6. * KreMLin version: 916c37ac
  7. */
  8. #include "Hacl_Curve25519.h"
  9. extern uint64_t FStar_UInt64_eq_mask(uint64_t x0, uint64_t x1);
  10. extern uint64_t FStar_UInt64_gte_mask(uint64_t x0, uint64_t x1);
  11. extern uint128_t FStar_UInt128_add(uint128_t x0, uint128_t x1);
  12. extern uint128_t FStar_UInt128_add_mod(uint128_t x0, uint128_t x1);
  13. extern uint128_t FStar_UInt128_logand(uint128_t x0, uint128_t x1);
  14. extern uint128_t FStar_UInt128_shift_right(uint128_t x0, uint32_t x1);
  15. extern uint128_t FStar_UInt128_uint64_to_uint128(uint64_t x0);
  16. extern uint64_t FStar_UInt128_uint128_to_uint64(uint128_t x0);
  17. extern uint128_t FStar_UInt128_mul_wide(uint64_t x0, uint64_t x1);
  18. static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
  19. {
  20. uint64_t b4 = b[4U];
  21. uint64_t b0 = b[0U];
  22. uint64_t b4_ = b4 & (uint64_t)0x7ffffffffffffU;
  23. uint64_t b0_ = b0 + (uint64_t)19U * (b4 >> (uint32_t)51U);
  24. b[4U] = b4_;
  25. b[0U] = b0_;
  26. }
  27. inline static void Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, uint128_t *input)
  28. {
  29. uint32_t i;
  30. for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  31. {
  32. uint128_t xi = input[i];
  33. output[i] = (uint64_t)xi;
  34. }
  35. }
  36. inline static void
  37. Hacl_Bignum_Fproduct_sum_scalar_multiplication_(uint128_t *output, uint64_t *input, uint64_t s)
  38. {
  39. uint32_t i;
  40. for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  41. {
  42. uint128_t xi = output[i];
  43. uint64_t yi = input[i];
  44. output[i] = xi + (uint128_t)yi * s;
  45. }
  46. }
  47. inline static void Hacl_Bignum_Fproduct_carry_wide_(uint128_t *tmp)
  48. {
  49. uint32_t i;
  50. for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
  51. {
  52. uint32_t ctr = i;
  53. uint128_t tctr = tmp[ctr];
  54. uint128_t tctrp1 = tmp[ctr + (uint32_t)1U];
  55. uint64_t r0 = (uint64_t)tctr & (uint64_t)0x7ffffffffffffU;
  56. uint128_t c = tctr >> (uint32_t)51U;
  57. tmp[ctr] = (uint128_t)r0;
  58. tmp[ctr + (uint32_t)1U] = tctrp1 + c;
  59. }
  60. }
  61. inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
  62. {
  63. uint64_t tmp = output[4U];
  64. uint64_t b0;
  65. {
  66. uint32_t i;
  67. for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
  68. {
  69. uint32_t ctr = (uint32_t)5U - i - (uint32_t)1U;
  70. uint64_t z = output[ctr - (uint32_t)1U];
  71. output[ctr] = z;
  72. }
  73. }
  74. output[0U] = tmp;
  75. b0 = output[0U];
  76. output[0U] = (uint64_t)19U * b0;
  77. }
  78. static void
  79. Hacl_Bignum_Fmul_mul_shift_reduce_(uint128_t *output, uint64_t *input, uint64_t *input2)
  80. {
  81. uint32_t i;
  82. uint64_t input2i;
  83. {
  84. uint32_t i0;
  85. for (i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
  86. {
  87. uint64_t input2i0 = input2[i0];
  88. Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i0);
  89. Hacl_Bignum_Fmul_shift_reduce(input);
  90. }
  91. }
  92. i = (uint32_t)4U;
  93. input2i = input2[i];
  94. Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
  95. }
/*
 * Field multiplication: output = input * input2 mod 2^255 - 19.
 * `input` is copied into a scratch buffer first because
 * Fmul_mul_shift_reduce_ destroys its second argument; this also makes
 * aliasing among output/input/input2 safe.
 */
inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
{
  uint64_t tmp[5U] = { 0U };
  memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    {
      uint128_t b4;
      uint128_t b0;
      uint128_t b4_;
      uint128_t b0_;
      uint64_t i0;
      uint64_t i1;
      uint64_t i0_;
      uint64_t i1_;
      /* Wide product into t, then one carry pass over limbs 0..3. */
      Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input2);
      Hacl_Bignum_Fproduct_carry_wide_(t);
      /* Fold limb 4's overflow back into limb 0 (2^255 == 19 mod p). */
      b4 = t[4U];
      b0 = t[0U];
      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
      t[4U] = b4_;
      t[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
      /* One extra carry from limb 0 into limb 1 so every limb is small. */
      i0 = output[0U];
      i1 = output[1U];
      i0_ = i0 & (uint64_t)0x7ffffffffffffU;
      i1_ = i1 + (i0 >> (uint32_t)51U);
      output[0U] = i0_;
      output[1U] = i1_;
    }
  }
}
/*
 * Wide squaring of `output` (5 x 51-bit limbs) into `tmp` (5 x 128-bit
 * limbs), with the reduction 2^255 == 19 (mod p) pre-folded: cross products
 * whose combined limb weight exceeds the top limb wrap around times 19.
 * The d* values merge the symmetric cross products (r_i * r_j appears twice).
 */
inline static void Hacl_Bignum_Fsquare_fsquare__(uint128_t *tmp, uint64_t *output)
{
  uint64_t r0 = output[0U];
  uint64_t r1 = output[1U];
  uint64_t r2 = output[2U];
  uint64_t r3 = output[3U];
  uint64_t r4 = output[4U];
  uint64_t d0 = r0 * (uint64_t)2U;
  uint64_t d1 = r1 * (uint64_t)2U;
  uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U; /* doubled AND wrapped */
  uint64_t d419 = r4 * (uint64_t)19U;              /* wrapped top limb */
  uint64_t d4 = d419 * (uint64_t)2U;
  /* s_k gathers all r_i * r_j with i + j == k, or i + j == k + 5 times 19. */
  uint128_t s0 = (uint128_t)r0 * r0 + (uint128_t)d4 * r1 + (uint128_t)d2 * r3;
  uint128_t s1 = (uint128_t)d0 * r1 + (uint128_t)d4 * r2 + (uint128_t)(r3 * (uint64_t)19U) * r3;
  uint128_t s2 = (uint128_t)d0 * r2 + (uint128_t)r1 * r1 + (uint128_t)d4 * r3;
  uint128_t s3 = (uint128_t)d0 * r3 + (uint128_t)d1 * r2 + (uint128_t)r4 * d419;
  uint128_t s4 = (uint128_t)d0 * r4 + (uint128_t)d1 * r3 + (uint128_t)r2 * r2;
  tmp[0U] = s0;
  tmp[1U] = s1;
  tmp[2U] = s2;
  tmp[3U] = s3;
  tmp[4U] = s4;
}
/*
 * One full modular squaring of `output` in place: wide square, carry pass,
 * fold the top limb's overflow (2^255 == 19 mod p), copy back to 64-bit
 * limbs, then one extra carry from limb 0 into limb 1. `tmp` is
 * caller-provided 128-bit scratch (5 limbs).
 */
inline static void Hacl_Bignum_Fsquare_fsquare_(uint128_t *tmp, uint64_t *output)
{
  uint128_t b4;
  uint128_t b0;
  uint128_t b4_;
  uint128_t b0_;
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_Bignum_Fsquare_fsquare__(tmp, output);
  Hacl_Bignum_Fproduct_carry_wide_(tmp);
  /* Fold limb 4's overflow into limb 0. */
  b4 = tmp[4U];
  b0 = tmp[0U];
  b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
  b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
  tmp[4U] = b4_;
  tmp[0U] = b0_;
  Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
  /* Final carry limb 0 -> limb 1 so limb 0 is below 2^51. */
  i0 = output[0U];
  i1 = output[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  output[0U] = i0_;
  output[1U] = i1_;
}
  184. static void
  185. Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, uint128_t *tmp, uint32_t count1)
  186. {
  187. uint32_t i;
  188. Hacl_Bignum_Fsquare_fsquare_(tmp, input);
  189. for (i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
  190. Hacl_Bignum_Fsquare_fsquare_(tmp, input);
  191. }
/*
 * output = input^(2^count1) mod p: copy `input` into `output`, then square
 * the copy in place count1 times. The helper squares at least once, so
 * callers pass count1 >= 1.
 */
inline static void
Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}
/*
 * Square `output` in place count1 times (output = output^(2^count1) mod p),
 * allocating a fresh 128-bit scratch buffer on the stack.
 */
inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = (uint128_t)(uint64_t)0U;
    }
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}
/*
 * Field inversion via Fermat's little theorem: out = z^(p - 2) =
 * z^(2^255 - 21) mod p, computed with the standard curve25519 addition
 * chain. Scratch layout in buf (20 limbs): a @ 0, t0 @ 5, b @ 10, c @ 15.
 * The exponent reached at each step is noted on the right.
 */
inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
{
  uint64_t buf[20U] = { 0U };
  uint64_t *a0 = buf;
  uint64_t *t00 = buf + (uint32_t)5U;
  uint64_t *b0 = buf + (uint32_t)10U;
  uint64_t *t01;
  uint64_t *b1;
  uint64_t *c0;
  uint64_t *a;
  uint64_t *t0;
  uint64_t *b;
  uint64_t *c;
  Hacl_Bignum_Fsquare_fsquare_times(a0, z, (uint32_t)1U);        /* a  = z^2 */
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)2U);      /* t0 = z^8 */
  Hacl_Bignum_Fmul_fmul(b0, t00, z);                             /* b  = z^9 */
  Hacl_Bignum_Fmul_fmul(a0, b0, a0);                             /* a  = z^11 */
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)1U);      /* t0 = z^22 */
  Hacl_Bignum_Fmul_fmul(b0, t00, b0);                            /* b  = z^(2^5 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);      /* t0 = z^(2^10 - 2^5) */
  t01 = buf + (uint32_t)5U;
  b1 = buf + (uint32_t)10U;
  c0 = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);                            /* b  = z^(2^10 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);     /* t0 = z^(2^20 - 2^10) */
  Hacl_Bignum_Fmul_fmul(c0, t01, b1);                            /* c  = z^(2^20 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);     /* t0 = z^(2^40 - 2^20) */
  Hacl_Bignum_Fmul_fmul(t01, t01, c0);                           /* t0 = z^(2^40 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U); /* t0 = z^(2^50 - 2^10) */
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);                            /* b  = z^(2^50 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);     /* t0 = z^(2^100 - 2^50) */
  a = buf;
  t0 = buf + (uint32_t)5U;
  b = buf + (uint32_t)10U;
  c = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(c, t0, b);                               /* c  = z^(2^100 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);      /* t0 = z^(2^200 - 2^100) */
  Hacl_Bignum_Fmul_fmul(t0, t0, c);                              /* t0 = z^(2^200 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);  /* t0 = z^(2^250 - 2^50) */
  Hacl_Bignum_Fmul_fmul(t0, t0, b);                              /* t0 = z^(2^250 - 1) */
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);   /* t0 = z^(2^255 - 2^5) */
  Hacl_Bignum_Fmul_fmul(out, t0, a);                             /* out = z^(2^255 - 21) */
}
  263. inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
  264. {
  265. uint32_t i;
  266. for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  267. {
  268. uint64_t xi = a[i];
  269. uint64_t yi = b[i];
  270. a[i] = xi + yi;
  271. }
  272. }
  273. inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
  274. {
  275. uint64_t tmp[5U] = { 0U };
  276. uint64_t b0;
  277. uint64_t b1;
  278. uint64_t b2;
  279. uint64_t b3;
  280. uint64_t b4;
  281. memcpy(tmp, b, (uint32_t)5U * sizeof b[0U]);
  282. b0 = tmp[0U];
  283. b1 = tmp[1U];
  284. b2 = tmp[2U];
  285. b3 = tmp[3U];
  286. b4 = tmp[4U];
  287. tmp[0U] = b0 + (uint64_t)0x3fffffffffff68U;
  288. tmp[1U] = b1 + (uint64_t)0x3ffffffffffff8U;
  289. tmp[2U] = b2 + (uint64_t)0x3ffffffffffff8U;
  290. tmp[3U] = b3 + (uint64_t)0x3ffffffffffff8U;
  291. tmp[4U] = b4 + (uint64_t)0x3ffffffffffff8U;
  292. {
  293. uint32_t i;
  294. for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  295. {
  296. uint64_t xi = a[i];
  297. uint64_t yi = tmp[i];
  298. a[i] = yi - xi;
  299. }
  300. }
  301. }
/*
 * Multiply a field element by a 64-bit scalar: output = b * s mod p.
 * Each limb product is kept in 128 bits, then one carry pass runs and the
 * top limb's overflow is folded back via 2^255 == 19 (mod p).
 */
inline static void Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
{
  KRML_CHECK_SIZE(sizeof (uint128_t), (uint32_t)5U);
  {
    uint128_t tmp[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        tmp[_i] = (uint128_t)(uint64_t)0U;
    }
    {
      uint128_t b4;
      uint128_t b0;
      uint128_t b4_;
      uint128_t b0_;
      {
        uint32_t i;
        for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
        {
          uint64_t xi = b[i];
          tmp[i] = (uint128_t)xi * s;
        }
      }
      Hacl_Bignum_Fproduct_carry_wide_(tmp);
      /* Fold limb 4's overflow into limb 0. */
      b4 = tmp[4U];
      b0 = tmp[0U];
      b4_ = b4 & (uint128_t)(uint64_t)0x7ffffffffffffU;
      b0_ = b0 + (uint128_t)(uint64_t)19U * (uint64_t)(b4 >> (uint32_t)51U);
      tmp[4U] = b4_;
      tmp[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
    }
  }
}
/* Field multiplication entry point: output = a * b mod 2^255 - 19.
 * Thin alias for Hacl_Bignum_Fmul_fmul. */
inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_Fmul_fmul(output, a, b);
}
/* Field inversion entry point: output = input^(-1) mod 2^255 - 19.
 * Thin alias for Hacl_Bignum_Crecip_crecip. */
inline static void Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
{
  Hacl_Bignum_Crecip_crecip(output, input);
}
  344. static void
  345. Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
  346. {
  347. uint32_t i = ctr - (uint32_t)1U;
  348. uint64_t ai = a[i];
  349. uint64_t bi = b[i];
  350. uint64_t x = swap1 & (ai ^ bi);
  351. uint64_t ai1 = ai ^ x;
  352. uint64_t bi1 = bi ^ x;
  353. a[i] = ai1;
  354. b[i] = bi1;
  355. }
  356. static void
  357. Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
  358. {
  359. if (!(ctr == (uint32_t)0U))
  360. {
  361. uint32_t i;
  362. Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
  363. i = ctr - (uint32_t)1U;
  364. Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
  365. }
  366. }
  367. static void Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
  368. {
  369. uint64_t swap1 = (uint64_t)0U - iswap;
  370. Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
  371. Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
  372. }
  373. static void Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
  374. {
  375. memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
  376. memcpy(output + (uint32_t)5U,
  377. input + (uint32_t)5U,
  378. (uint32_t)5U * sizeof (input + (uint32_t)5U)[0U]);
  379. }
/*
 * Parse 32 little-endian bytes into 5 x 51-bit limbs. Limb k holds bits
 * 51k..51k+50 of the value; each 64-bit load is positioned so that, after
 * the right shift, bit 51k of the input lands at bit 0 before masking.
 * NOTE(review): reads input[0..31] — caller must supply at least 32 bytes.
 */
static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
{
  uint64_t i0 = load64_le(input);              /* bits 0..63 */
  uint8_t *x00 = input + (uint32_t)6U;
  uint64_t i1 = load64_le(x00);                /* bits 48..111; 51 = 48 + 3 */
  uint8_t *x01 = input + (uint32_t)12U;
  uint64_t i2 = load64_le(x01);                /* bits 96..159; 102 = 96 + 6 */
  uint8_t *x02 = input + (uint32_t)19U;
  uint64_t i3 = load64_le(x02);                /* bits 152..215; 153 = 152 + 1 */
  uint8_t *x0 = input + (uint32_t)24U;
  uint64_t i4 = load64_le(x0);                 /* bits 192..255; 204 = 192 + 12 */
  uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
  uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
  uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
  uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
  uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
  output[0U] = output0;
  output[1U] = output1;
  output[2U] = output2;
  output[3U] = output3;
  output[4U] = output4;
}
  402. static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
  403. {
  404. uint64_t t0 = input[0U];
  405. uint64_t t1 = input[1U];
  406. uint64_t t2 = input[2U];
  407. uint64_t t3 = input[3U];
  408. uint64_t t4 = input[4U];
  409. uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
  410. uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
  411. uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
  412. uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
  413. uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
  414. uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
  415. uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
  416. uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
  417. input[0U] = t0_;
  418. input[1U] = t1__;
  419. input[2U] = t2__;
  420. input[3U] = t3__;
  421. input[4U] = t4_;
  422. }
/* One carry pass followed by folding the top limb's overflow back into
 * limb 0, pushing every limb toward the 51-bit range. */
static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
}
  428. static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
  429. {
  430. uint64_t t0 = input[0U];
  431. uint64_t t1 = input[1U];
  432. uint64_t t2 = input[2U];
  433. uint64_t t3 = input[3U];
  434. uint64_t t4 = input[4U];
  435. uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
  436. uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
  437. uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
  438. uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
  439. uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
  440. uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
  441. uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
  442. uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
  443. input[0U] = t0_;
  444. input[1U] = t1__;
  445. input[2U] = t2__;
  446. input[3U] = t3__;
  447. input[4U] = t4_;
  448. }
/*
 * Second carry pass plus top folding, then one extra carry from limb 0 into
 * limb 1. NOTE(review): this is intended to leave every limb below 2^51
 * (the trim step relies on that) — established by the verified F* source,
 * not re-proven here.
 */
static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
{
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_EC_Format_fcontract_second_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
  i0 = input[0U];
  i1 = input[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  input[0U] = i0_;
  input[1U] = i1_;
}
/*
 * Final constant-time reduction: with all limbs already below 2^51, the
 * value is >= p = 2^255 - 19 exactly when limb 0 >= 2^51 - 19 and limbs
 * 1..4 all equal 2^51 - 1. `mask` is all-ones in that case and p is
 * subtracted; otherwise mask is zero and nothing changes. The branch-free
 * mask functions keep the operation independent of secret data.
 */
static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
{
  uint64_t a0 = input[0U];
  uint64_t a1 = input[1U];
  uint64_t a2 = input[2U];
  uint64_t a3 = input[3U];
  uint64_t a4 = input[4U];
  /* 0x7ffffffffffed = 2^51 - 19 is limb 0 of p; the other limbs of p are
   * 2^51 - 1. */
  uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
  uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
  uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
  uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
  uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
  uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
  uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
  uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
  input[0U] = a0_;
  input[1U] = a1_;
  input[2U] = a2_;
  input[3U] = a3_;
  input[4U] = a4_;
}
/*
 * Serialize 5 x 51-bit limbs (fully reduced, each < 2^51) into 32
 * little-endian bytes. Output word k carries bits 64k..64k+63 of the value,
 * assembled from two adjacent limbs; e.g. o1 = (limb1 bits 13..50) |
 * (limb2 bits 0..25 shifted into place).
 */
static void Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
{
  uint64_t t0 = input[0U];
  uint64_t t1 = input[1U];
  uint64_t t2 = input[2U];
  uint64_t t3 = input[3U];
  uint64_t t4 = input[4U];
  uint64_t o0 = t1 << (uint32_t)51U | t0;                       /* bits 0..63 */
  uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;      /* bits 64..127 */
  uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;      /* bits 128..191 */
  uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;      /* bits 192..255 */
  uint8_t *b0 = output;
  uint8_t *b1 = output + (uint32_t)8U;
  uint8_t *b2 = output + (uint32_t)16U;
  uint8_t *b3 = output + (uint32_t)24U;
  store64_le(b0, o0);
  store64_le(b1, o1);
  store64_le(b2, o2);
  store64_le(b3, o3);
}
/*
 * Serialize a field element to 32 little-endian bytes: two carry rounds,
 * a constant-time subtraction of p when needed, then limb packing.
 * NOTE: `input` is modified in place (fully reduced) as a side effect.
 */
static void Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_full(input);
  Hacl_EC_Format_fcontract_second_carry_full(input);
  Hacl_EC_Format_fcontract_trim(input);
  Hacl_EC_Format_fcontract_store(output, input);
}
/*
 * Convert a projective point (x : z) (10 limbs: x at 0, z at 5) to its
 * affine x-coordinate and serialize it: scalar = fcontract(x * z^(-1)).
 */
static void Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
{
  uint64_t *x = point;
  uint64_t *z = point + (uint32_t)5U;
  uint64_t buf[10U] = { 0U };
  uint64_t *zmone = buf;                 /* z^(-1) */
  uint64_t *sc = buf + (uint32_t)5U;     /* x / z */
  Hacl_Bignum_crecip(zmone, z);
  Hacl_Bignum_fmul(sc, x, zmone);
  Hacl_EC_Format_fcontract(scalar, sc);
}
/*
 * Combined differential double-and-add for the Montgomery ladder:
 * pp = 2 * p and ppq = p + pq, where qmqp carries the x-coordinate used to
 * renormalize the addition (per the ladder caller this is the base point /
 * the difference of the two operands — NOTE(review): confirm against caller).
 * Each point is 10 limbs (x then z). `p` and `pq` are clobbered (used as
 * working storage for the sums/differences).
 * Scratch layout in buf (40 limbs): origx @ 0, origxprime @ 5, zzz @ 10,
 * xx @ 15, zz @ 20, xxprime @ 25, zzprime @ 30, zzzprime @ 35.
 */
static void
Hacl_EC_AddAndDouble_fmonty(
  uint64_t *pp,
  uint64_t *ppq,
  uint64_t *p,
  uint64_t *pq,
  uint64_t *qmqp
)
{
  uint64_t *qx = qmqp;
  uint64_t *x2 = pp;
  uint64_t *z2 = pp + (uint32_t)5U;
  uint64_t *x3 = ppq;
  uint64_t *z3 = ppq + (uint32_t)5U;
  uint64_t *x = p;
  uint64_t *z = p + (uint32_t)5U;
  uint64_t *xprime = pq;
  uint64_t *zprime = pq + (uint32_t)5U;
  uint64_t buf[40U] = { 0U };
  uint64_t *origx = buf;
  uint64_t *origxprime0 = buf + (uint32_t)5U;
  uint64_t *xxprime0 = buf + (uint32_t)25U;
  uint64_t *zzprime0 = buf + (uint32_t)30U;
  uint64_t *origxprime;
  uint64_t *xx0;
  uint64_t *zz0;
  uint64_t *xxprime;
  uint64_t *zzprime;
  uint64_t *zzzprime;
  uint64_t *zzz;
  uint64_t *xx;
  uint64_t *zz;
  uint64_t scalar;
  /* x, z := x + z, x - z (in place; origx saves x first). */
  memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
  Hacl_Bignum_fsum(x, z);
  Hacl_Bignum_fdifference(z, origx);
  /* xprime, zprime := xprime + zprime, xprime - zprime. */
  memcpy(origxprime0, xprime, (uint32_t)5U * sizeof xprime[0U]);
  Hacl_Bignum_fsum(xprime, zprime);
  Hacl_Bignum_fdifference(zprime, origxprime0);
  /* Cross products for the addition half. */
  Hacl_Bignum_fmul(xxprime0, xprime, z);
  Hacl_Bignum_fmul(zzprime0, x, zprime);
  origxprime = buf + (uint32_t)5U;
  xx0 = buf + (uint32_t)15U;
  zz0 = buf + (uint32_t)20U;
  xxprime = buf + (uint32_t)25U;
  zzprime = buf + (uint32_t)30U;
  zzzprime = buf + (uint32_t)35U;
  /* xxprime, zzprime := xxprime + zzprime, xxprime - zzprime. */
  memcpy(origxprime, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
  Hacl_Bignum_fsum(xxprime, zzprime);
  Hacl_Bignum_fdifference(zzprime, origxprime);
  /* Addition result: x3 = (sum)^2, z3 = (difference)^2 * qx. */
  Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
  Hacl_Bignum_fmul(z3, zzzprime, qx);
  /* Doubling half: xx = (x+z)^2, zz = (x-z)^2. */
  Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
  zzz = buf + (uint32_t)10U;
  xx = buf + (uint32_t)15U;
  zz = buf + (uint32_t)20U;
  /* x2 = xx * zz; then zz := xx - zz and
   * z2 = (xx - zz) * (xx + 121665 * (xx - zz)), where 121665 = (A - 2) / 4
   * for curve25519. */
  Hacl_Bignum_fmul(x2, xx, zz);
  Hacl_Bignum_fdifference(zz, xx);
  scalar = (uint64_t)121665U;
  Hacl_Bignum_fscalar(zzz, zz, scalar);
  Hacl_Bignum_fsum(zzz, xx);
  Hacl_Bignum_fmul(z2, zzz, zz);
}
  591. static void
  592. Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
  593. uint64_t *nq,
  594. uint64_t *nqpq,
  595. uint64_t *nq2,
  596. uint64_t *nqpq2,
  597. uint64_t *q,
  598. uint8_t byt
  599. )
  600. {
  601. uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
  602. uint64_t bit;
  603. Hacl_EC_Point_swap_conditional(nq, nqpq, bit0);
  604. Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
  605. bit = (uint64_t)(byt >> (uint32_t)7U);
  606. Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit);
  607. }
  608. static void
  609. Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
  610. uint64_t *nq,
  611. uint64_t *nqpq,
  612. uint64_t *nq2,
  613. uint64_t *nqpq2,
  614. uint64_t *q,
  615. uint8_t byt
  616. )
  617. {
  618. uint8_t byt1;
  619. Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
  620. byt1 = byt << (uint32_t)1U;
  621. Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
  622. }
  623. static void
  624. Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
  625. uint64_t *nq,
  626. uint64_t *nqpq,
  627. uint64_t *nq2,
  628. uint64_t *nqpq2,
  629. uint64_t *q,
  630. uint8_t byt,
  631. uint32_t i
  632. )
  633. {
  634. if (!(i == (uint32_t)0U))
  635. {
  636. uint32_t i_ = i - (uint32_t)1U;
  637. uint8_t byt_;
  638. Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
  639. byt_ = byt << (uint32_t)2U;
  640. Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
  641. }
  642. }
  643. static void
  644. Hacl_EC_Ladder_BigLoop_cmult_big_loop(
  645. uint8_t *n1,
  646. uint64_t *nq,
  647. uint64_t *nqpq,
  648. uint64_t *nq2,
  649. uint64_t *nqpq2,
  650. uint64_t *q,
  651. uint32_t i
  652. )
  653. {
  654. if (!(i == (uint32_t)0U))
  655. {
  656. uint32_t i1 = i - (uint32_t)1U;
  657. uint8_t byte = n1[i1];
  658. Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
  659. Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
  660. }
  661. }
/*
 * Montgomery-ladder scalar multiplication: result = n1 * q (projective).
 * point_buf holds four 10-limb points: nq @ 0, nqpq @ 10, nq2 @ 20,
 * nqpq2 @ 30. nq starts as (x = 1, z = 0) and nqpq as q; the big loop
 * consumes all 32 scalar bytes, byte 31 first.
 */
static void Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
{
  uint64_t point_buf[40U] = { 0U };
  uint64_t *nq = point_buf;
  uint64_t *nqpq = point_buf + (uint32_t)10U;
  uint64_t *nq2 = point_buf + (uint32_t)20U;
  uint64_t *nqpq2 = point_buf + (uint32_t)30U;
  Hacl_EC_Point_copy(nqpq, q);
  nq[0U] = (uint64_t)1U;
  Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
  Hacl_EC_Point_copy(result, nq);
}
/*
 * X25519 scalar multiplication: mypublic = secret * basepoint, per RFC 7748.
 * All three buffers are 32 bytes. `secret` is copied and clamped locally,
 * so the caller's buffer is left untouched.
 */
void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
{
  uint64_t buf0[10U] = { 0U };
  uint64_t *x0 = buf0;
  uint64_t *z = buf0 + (uint32_t)5U;
  uint64_t *q;
  /* Decode the base point into projective coordinates (x : 1). */
  Hacl_EC_Format_fexpand(x0, basepoint);
  z[0U] = (uint64_t)1U;
  q = buf0;
  {
    uint8_t e[32U] = { 0U };
    uint8_t e0;
    uint8_t e31;
    uint8_t e01;
    uint8_t e311;
    uint8_t e312;
    uint8_t *scalar;
    memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
    e0 = e[0U];
    e31 = e[31U];
    /* RFC 7748 clamping: clear the three low bits, clear bit 255, set
     * bit 254. */
    e01 = e0 & (uint8_t)248U;
    e311 = e31 & (uint8_t)127U;
    e312 = e311 | (uint8_t)64U;
    e[0U] = e01;
    e[31U] = e312;
    scalar = e;
    {
      /* 15 limbs; the first 10 hold the resulting point nq.
       * NOTE(review): only nq (10 limbs) appears to be read below —
       * the tail looks like scratch/padding. */
      uint64_t buf[15U] = { 0U };
      uint64_t *nq = buf;
      uint64_t *x = nq;
      x[0U] = (uint64_t)1U;
      Hacl_EC_Ladder_cmult(nq, scalar, q);
      Hacl_EC_Format_scalar_of_point(mypublic, nq);
    }
  }
}