/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
   Licensed under the Apache 2.0 License. */

/* This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
 * KreMLin invocation: /mnt/e/everest/verify/kremlin/krml -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -fc89 -fparentheses -fno-shadow -header /mnt/e/everest/verify/hdrcLh -minimal -I /mnt/e/everest/verify/hacl-star/code/lib/kremlin -I /mnt/e/everest/verify/kremlin/kremlib/compat -I /mnt/e/everest/verify/hacl-star/specs -I /mnt/e/everest/verify/hacl-star/specs/old -I . -ccopt -march=native -verbose -ldopt -flto -tmpdir x25519-c -I ../bignum -bundle Hacl.Curve25519=* -minimal -add-include "kremlib.h" -skip-compilation x25519-c/out.krml -o x25519-c/Hacl_Curve25519.c
 * F* version: 059db0c8
 * KreMLin version: 916c37ac
 */

#include "Hacl_Curve25519.h"

extern uint64_t FStar_UInt64_eq_mask(uint64_t x0, uint64_t x1);

extern uint64_t FStar_UInt64_gte_mask(uint64_t x0, uint64_t x1);

extern FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);

extern FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);

extern FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 x0, FStar_UInt128_uint128 x1);

extern FStar_UInt128_uint128 FStar_UInt128_shift_right(FStar_UInt128_uint128 x0, uint32_t x1);

extern FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t x0);

extern uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 x0);

extern FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x0, uint64_t x1);
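
/* Editorial note: a field element of GF(2^255 - 19) is represented as five
 * 64-bit limbs of 51 bits each (mask 0x7ffffffffffff), i.e. the value
 * b[0] + 2^51*b[1] + 2^102*b[2] + 2^153*b[3] + 2^204*b[4]. */

/* Folds the carry above bit 255 back into the lowest limb, using
 * 2^255 = 19 (mod p). */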
static void Hacl_Bignum_Modulo_carry_top(uint64_t *b)
{
  uint64_t b4 = b[4U];
  uint64_t b0 = b[0U];
  uint64_t b4_ = b4 & (uint64_t)0x7ffffffffffffU;
  uint64_t b0_ = b0 + (uint64_t)19U * (b4 >> (uint32_t)51U);
  b[4U] = b4_;
  b[0U] = b0_;
}

inline static void
Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_uint128 *input)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  {
    FStar_UInt128_uint128 xi = input[i];
    output[i] = FStar_UInt128_uint128_to_uint64(xi);
  }
}

inline static void
Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
  FStar_UInt128_uint128 *output,
  uint64_t *input,
  uint64_t s
)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  {
    FStar_UInt128_uint128 xi = output[i];
    uint64_t yi = input[i];
    output[i] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
  }
}
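
/* Carry propagation over the wide (128-bit) accumulator: each limb keeps
 * its low 51 bits and pushes the excess into the next limb. */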
inline static void Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_uint128 *tmp)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
  {
    uint32_t ctr = i;
    FStar_UInt128_uint128 tctr = tmp[ctr];
    FStar_UInt128_uint128 tctrp1 = tmp[ctr + (uint32_t)1U];
    uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
    FStar_UInt128_uint128 c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
    tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
    tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
  }
}
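
/* Multiplies the field element in place by 2^51 mod p: the limbs rotate up
 * one position, and the limb that wraps around is scaled by 19. */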
inline static void Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
{
  uint64_t tmp = output[4U];
  uint64_t b0;
  {
    uint32_t i;
    for (i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U)
    {
      uint32_t ctr = (uint32_t)5U - i - (uint32_t)1U;
      uint64_t z = output[ctr - (uint32_t)1U];
      output[ctr] = z;
    }
  }
  output[0U] = tmp;
  b0 = output[0U];
  output[0U] = (uint64_t)19U * b0;
}

static void
Hacl_Bignum_Fmul_mul_shift_reduce_(
  FStar_UInt128_uint128 *output,
  uint64_t *input,
  uint64_t *input2
)
{
  uint32_t i;
  uint64_t input2i;
  {
    uint32_t i0;
    for (i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U)
    {
      uint64_t input2i0 = input2[i0];
      Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i0);
      Hacl_Bignum_Fmul_shift_reduce(input);
    }
  }
  i = (uint32_t)4U;
  input2i = input2[i];
  Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
}
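
/* Field multiplication: accumulates input * input2 limb by limb into a
 * 128-bit vector, then carries and applies the mod-p top reduction. The
 * multiplicand is copied into tmp because shift_reduce mutates it. */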
inline static void Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input2)
{
  uint64_t tmp[5U] = { 0U };
  memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
  KRML_CHECK_SIZE(sizeof (FStar_UInt128_uint128), (uint32_t)5U);
  {
    FStar_UInt128_uint128 t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
    }
    {
      FStar_UInt128_uint128 b4;
      FStar_UInt128_uint128 b0;
      FStar_UInt128_uint128 b4_;
      FStar_UInt128_uint128 b0_;
      uint64_t i0;
      uint64_t i1;
      uint64_t i0_;
      uint64_t i1_;
      Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input2);
      Hacl_Bignum_Fproduct_carry_wide_(t);
      b4 = t[4U];
      b0 = t[0U];
      b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
      b0_ =
        FStar_UInt128_add(b0,
          FStar_UInt128_mul_wide((uint64_t)19U,
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
      t[4U] = b4_;
      t[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
      i0 = output[0U];
      i1 = output[1U];
      i0_ = i0 & (uint64_t)0x7ffffffffffffU;
      i1_ = i1 + (i0 >> (uint32_t)51U);
      output[0U] = i0_;
      output[1U] = i1_;
    }
  }
}
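
/* Field squaring: like fmul but exploits symmetry, precomputing doubled and
 * 19-scaled terms so each cross product is multiplied only once. */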
inline static void Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_uint128 *tmp, uint64_t *output)
{
  uint64_t r0 = output[0U];
  uint64_t r1 = output[1U];
  uint64_t r2 = output[2U];
  uint64_t r3 = output[3U];
  uint64_t r4 = output[4U];
  uint64_t d0 = r0 * (uint64_t)2U;
  uint64_t d1 = r1 * (uint64_t)2U;
  uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U;
  uint64_t d419 = r4 * (uint64_t)19U;
  uint64_t d4 = d419 * (uint64_t)2U;
  FStar_UInt128_uint128
  s0 =
    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
        FStar_UInt128_mul_wide(d4, r1)),
      FStar_UInt128_mul_wide(d2, r3));
  FStar_UInt128_uint128
  s1 =
    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
        FStar_UInt128_mul_wide(d4, r2)),
      FStar_UInt128_mul_wide(r3 * (uint64_t)19U, r3));
  FStar_UInt128_uint128
  s2 =
    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
        FStar_UInt128_mul_wide(r1, r1)),
      FStar_UInt128_mul_wide(d4, r3));
  FStar_UInt128_uint128
  s3 =
    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
        FStar_UInt128_mul_wide(d1, r2)),
      FStar_UInt128_mul_wide(r4, d419));
  FStar_UInt128_uint128
  s4 =
    FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
        FStar_UInt128_mul_wide(d1, r3)),
      FStar_UInt128_mul_wide(r2, r2));
  tmp[0U] = s0;
  tmp[1U] = s1;
  tmp[2U] = s2;
  tmp[3U] = s3;
  tmp[4U] = s4;
}

inline static void Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_uint128 *tmp, uint64_t *output)
{
  FStar_UInt128_uint128 b4;
  FStar_UInt128_uint128 b0;
  FStar_UInt128_uint128 b4_;
  FStar_UInt128_uint128 b0_;
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_Bignum_Fsquare_fsquare__(tmp, output);
  Hacl_Bignum_Fproduct_carry_wide_(tmp);
  b4 = tmp[4U];
  b0 = tmp[0U];
  b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
  b0_ =
    FStar_UInt128_add(b0,
      FStar_UInt128_mul_wide((uint64_t)19U,
        FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
  tmp[4U] = b4_;
  tmp[0U] = b0_;
  Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
  i0 = output[0U];
  i1 = output[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  output[0U] = i0_;
  output[1U] = i1_;
}

static void
Hacl_Bignum_Fsquare_fsquare_times_(
  uint64_t *input,
  FStar_UInt128_uint128 *tmp,
  uint32_t count1
)
{
  uint32_t i;
  Hacl_Bignum_Fsquare_fsquare_(tmp, input);
  for (i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
    Hacl_Bignum_Fsquare_fsquare_(tmp, input);
}

inline static void
Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (FStar_UInt128_uint128), (uint32_t)5U);
  {
    FStar_UInt128_uint128 t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
    }
    memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}

inline static void Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
{
  KRML_CHECK_SIZE(sizeof (FStar_UInt128_uint128), (uint32_t)5U);
  {
    FStar_UInt128_uint128 t[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
    }
    Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
  }
}
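
/* Modular inverse via Fermat's little theorem: raises z to p - 2 =
 * 2^255 - 21 using the classical Curve25519 addition chain (254 squarings
 * and 11 multiplications over a 20-limb scratch buffer). */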
inline static void Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
{
  uint64_t buf[20U] = { 0U };
  uint64_t *a0 = buf;
  uint64_t *t00 = buf + (uint32_t)5U;
  uint64_t *b0 = buf + (uint32_t)10U;
  uint64_t *t01;
  uint64_t *b1;
  uint64_t *c0;
  uint64_t *a;
  uint64_t *t0;
  uint64_t *b;
  uint64_t *c;
  Hacl_Bignum_Fsquare_fsquare_times(a0, z, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)2U);
  Hacl_Bignum_Fmul_fmul(b0, t00, z);
  Hacl_Bignum_Fmul_fmul(a0, b0, a0);
  Hacl_Bignum_Fsquare_fsquare_times(t00, a0, (uint32_t)1U);
  Hacl_Bignum_Fmul_fmul(b0, t00, b0);
  Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);
  t01 = buf + (uint32_t)5U;
  b1 = buf + (uint32_t)10U;
  c0 = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);
  Hacl_Bignum_Fmul_fmul(c0, t01, b1);
  Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);
  Hacl_Bignum_Fmul_fmul(t01, t01, c0);
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U);
  Hacl_Bignum_Fmul_fmul(b1, t01, b1);
  Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);
  a = buf;
  t0 = buf + (uint32_t)5U;
  b = buf + (uint32_t)10U;
  c = buf + (uint32_t)15U;
  Hacl_Bignum_Fmul_fmul(c, t0, b);
  Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);
  Hacl_Bignum_Fmul_fmul(t0, t0, c);
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);
  Hacl_Bignum_Fmul_fmul(t0, t0, b);
  Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);
  Hacl_Bignum_Fmul_fmul(out, t0, a);
}

inline static void Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
  {
    uint64_t xi = a[i];
    uint64_t yi = b[i];
    a[i] = xi + yi;
  }
}
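
/* Computes a := b - a. Adds 8*p to b first (the limbwise constants
 * 0x3fffffffffff68 and 0x3ffffffffffff8) so the per-limb subtractions
 * cannot underflow. */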
inline static void Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
{
  uint64_t tmp[5U] = { 0U };
  uint64_t b0;
  uint64_t b1;
  uint64_t b2;
  uint64_t b3;
  uint64_t b4;
  memcpy(tmp, b, (uint32_t)5U * sizeof b[0U]);
  b0 = tmp[0U];
  b1 = tmp[1U];
  b2 = tmp[2U];
  b3 = tmp[3U];
  b4 = tmp[4U];
  tmp[0U] = b0 + (uint64_t)0x3fffffffffff68U;
  tmp[1U] = b1 + (uint64_t)0x3ffffffffffff8U;
  tmp[2U] = b2 + (uint64_t)0x3ffffffffffff8U;
  tmp[3U] = b3 + (uint64_t)0x3ffffffffffff8U;
  tmp[4U] = b4 + (uint64_t)0x3ffffffffffff8U;
  {
    uint32_t i;
    for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
    {
      uint64_t xi = a[i];
      uint64_t yi = tmp[i];
      a[i] = yi - xi;
    }
  }
}
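
/* Multiplies a field element by a 64-bit scalar, then carries and applies
 * the mod-p top reduction. */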
inline static void Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
{
  KRML_CHECK_SIZE(sizeof (FStar_UInt128_uint128), (uint32_t)5U);
  {
    FStar_UInt128_uint128 tmp[5U];
    {
      uint32_t _i;
      for (_i = 0U; _i < (uint32_t)5U; ++_i)
        tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
    }
    {
      FStar_UInt128_uint128 b4;
      FStar_UInt128_uint128 b0;
      FStar_UInt128_uint128 b4_;
      FStar_UInt128_uint128 b0_;
      {
        uint32_t i;
        for (i = (uint32_t)0U; i < (uint32_t)5U; i = i + (uint32_t)1U)
        {
          uint64_t xi = b[i];
          tmp[i] = FStar_UInt128_mul_wide(xi, s);
        }
      }
      Hacl_Bignum_Fproduct_carry_wide_(tmp);
      b4 = tmp[4U];
      b0 = tmp[0U];
      b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
      b0_ =
        FStar_UInt128_add(b0,
          FStar_UInt128_mul_wide((uint64_t)19U,
            FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
      tmp[4U] = b4_;
      tmp[0U] = b0_;
      Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
    }
  }
}

inline static void Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
{
  Hacl_Bignum_Fmul_fmul(output, a, b);
}

inline static void Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
{
  Hacl_Bignum_Crecip_crecip(output, input);
}
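
/* Constant-time conditional swap: swap1 is all-ones or all-zeros, so the
 * XOR mask exchanges a[i] and b[i] without a data-dependent branch. */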
static void
Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
{
  uint32_t i = ctr - (uint32_t)1U;
  uint64_t ai = a[i];
  uint64_t bi = b[i];
  uint64_t x = swap1 & (ai ^ bi);
  uint64_t ai1 = ai ^ x;
  uint64_t bi1 = bi ^ x;
  a[i] = ai1;
  b[i] = bi1;
}

static void
Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
{
  if (!(ctr == (uint32_t)0U))
  {
    uint32_t i;
    Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
    i = ctr - (uint32_t)1U;
    Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
  }
}

static void Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
{
  uint64_t swap1 = (uint64_t)0U - iswap;
  Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
  Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
}

static void Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
{
  memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
  memcpy(output + (uint32_t)5U,
    input + (uint32_t)5U,
    (uint32_t)5U * sizeof (input + (uint32_t)5U)[0U]);
}
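
/* Deserialization: unpacks 32 little-endian bytes into five 51-bit limbs
 * using overlapping 64-bit loads at byte offsets 0, 6, 12, 19 and 24. */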
static void Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
{
  uint64_t i0 = load64_le(input);
  uint8_t *x00 = input + (uint32_t)6U;
  uint64_t i1 = load64_le(x00);
  uint8_t *x01 = input + (uint32_t)12U;
  uint64_t i2 = load64_le(x01);
  uint8_t *x02 = input + (uint32_t)19U;
  uint64_t i3 = load64_le(x02);
  uint8_t *x0 = input + (uint32_t)24U;
  uint64_t i4 = load64_le(x0);
  uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
  uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
  uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
  uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
  uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
  output[0U] = output0;
  output[1U] = output1;
  output[2U] = output2;
  output[3U] = output3;
  output[4U] = output4;
}
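
/* Serialization back to bytes proceeds in stages: two carry passes bring
 * every limb below 2^51 (with carry_top folding the overflow back into the
 * low limb), trim subtracts p once if the value is still >= p, and store
 * repacks the 255-bit result into four little-endian 64-bit words. */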
static void Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
{
  uint64_t t0 = input[0U];
  uint64_t t1 = input[1U];
  uint64_t t2 = input[2U];
  uint64_t t3 = input[3U];
  uint64_t t4 = input[4U];
  uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
  uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
  uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
  uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
  uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
  uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
  uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
  uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
  input[0U] = t0_;
  input[1U] = t1__;
  input[2U] = t2__;
  input[3U] = t3__;
  input[4U] = t4_;
}

static void Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
}

static void Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
{
  uint64_t t0 = input[0U];
  uint64_t t1 = input[1U];
  uint64_t t2 = input[2U];
  uint64_t t3 = input[3U];
  uint64_t t4 = input[4U];
  uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
  uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
  uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
  uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
  uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
  uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
  uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
  uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
  input[0U] = t0_;
  input[1U] = t1__;
  input[2U] = t2__;
  input[3U] = t3__;
  input[4U] = t4_;
}

static void Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
{
  uint64_t i0;
  uint64_t i1;
  uint64_t i0_;
  uint64_t i1_;
  Hacl_EC_Format_fcontract_second_carry_pass(input);
  Hacl_Bignum_Modulo_carry_top(input);
  i0 = input[0U];
  i1 = input[1U];
  i0_ = i0 & (uint64_t)0x7ffffffffffffU;
  i1_ = i1 + (i0 >> (uint32_t)51U);
  input[0U] = i0_;
  input[1U] = i1_;
}
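
/* Final reduction to the canonical representative: the combined mask is
 * all-ones exactly when the value is >= p, in which case p is subtracted,
 * all in constant time. */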
static void Hacl_EC_Format_fcontract_trim(uint64_t *input)
{
  uint64_t a0 = input[0U];
  uint64_t a1 = input[1U];
  uint64_t a2 = input[2U];
  uint64_t a3 = input[3U];
  uint64_t a4 = input[4U];
  uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
  uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
  uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
  uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
  uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
  uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
  uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
  uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
  uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
  input[0U] = a0_;
  input[1U] = a1_;
  input[2U] = a2_;
  input[3U] = a3_;
  input[4U] = a4_;
}

static void Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
{
  uint64_t t0 = input[0U];
  uint64_t t1 = input[1U];
  uint64_t t2 = input[2U];
  uint64_t t3 = input[3U];
  uint64_t t4 = input[4U];
  uint64_t o0 = t1 << (uint32_t)51U | t0;
  uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;
  uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;
  uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;
  uint8_t *b0 = output;
  uint8_t *b1 = output + (uint32_t)8U;
  uint8_t *b2 = output + (uint32_t)16U;
  uint8_t *b3 = output + (uint32_t)24U;
  store64_le(b0, o0);
  store64_le(b1, o1);
  store64_le(b2, o2);
  store64_le(b3, o3);
}

static void Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
{
  Hacl_EC_Format_fcontract_first_carry_full(input);
  Hacl_EC_Format_fcontract_second_carry_full(input);
  Hacl_EC_Format_fcontract_trim(input);
  Hacl_EC_Format_fcontract_store(output, input);
}
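
/* Converts a projective point (x : z) to the affine u-coordinate
 * x * z^(-1) mod p and serializes it to 32 bytes. */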
static void Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
{
  uint64_t *x = point;
  uint64_t *z = point + (uint32_t)5U;
  uint64_t buf[10U] = { 0U };
  uint64_t *zmone = buf;
  uint64_t *sc = buf + (uint32_t)5U;
  Hacl_Bignum_crecip(zmone, z);
  Hacl_Bignum_fmul(sc, x, zmone);
  Hacl_EC_Format_fcontract(scalar, sc);
}
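
/* One Montgomery-ladder step: given P (in p), Q (in pq) and Q - P (in qmqp,
 * x-coordinates only), computes 2P into pp and P + Q into ppq with the
 * standard differential addition-and-doubling formulas. The constant
 * 121665 = (486662 - 2) / 4 comes from the curve coefficient A = 486662. */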
static void
Hacl_EC_AddAndDouble_fmonty(
  uint64_t *pp,
  uint64_t *ppq,
  uint64_t *p,
  uint64_t *pq,
  uint64_t *qmqp
)
{
  uint64_t *qx = qmqp;
  uint64_t *x2 = pp;
  uint64_t *z2 = pp + (uint32_t)5U;
  uint64_t *x3 = ppq;
  uint64_t *z3 = ppq + (uint32_t)5U;
  uint64_t *x = p;
  uint64_t *z = p + (uint32_t)5U;
  uint64_t *xprime = pq;
  uint64_t *zprime = pq + (uint32_t)5U;
  uint64_t buf[40U] = { 0U };
  uint64_t *origx = buf;
  uint64_t *origxprime0 = buf + (uint32_t)5U;
  uint64_t *xxprime0 = buf + (uint32_t)25U;
  uint64_t *zzprime0 = buf + (uint32_t)30U;
  uint64_t *origxprime;
  uint64_t *xx0;
  uint64_t *zz0;
  uint64_t *xxprime;
  uint64_t *zzprime;
  uint64_t *zzzprime;
  uint64_t *zzz;
  uint64_t *xx;
  uint64_t *zz;
  uint64_t scalar;
  memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
  Hacl_Bignum_fsum(x, z);
  Hacl_Bignum_fdifference(z, origx);
  memcpy(origxprime0, xprime, (uint32_t)5U * sizeof xprime[0U]);
  Hacl_Bignum_fsum(xprime, zprime);
  Hacl_Bignum_fdifference(zprime, origxprime0);
  Hacl_Bignum_fmul(xxprime0, xprime, z);
  Hacl_Bignum_fmul(zzprime0, x, zprime);
  origxprime = buf + (uint32_t)5U;
  xx0 = buf + (uint32_t)15U;
  zz0 = buf + (uint32_t)20U;
  xxprime = buf + (uint32_t)25U;
  zzprime = buf + (uint32_t)30U;
  zzzprime = buf + (uint32_t)35U;
  memcpy(origxprime, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
  Hacl_Bignum_fsum(xxprime, zzprime);
  Hacl_Bignum_fdifference(zzprime, origxprime);
  Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
  Hacl_Bignum_fmul(z3, zzzprime, qx);
  Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
  Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
  zzz = buf + (uint32_t)10U;
  xx = buf + (uint32_t)15U;
  zz = buf + (uint32_t)20U;
  Hacl_Bignum_fmul(x2, xx, zz);
  Hacl_Bignum_fdifference(zz, xx);
  scalar = (uint64_t)121665U;
  Hacl_Bignum_fscalar(zzz, zz, scalar);
  Hacl_Bignum_fsum(zzz, xx);
  Hacl_Bignum_fmul(z2, zzz, zz);
}
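
/* Ladder step driven by the top bit of byt: conditionally swap the working
 * points, run one fmonty step, then swap back. The same bit is recomputed
 * for the second swap, since byt is unchanged in between. */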
static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt
)
{
  uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
  uint64_t bit;
  Hacl_EC_Point_swap_conditional(nq, nqpq, bit0);
  Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
  bit = (uint64_t)(byt >> (uint32_t)7U);
  Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit);
}

static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt
)
{
  uint8_t byt1;
  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
  byt1 = byt << (uint32_t)1U;
  Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
}

static void
Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint8_t byt,
  uint32_t i
)
{
  if (!(i == (uint32_t)0U))
  {
    uint32_t i_ = i - (uint32_t)1U;
    uint8_t byt_;
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
    byt_ = byt << (uint32_t)2U;
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
  }
}

static void
Hacl_EC_Ladder_BigLoop_cmult_big_loop(
  uint8_t *n1,
  uint64_t *nq,
  uint64_t *nqpq,
  uint64_t *nq2,
  uint64_t *nqpq2,
  uint64_t *q,
  uint32_t i
)
{
  if (!(i == (uint32_t)0U))
  {
    uint32_t i1 = i - (uint32_t)1U;
    uint8_t byte = n1[i1];
    Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
    Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
  }
}
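
/* Full scalar multiplication: walks the 32 scalar bytes from most to least
 * significant, consuming eight bits per byte via four double-steps. nq
 * starts at the point-at-infinity representative (1 : 0). */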
static void Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
{
  uint64_t point_buf[40U] = { 0U };
  uint64_t *nq = point_buf;
  uint64_t *nqpq = point_buf + (uint32_t)10U;
  uint64_t *nq2 = point_buf + (uint32_t)20U;
  uint64_t *nqpq2 = point_buf + (uint32_t)30U;
  Hacl_EC_Point_copy(nqpq, q);
  nq[0U] = (uint64_t)1U;
  Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
  Hacl_EC_Point_copy(result, nq);
}
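
/* X25519 entry point: expands the base point, clamps the secret scalar as
 * in RFC 7748 (clear bits 0-2 and 255, set bit 254), runs the ladder, and
 * contracts the resulting point to 32 bytes. */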
void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
{
  uint64_t buf0[10U] = { 0U };
  uint64_t *x0 = buf0;
  uint64_t *z = buf0 + (uint32_t)5U;
  uint64_t *q;
  Hacl_EC_Format_fexpand(x0, basepoint);
  z[0U] = (uint64_t)1U;
  q = buf0;
  {
    uint8_t e[32U] = { 0U };
    uint8_t e0;
    uint8_t e31;
    uint8_t e01;
    uint8_t e311;
    uint8_t e312;
    uint8_t *scalar;
    memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
    e0 = e[0U];
    e31 = e[31U];
    e01 = e0 & (uint8_t)248U;
    e311 = e31 & (uint8_t)127U;
    e312 = e311 | (uint8_t)64U;
    e[0U] = e01;
    e[31U] = e312;
    scalar = e;
    {
      uint64_t buf[15U] = { 0U };
      uint64_t *nq = buf;
      uint64_t *x = nq;
      x[0U] = (uint64_t)1U;
      Hacl_EC_Ladder_cmult(nq, scalar, q);
      Hacl_EC_Format_scalar_of_point(mypublic, nq);
    }
  }
}
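
/* Usage sketch (editorial; not part of the generated file). Deriving a
 * public key from a 32-byte secret with the standard base point u = 9:
 *
 *   uint8_t secret[32];                // fill with 32 random bytes first
 *   uint8_t basepoint[32] = { 9 };     // u = 9, remaining bytes zero
 *   uint8_t mypublic[32];
 *   Hacl_Curve25519_crypto_scalarmult(mypublic, secret, basepoint);
 */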