 * Copyright (c) 2017 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
27 #if BR_INT128 || BR_UMUL128
31 #define MUL128(hi, lo, x, y) do { \
32 unsigned __int128 mul128tmp; \
33 mul128tmp = (unsigned __int128)(x) * (unsigned __int128)(y); \
34 (hi) = (uint64_t)(mul128tmp >> 64); \
35 (lo) = (uint64_t)mul128tmp; \
42 #define MUL128(hi, lo, x, y) do { \
43 (lo) = _umul128((x), (y), &(hi)); \
/*
 * Bit masks for the 42-bit and 44-bit accumulator limbs used by the
 * Poly1305 code below (the 130-bit accumulator is held as limbs of
 * 44 bits in 64-bit words).
 */
#define MASK42   ((uint64_t)0x000003FFFFFFFFFF)
#define MASK44   ((uint64_t)0x00000FFFFFFFFFFF)
 * The "accumulator" word is nominally a 130-bit value. We split it into
 * words of 44 bits, each held in a 64-bit variable.
 *
 * If the current accumulator is a = a0 + a1*W + a2*W^2 (where W = 2^44)
 * and r = r0 + r1*W + r2*W^2, then:
 *
 *   a*r = a0*r0
 *       + (a0*r1 + a1*r0) * W
 *       + (a0*r2 + a1*r1 + a2*r0) * W^2
 *       + (a1*r2 + a2*r1) * W^3
 *       + a2*r2 * W^4
 *
 * We want to reduce that value modulo p = 2^130-5, so W^3 = 20 mod p,
 * and W^4 = 20*W mod p. Thus, if we define u1 = 20*r1 and u2 = 20*r2,
 * then the equations above become:
 *
 *   b0 = a0*r0 + a1*u2 + a2*u1
 *   b1 = a0*r1 + a1*r0 + a2*u2
 *   b2 = a0*r2 + a1*r1 + a2*r0
 *
 * In order to make u1 fit in 44 bits, we can change these equations
 * into:
 *
 *   b0 = a0*r0 + a1*u2 + a2*t1
 *   b1 = a0*r1 + a1*r0 + a2*t2
 *   b2 = a0*r2 + a1*r1 + a2*r0
 *
 * Where t1 is u1 truncated to 44 bits, and t2 is u2 added to the extra
 * bits of u1. Note that since r is clamped down to a 124-bit value, the
 * values u2 and t2 fit on 44 bits too.
 *
 * The bx values are larger than 44 bits, so we may split them into a
 * lower half (cx, 44 bits) and an upper half (dx). The new values for
 * the accumulator are then:
 *
 *   e0 = c0 + 20*d2
 *   e1 = c1 + d0
 *   e2 = c2 + d1
 *
 * The equations allow for some room, i.e. the ax values may be larger
 * than 44 bits. Similarly, the ex values will usually be larger than
 * the ax. Thus, some sort of carry propagation must be done regularly,
 * though not necessarily at each iteration. In particular, we do not
 * need to compute the additions (for the bx values) over 128-bit
 * quantities; we can stick to 64-bit computations.
 *
 * Since the 128-bit result of a 64x64 multiplication is actually
 * represented over two 64-bit registers, it is cheaper to arrange for
 * any split that happens between the "high" and "low" halves to be on
 * that 64-bit boundary. This is done by left shifting the rx, ux and tx
 * by 20 bits (since they all fit on 44 bits each, this shift is
 * always possible).
/*
 * Poly1305 inner loop, unrolled four ways: each outer iteration
 * absorbs 64 bytes of message (four 16-byte words). Every word is
 * decoded little-endian into three 44-bit limbs, added into the
 * accumulator together with the 2^128 padding bit, and the
 * accumulator is then multiplied by r modulo 2^130-5 following the
 * scheme described in the large comment above.
 *
 * NOTE(review): this extract has lost several original lines (the
 * loads of r0..u2 and the accumulator limbs, the outer loop header,
 * the c/d recombination after each MX triple, the final stores into
 * acc[], and parts of the MX macro). The surviving code is kept
 * verbatim, including the leaked line-number prefixes.
 */
108 poly1305_inner_big(uint64_t *acc
, uint64_t *r
, const void *data
, size_t len
)
/*
 * MX(hi, lo, m0, m1, m2): accumulate the three 128-bit products
 * a0*m0, a1*m1 and a2*m2. The m* operands are pre-shifted left by
 * 20 bits (see the comment above), so lo collects each low half
 * shifted right by 20; the corresponding updates of hi are among
 * the lines missing from this extract.
 */
111 #define MX(hi, lo, m0, m1, m2) do { \
112 uint64_t mxhi, mxlo; \
113 MUL128(mxhi, mxlo, a0, m0); \
116 MUL128(mxhi, mxlo, a1, m1); \
118 (lo) += mxlo >> 20; \
119 MUL128(mxhi, mxlo, a2, m2); \
121 (lo) += mxlo >> 20; \
124 const unsigned char *buf
;
/*
 * Local copies of the r limbs and the derived words (u2 = 20*r2;
 * t1/t2 derived from 20*r1 — see the comment above). Presumably
 * loaded from r[0..5] in lines missing from this extract — TODO
 * confirm against the full source.
 */
126 uint64_t r0
, r1
, r2
, t1
, t2
, u2
;
/* Low (c, 44-bit) and high (d) halves of the per-word products. */
141 uint64_t c0
, c1
, c2
, d0
, d1
, d2
;
/* Word 1: decode 16 bytes into 44-bit limbs v0, v1, v2. */
143 v0
= br_dec64le(buf
+ 0);
144 v1
= br_dec64le(buf
+ 8);
146 v1
= ((v0
>> 44) | (v1
<< 20)) & MASK44
;
/* Add into the accumulator; 1 << 40 in the top limb is the 2^128 bit
   (44*2 + 40 = 128). */
150 a2
+= v2
+ ((uint64_t)1 << 40);
/* acc <- acc * r mod p, split into (d, c) high/low halves. */
151 MX(d0
, c0
, r0
, u2
, t1
);
152 MX(d1
, c1
, r1
, r0
, t2
);
153 MX(d2
, c2
, r2
, r1
, r0
);
/* Word 2 (bytes 16..31), same processing. */
158 v0
= br_dec64le(buf
+ 16);
159 v1
= br_dec64le(buf
+ 24);
161 v1
= ((v0
>> 44) | (v1
<< 20)) & MASK44
;
165 a2
+= v2
+ ((uint64_t)1 << 40);
166 MX(d0
, c0
, r0
, u2
, t1
);
167 MX(d1
, c1
, r1
, r0
, t2
);
168 MX(d2
, c2
, r2
, r1
, r0
);
/* Word 3 (bytes 32..47). */
173 v0
= br_dec64le(buf
+ 32);
174 v1
= br_dec64le(buf
+ 40);
176 v1
= ((v0
>> 44) | (v1
<< 20)) & MASK44
;
180 a2
+= v2
+ ((uint64_t)1 << 40);
181 MX(d0
, c0
, r0
, u2
, t1
);
182 MX(d1
, c1
, r1
, r0
, t2
);
183 MX(d2
, c2
, r2
, r1
, r0
);
/* Word 4 (bytes 48..63). */
188 v0
= br_dec64le(buf
+ 48);
189 v1
= br_dec64le(buf
+ 56);
191 v1
= ((v0
>> 44) | (v1
<< 20)) & MASK44
;
195 a2
+= v2
+ ((uint64_t)1 << 40);
196 MX(d0
, c0
, r0
, u2
, t1
);
197 MX(d1
, c1
, r1
, r0
, t2
);
198 MX(d2
, c2
, r2
, r1
, r0
);
/*
 * Partial carry propagation: fold the top-limb overflow back into
 * the low limb, using W^3 = 20 mod p (see the comment above).
 */
207 a0
+= 20 * (a2
>> 44);
/*
 * Poly1305 inner loop for the message tail: absorbs data in 16-byte
 * words, with one multiplication by r per word. A final partial word
 * is copied into 'tmp' and zero-padded to 16 bytes before being
 * processed.
 *
 * NOTE(review): this extract has lost several original lines (limb
 * loads/stores, the loop header, the switch of buf to tmp after the
 * padding copy, the c/d recombination, and parts of the MX macro).
 * The surviving code is kept verbatim, including the leaked
 * line-number prefixes.
 */
221 poly1305_inner_small(uint64_t *acc
, uint64_t *r
, const void *data
, size_t len
)
223 const unsigned char *buf
;
/* r limbs and derived t1, t2, u2 words (see the comment above). */
225 uint64_t r0
, r1
, r2
, t1
, t2
, u2
;
/* Low (c) and high (d) halves of the per-word products. */
240 uint64_t c0
, c1
, c2
, d0
, d1
, d2
;
241 unsigned char tmp
[16];
/*
 * Final partial word: copy into tmp and zero-pad to 16 bytes.
 * NOTE(review): the original presumably redirects buf to tmp here
 * (in a missing line) before the decodes below — TODO confirm.
 */
244 memcpy(tmp
, buf
, len
);
245 memset(tmp
+ len
, 0, (sizeof tmp
) - len
);
/* Decode one 16-byte word into 44-bit limbs v0, v1, v2. */
249 v0
= br_dec64le(buf
+ 0);
250 v1
= br_dec64le(buf
+ 8);
253 v1
= ((v0
>> 44) | (v1
<< 20)) & MASK44
;
/* Add into the accumulator, plus the 2^128 padding bit (1 << 40 in
   the top limb). */
258 a2
+= v2
+ ((uint64_t)1 << 40);
/*
 * Per-word multiply helper (same contract as in poly1305_inner_big):
 * accumulate a0*m0 + a1*m1 + a2*m2; lo collects the low halves
 * shifted right by 20, and the hi updates are among the missing
 * lines.
 */
260 #define MX(hi, lo, m0, m1, m2) do { \
261 uint64_t mxhi, mxlo; \
262 MUL128(mxhi, mxlo, a0, m0); \
265 MUL128(mxhi, mxlo, a1, m1); \
267 (lo) += mxlo >> 20; \
268 MUL128(mxhi, mxlo, a2, m2); \
270 (lo) += mxlo >> 20; \
273 MX(d0
, c0
, r0
, u2
, t1
);
274 MX(d1
, c1
, r1
, r0
, t2
);
275 MX(d2
, c2
, r2
, r1
, r0
);
/* Partial carry: fold the top-limb overflow back (W^3 = 20 mod p). */
287 a0
+= 20 * (a2
>> 44);
/*
 * Process 'len' bytes of message data into the Poly1305 accumulator.
 * Whole 64-byte chunks go through the unrolled poly1305_inner_big();
 * any remainder (including a final partial 16-byte word) is handled
 * by poly1305_inner_small().
 *
 * acc    accumulator (three 44-bit limbs in 64-bit words)
 * r      clamped 'r' value and precomputed derived words
 * data   message bytes
 * len    message length in bytes (any value, including 0)
 */
static void
poly1305_inner(uint64_t *acc, uint64_t *r, const void *data, size_t len)
{
	if (len >= 64) {
		size_t len2;

		/* Largest multiple of 64 not exceeding len. */
		len2 = len & ~(size_t)63;
		poly1305_inner_big(acc, r, data, len2);
		data = (const unsigned char *)data + len2;
		len -= len2;
	}
	if (len > 0) {
		poly1305_inner_small(acc, r, data, len);
	}
}
314 /* see bearssl_block.h */
/*
 * ChaCha20+Poly1305 AEAD runner. Derives the one-time Poly1305 key
 * (pkey) from ChaCha20 block 0, en/decrypts the data with ChaCha20
 * starting at block counter 1, MACs AAD + ciphertext + a 16-byte
 * length footer, reduces the accumulator modulo 2^130-5 in constant
 * time, adds the 's' half of the key modulo 2^128, and writes the
 * 16-byte tag.
 *
 * NOTE(review): this extract has lost a number of original lines
 * (the declaration of 'ctl', the clamping of r, parts of the r[]
 * setup, the encrypt/decrypt guards, the masking steps between the
 * carries, and the truncation of w0..w3 back into v0..v3). The
 * surviving code is kept verbatim, including the leaked line-number
 * prefixes.
 */
316 br_poly1305_ctmulq_run(const void *key
, const void *iv
,
317 void *data
, size_t len
, const void *aad
, size_t aad_len
,
318 void *tag
, br_chacha20_run ichacha
, int encrypt
)
320 unsigned char pkey
[32], foot
[16];
321 uint64_t r
[6], acc
[3], r0
, r1
;
322 uint32_t v0
, v1
, v2
, v3
, v4
;
323 uint64_t w0
, w1
, w2
, w3
;
327 * Compute the MAC key. The 'r' value is the first 16 bytes of
330 memset(pkey
, 0, sizeof pkey
);
331 ichacha(key
, iv
, 0, pkey
, sizeof pkey
);
334 * If encrypting, ChaCha20 must run first, followed by Poly1305.
335 * When decrypting, the operations are reversed.
/* NOTE(review): in the full source this call is guarded by
   "if (encrypt)" — the guard line is missing from this extract. */
338 ichacha(key
, iv
, 1, data
, len
);
342 * Run Poly1305. We must process the AAD, then ciphertext, then
343 * the footer (with the lengths). Note that the AAD and ciphertext
344 * are meant to be padded with zeros up to the next multiple of 16,
345 * and the length of the footer is 16 bytes as well.
349 * Apply the "clamping" on r.
360 * Decode the 'r' value into 44-bit words, left-shifted by 20 bits.
361 * Also compute the u1 and u2 values.
363 r0
= br_dec64le(pkey
+ 0);
364 r1
= br_dec64le(pkey
+ 8);
366 r
[1] = ((r0
>> 24) | (r1
<< 40)) & ~(uint64_t)0xFFFFF;
367 r
[2] = (r1
>> 4) & ~(uint64_t)0xFFFFF;
368 r1
= 20 * (r
[1] >> 20);
/* NOTE(review): r[0], r[3] and r[5] are set in lines missing from
   this extract; r[5] is read here. */
371 r
[4] = (r
[5] + (r1
>> 24)) & ~(uint64_t)0xFFFFF;
381 * Process the additional authenticated data, ciphertext, and
382 * footer in due order.
/* Footer: AAD length then data length, 64-bit little-endian each. */
384 br_enc64le(foot
, (uint64_t)aad_len
);
385 br_enc64le(foot
+ 8, (uint64_t)len
);
386 poly1305_inner(acc
, r
, aad
, aad_len
);
387 poly1305_inner(acc
, r
, data
, len
);
388 poly1305_inner_small(acc
, r
, foot
, sizeof foot
);
391 * Finalise modular reduction. At that point, the value consists
392 * in three 44-bit values (the lowest one might be slightly above
393 * 2^44). Two loops shall be sufficient.
395 acc
[1] += (acc
[0] >> 44);
397 acc
[2] += (acc
[1] >> 44);
/* Bits 130+ of the top limb fold back as *5 (2^130 = 5 mod p). */
399 acc
[0] += 5 * (acc
[2] >> 42);
401 acc
[1] += (acc
[0] >> 44);
403 acc
[2] += (acc
[1] >> 44);
405 acc
[0] += 5 * (acc
[2] >> 42);
409 * The value may still fall in the 2^130-5..2^130-1 range, in
410 * which case we must reduce it again. The code below selects,
411 * in constant-time, between 'acc' and 'acc-p'. We encode the
412 * value over four 32-bit integers to finish the operation.
414 v0
= (uint32_t)acc
[0];
415 v1
= (uint32_t)(acc
[0] >> 32) | ((uint32_t)acc
[1] << 12);
416 v2
= (uint32_t)(acc
[1] >> 20) | ((uint32_t)acc
[2] << 24);
417 v3
= (uint32_t)(acc
[2] >> 8);
418 v4
= (uint32_t)(acc
[2] >> 40);
/*
 * Constant-time test for value >= p = 2^130 - 5. GT/EQ/MUX are
 * presumably BearSSL's constant-time primitives (inner.h) — note
 * that the declaration of 'ctl' is among the missing lines.
 */
420 ctl
= GT(v0
, 0xFFFFFFFA);
421 ctl
&= EQ(v1
, 0xFFFFFFFF);
422 ctl
&= EQ(v2
, 0xFFFFFFFF);
423 ctl
&= EQ(v3
, 0xFFFFFFFF);
424 ctl
&= EQ(v4
, 0x00000003);
/* If acc >= p, subtract p (add 5 and drop bits 128 and above). */
425 v0
= MUX(ctl
, v0
+ 5, v0
);
426 v1
= MUX(ctl
, 0, v1
);
427 v2
= MUX(ctl
, 0, v2
);
428 v3
= MUX(ctl
, 0, v3
);
431 * Add the "s" value. This is done modulo 2^128. Don't forget
432 * carry propagation...
434 w0
= (uint64_t)v0
+ (uint64_t)br_dec32le(pkey
+ 16);
435 w1
= (uint64_t)v1
+ (uint64_t)br_dec32le(pkey
+ 20) + (w0
>> 32);
436 w2
= (uint64_t)v2
+ (uint64_t)br_dec32le(pkey
+ 24) + (w1
>> 32);
437 w3
= (uint64_t)v3
+ (uint64_t)br_dec32le(pkey
+ 28) + (w2
>> 32);
/*
 * Write the 16-byte tag. NOTE(review): the truncation of w0..w3
 * back into v0..v3 happens in lines missing from this extract.
 */
446 br_enc32le((unsigned char *)tag
+ 0, v0
);
447 br_enc32le((unsigned char *)tag
+ 4, v1
);
448 br_enc32le((unsigned char *)tag
+ 8, v2
);
449 br_enc32le((unsigned char *)tag
+ 12, v3
);
452 * If decrypting, then ChaCha20 runs _after_ Poly1305.
/* NOTE(review): guarded by "if (!encrypt)" in the full source. */
455 ichacha(key
, iv
, 1, data
, len
);
459 /* see bearssl_block.h */
461 br_poly1305_ctmulq_get(void)
463 return &br_poly1305_ctmulq_run
;
468 /* see bearssl_block.h */
470 br_poly1305_ctmulq_get(void)