Workaround for compiler bug (GCC 4.8 and 4.9 when targeting 32-bit x86).
[BearSSL] / src / symcipher / aes_ct64_enc.c
1 /*
2 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "inner.h"
26
27 static inline void
28 add_round_key(uint64_t *q, const uint64_t *sk)
29 {
30 q[0] ^= sk[0];
31 q[1] ^= sk[1];
32 q[2] ^= sk[2];
33 q[3] ^= sk[3];
34 q[4] ^= sk[4];
35 q[5] ^= sk[5];
36 q[6] ^= sk[6];
37 q[7] ^= sk[7];
38 }
39
40 static inline void
41 shift_rows(uint64_t *q)
42 {
43 int i;
44
45 for (i = 0; i < 8; i ++) {
46 uint64_t x;
47
48 x = q[i];
49 q[i] = (x & (uint64_t)0x000000000000FFFF)
50 | ((x & (uint64_t)0x00000000FFF00000) >> 4)
51 | ((x & (uint64_t)0x00000000000F0000) << 12)
52 | ((x & (uint64_t)0x0000FF0000000000) >> 8)
53 | ((x & (uint64_t)0x000000FF00000000) << 8)
54 | ((x & (uint64_t)0xF000000000000000) >> 12)
55 | ((x & (uint64_t)0x0FFF000000000000) << 4);
56 }
57 }
58
59 static inline uint64_t
60 rotr32(uint64_t x)
61 {
62 return (x << 32) | (x >> 32);
63 }
64
65 static inline void
66 mix_columns(uint64_t *q)
67 {
68 uint64_t q0, q1, q2, q3, q4, q5, q6, q7;
69 uint64_t r0, r1, r2, r3, r4, r5, r6, r7;
70
71 q0 = q[0];
72 q1 = q[1];
73 q2 = q[2];
74 q3 = q[3];
75 q4 = q[4];
76 q5 = q[5];
77 q6 = q[6];
78 q7 = q[7];
79 r0 = (q0 >> 16) | (q0 << 48);
80 r1 = (q1 >> 16) | (q1 << 48);
81 r2 = (q2 >> 16) | (q2 << 48);
82 r3 = (q3 >> 16) | (q3 << 48);
83 r4 = (q4 >> 16) | (q4 << 48);
84 r5 = (q5 >> 16) | (q5 << 48);
85 r6 = (q6 >> 16) | (q6 << 48);
86 r7 = (q7 >> 16) | (q7 << 48);
87
88 q[0] = q7 ^ r7 ^ r0 ^ rotr32(q0 ^ r0);
89 q[1] = q0 ^ r0 ^ q7 ^ r7 ^ r1 ^ rotr32(q1 ^ r1);
90 q[2] = q1 ^ r1 ^ r2 ^ rotr32(q2 ^ r2);
91 q[3] = q2 ^ r2 ^ q7 ^ r7 ^ r3 ^ rotr32(q3 ^ r3);
92 q[4] = q3 ^ r3 ^ q7 ^ r7 ^ r4 ^ rotr32(q4 ^ r4);
93 q[5] = q4 ^ r4 ^ r5 ^ rotr32(q5 ^ r5);
94 q[6] = q5 ^ r5 ^ r6 ^ rotr32(q6 ^ r6);
95 q[7] = q6 ^ r6 ^ r7 ^ rotr32(q7 ^ r7);
96 }
97
98 /* see inner.h */
99 void
100 br_aes_ct64_bitslice_encrypt(unsigned num_rounds,
101 const uint64_t *skey, uint64_t *q)
102 {
103 unsigned u;
104
105 add_round_key(q, skey);
106 for (u = 1; u < num_rounds; u ++) {
107 br_aes_ct64_bitslice_Sbox(q);
108 shift_rows(q);
109 mix_columns(q);
110 add_round_key(q, skey + (u << 3));
111 }
112 br_aes_ct64_bitslice_Sbox(q);
113 shift_rows(q);
114 add_round_key(q, skey + (num_rounds << 3));
115 }