BearSSL: src/symcipher/aes_x86ni_ctr.c
/*
 * Copyright (c) 2017 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "inner.h"

#if BR_AES_X86NI

#if BR_AES_X86NI_GCC
#if BR_AES_X86NI_GCC_OLD
/*
 * Older GCC versions need the target enabled with a pragma before the
 * intrinsics headers are included.
 */
#pragma GCC target("sse2,sse4.1,aes,pclmul")
#endif
#include <smmintrin.h>
#include <wmmintrin.h>
#define bswap32 __builtin_bswap32
#endif

#if BR_AES_X86NI_MSC
#include <stdlib.h>
#include <intrin.h>
#define bswap32 _byteswap_ulong
#endif

/* see bearssl_block.h */
void
br_aes_x86ni_ctr_init(br_aes_x86ni_ctr_keys *ctx,
	const void *key, size_t len)
{
	ctx->vtable = &br_aes_x86ni_ctr_vtable;
	ctx->num_rounds = br_aes_x86ni_keysched_enc(ctx->skey.skni, key, len);
}

/* see bearssl_block.h */
BR_TARGET("sse2,sse4.1,aes")
uint32_t
br_aes_x86ni_ctr_run(const br_aes_x86ni_ctr_keys *ctx,
	const void *iv, uint32_t cc, void *data, size_t len)
{
	unsigned char *buf;
	unsigned char ivbuf[16];
	unsigned num_rounds;
	__m128i sk[15];
	__m128i ivx;
	unsigned u;

	/*
	 * The 12-byte IV fills the first three 32-bit words of each
	 * block; the fourth word receives the big-endian block counter.
	 */
	buf = data;
	memcpy(ivbuf, iv, 12);
	num_rounds = ctx->num_rounds;

	/*
	 * Preload the round subkeys into local __m128i values.
	 */
	for (u = 0; u <= num_rounds; u ++) {
		sk[u] = _mm_loadu_si128((void *)(ctx->skey.skni + (u << 4)));
	}
	ivx = _mm_loadu_si128((void *)ivbuf);
	while (len > 0) {
		__m128i x0, x1, x2, x3;

		/*
		 * Assemble four consecutive counter blocks, then apply
		 * the initial AddRoundKey and the first nine AES rounds
		 * to the four blocks in parallel.
		 */
		x0 = _mm_insert_epi32(ivx, bswap32(cc + 0), 3);
		x1 = _mm_insert_epi32(ivx, bswap32(cc + 1), 3);
		x2 = _mm_insert_epi32(ivx, bswap32(cc + 2), 3);
		x3 = _mm_insert_epi32(ivx, bswap32(cc + 3), 3);
		x0 = _mm_xor_si128(x0, sk[0]);
		x1 = _mm_xor_si128(x1, sk[0]);
		x2 = _mm_xor_si128(x2, sk[0]);
		x3 = _mm_xor_si128(x3, sk[0]);
		x0 = _mm_aesenc_si128(x0, sk[1]);
		x1 = _mm_aesenc_si128(x1, sk[1]);
		x2 = _mm_aesenc_si128(x2, sk[1]);
		x3 = _mm_aesenc_si128(x3, sk[1]);
		x0 = _mm_aesenc_si128(x0, sk[2]);
		x1 = _mm_aesenc_si128(x1, sk[2]);
		x2 = _mm_aesenc_si128(x2, sk[2]);
		x3 = _mm_aesenc_si128(x3, sk[2]);
		x0 = _mm_aesenc_si128(x0, sk[3]);
		x1 = _mm_aesenc_si128(x1, sk[3]);
		x2 = _mm_aesenc_si128(x2, sk[3]);
		x3 = _mm_aesenc_si128(x3, sk[3]);
		x0 = _mm_aesenc_si128(x0, sk[4]);
		x1 = _mm_aesenc_si128(x1, sk[4]);
		x2 = _mm_aesenc_si128(x2, sk[4]);
		x3 = _mm_aesenc_si128(x3, sk[4]);
		x0 = _mm_aesenc_si128(x0, sk[5]);
		x1 = _mm_aesenc_si128(x1, sk[5]);
		x2 = _mm_aesenc_si128(x2, sk[5]);
		x3 = _mm_aesenc_si128(x3, sk[5]);
		x0 = _mm_aesenc_si128(x0, sk[6]);
		x1 = _mm_aesenc_si128(x1, sk[6]);
		x2 = _mm_aesenc_si128(x2, sk[6]);
		x3 = _mm_aesenc_si128(x3, sk[6]);
		x0 = _mm_aesenc_si128(x0, sk[7]);
		x1 = _mm_aesenc_si128(x1, sk[7]);
		x2 = _mm_aesenc_si128(x2, sk[7]);
		x3 = _mm_aesenc_si128(x3, sk[7]);
		x0 = _mm_aesenc_si128(x0, sk[8]);
		x1 = _mm_aesenc_si128(x1, sk[8]);
		x2 = _mm_aesenc_si128(x2, sk[8]);
		x3 = _mm_aesenc_si128(x3, sk[8]);
		x0 = _mm_aesenc_si128(x0, sk[9]);
		x1 = _mm_aesenc_si128(x1, sk[9]);
		x2 = _mm_aesenc_si128(x2, sk[9]);
		x3 = _mm_aesenc_si128(x3, sk[9]);
		/*
		 * The remaining rounds depend on the key size: 10, 12 or
		 * 14 rounds in total for 128-, 192- or 256-bit keys.
		 */
		if (num_rounds == 10) {
			x0 = _mm_aesenclast_si128(x0, sk[10]);
			x1 = _mm_aesenclast_si128(x1, sk[10]);
			x2 = _mm_aesenclast_si128(x2, sk[10]);
			x3 = _mm_aesenclast_si128(x3, sk[10]);
		} else if (num_rounds == 12) {
			x0 = _mm_aesenc_si128(x0, sk[10]);
			x1 = _mm_aesenc_si128(x1, sk[10]);
			x2 = _mm_aesenc_si128(x2, sk[10]);
			x3 = _mm_aesenc_si128(x3, sk[10]);
			x0 = _mm_aesenc_si128(x0, sk[11]);
			x1 = _mm_aesenc_si128(x1, sk[11]);
			x2 = _mm_aesenc_si128(x2, sk[11]);
			x3 = _mm_aesenc_si128(x3, sk[11]);
			x0 = _mm_aesenclast_si128(x0, sk[12]);
			x1 = _mm_aesenclast_si128(x1, sk[12]);
			x2 = _mm_aesenclast_si128(x2, sk[12]);
			x3 = _mm_aesenclast_si128(x3, sk[12]);
		} else {
			x0 = _mm_aesenc_si128(x0, sk[10]);
			x1 = _mm_aesenc_si128(x1, sk[10]);
			x2 = _mm_aesenc_si128(x2, sk[10]);
			x3 = _mm_aesenc_si128(x3, sk[10]);
			x0 = _mm_aesenc_si128(x0, sk[11]);
			x1 = _mm_aesenc_si128(x1, sk[11]);
			x2 = _mm_aesenc_si128(x2, sk[11]);
			x3 = _mm_aesenc_si128(x3, sk[11]);
			x0 = _mm_aesenc_si128(x0, sk[12]);
			x1 = _mm_aesenc_si128(x1, sk[12]);
			x2 = _mm_aesenc_si128(x2, sk[12]);
			x3 = _mm_aesenc_si128(x3, sk[12]);
			x0 = _mm_aesenc_si128(x0, sk[13]);
			x1 = _mm_aesenc_si128(x1, sk[13]);
			x2 = _mm_aesenc_si128(x2, sk[13]);
			x3 = _mm_aesenc_si128(x3, sk[13]);
			x0 = _mm_aesenclast_si128(x0, sk[14]);
			x1 = _mm_aesenclast_si128(x1, sk[14]);
			x2 = _mm_aesenclast_si128(x2, sk[14]);
			x3 = _mm_aesenclast_si128(x3, sk[14]);
		}
		/*
		 * XOR the keystream into the data. A final chunk shorter
		 * than 64 bytes goes through a temporary buffer, and the
		 * counter is advanced only by the number of complete
		 * blocks that were consumed.
		 */
		if (len >= 64) {
			x0 = _mm_xor_si128(x0,
				_mm_loadu_si128((void *)(buf + 0)));
			x1 = _mm_xor_si128(x1,
				_mm_loadu_si128((void *)(buf + 16)));
			x2 = _mm_xor_si128(x2,
				_mm_loadu_si128((void *)(buf + 32)));
			x3 = _mm_xor_si128(x3,
				_mm_loadu_si128((void *)(buf + 48)));
			_mm_storeu_si128((void *)(buf + 0), x0);
			_mm_storeu_si128((void *)(buf + 16), x1);
			_mm_storeu_si128((void *)(buf + 32), x2);
			_mm_storeu_si128((void *)(buf + 48), x3);
			buf += 64;
			len -= 64;
			cc += 4;
		} else {
			unsigned char tmp[64];

			_mm_storeu_si128((void *)(tmp + 0), x0);
			_mm_storeu_si128((void *)(tmp + 16), x1);
			_mm_storeu_si128((void *)(tmp + 32), x2);
			_mm_storeu_si128((void *)(tmp + 48), x3);
			for (u = 0; u < len; u ++) {
				buf[u] ^= tmp[u];
			}
			cc += (uint32_t)len >> 4;
			break;
		}
	}
	return cc;
}
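
/*
 * Added commentary (not part of the original file): the updated counter
 * returned by br_aes_x86ni_ctr_run() is meant to be chained into the
 * next call when a long stream is processed in several chunks, e.g.:
 *
 *   cc = br_aes_x86ni_ctr_run(&kc, nonce, cc, chunk1, len1);
 *   cc = br_aes_x86ni_ctr_run(&kc, nonce, cc, chunk2, len2);
 *
 * Keeping every chunk except the last one a multiple of 16 bytes keeps
 * the keystream aligned on block boundaries between calls. The names
 * kc, nonce, chunk1 and chunk2 are placeholders for this sketch.
 */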

/* see bearssl_block.h */
const br_block_ctr_class br_aes_x86ni_ctr_vtable = {
	sizeof(br_aes_x86ni_ctr_keys),   /* context size */
	16,                              /* block size (bytes) */
	4,                               /* log2 of block size */
	(void (*)(const br_block_ctr_class **, const void *, size_t))
	&br_aes_x86ni_ctr_init,
	(uint32_t (*)(const br_block_ctr_class *const *,
		const void *, uint32_t, void *, size_t))
	&br_aes_x86ni_ctr_run
};

/* see bearssl_block.h */
const br_block_ctr_class *
br_aes_x86ni_ctr_get_vtable(void)
{
	return br_aes_x86ni_supported() ? &br_aes_x86ni_ctr_vtable : NULL;
}
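
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file); the key, nonce and data values below are placeholders. In CTR
 * mode the same call performs both encryption and decryption.
 *
 *   br_aes_x86ni_ctr_keys kc;
 *   unsigned char key[16] = { 0 };    (16, 24 or 32 bytes)
 *   unsigned char nonce[12] = { 0 };  (12-byte IV)
 *   unsigned char data[100] = { 0 };
 *   uint32_t cc = 0;
 *
 *   if (br_aes_x86ni_ctr_get_vtable() != NULL) {
 *       br_aes_x86ni_ctr_init(&kc, key, sizeof key);
 *       cc = br_aes_x86ni_ctr_run(&kc, nonce, cc, data, sizeof data);
 *   }
 *
 * The get_vtable() check verifies at runtime that AES-NI is actually
 * available before the accelerated functions are used.
 */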

#else

/* see bearssl_block.h */
const br_block_ctr_class *
br_aes_x86ni_ctr_get_vtable(void)
{
	return NULL;
}

#endif