1 /* AesOpt.c -- Intel's AES
\r
2 2013-11-12 : Igor Pavlov : Public domain */
\r
8 #ifdef MY_CPU_X86_OR_AMD64
\r
10 #define USE_INTEL_AES
\r
14 #ifdef USE_INTEL_AES
\r
16 #include <wmmintrin.h>
\r
18 void MY_FAST_CALL AesCbc_Encode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
\r
21 for (; numBlocks != 0; numBlocks--, data++)
\r
23 UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
\r
24 const __m128i *w = p + 3;
\r
25 m = _mm_xor_si128(m, *data);
\r
26 m = _mm_xor_si128(m, p[2]);
\r
29 m = _mm_aesenc_si128(m, w[0]);
\r
30 m = _mm_aesenc_si128(m, w[1]);
\r
33 while (--numRounds2 != 0);
\r
34 m = _mm_aesenc_si128(m, w[0]);
\r
35 m = _mm_aesenclast_si128(m, w[1]);
\r
/* Number of blocks processed in parallel; three independent AES pipelines
   hide the multi-cycle latency of the aesenc/aesdec instructions. */
#define NUM_WAYS 3

/* Apply one AES round instruction `op` with round key w[n] to all three
   in-flight state registers m0..m2 (which must be in scope at expansion). */
#define AES_OP_W(op, n) { \
    const __m128i t = w[n]; \
    m0 = op(m0, t); \
    m1 = op(m1, t); \
    m2 = op(m2, t); \
    }

#define AES_DEC(n) AES_OP_W(_mm_aesdec_si128, n)
#define AES_DEC_LAST(n) AES_OP_W(_mm_aesdeclast_si128, n)
#define AES_ENC(n) AES_OP_W(_mm_aesenc_si128, n)
#define AES_ENC_LAST(n) AES_OP_W(_mm_aesenclast_si128, n)
\r
55 void MY_FAST_CALL AesCbc_Decode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
\r
58 for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
\r
60 UInt32 numRounds2 = *(const UInt32 *)(p + 1);
\r
61 const __m128i *w = p + numRounds2 * 2;
\r
64 const __m128i t = w[2];
\r
65 m0 = _mm_xor_si128(t, data[0]);
\r
66 m1 = _mm_xor_si128(t, data[1]);
\r
67 m2 = _mm_xor_si128(t, data[2]);
\r
76 while (--numRounds2 != 0);
\r
82 t = _mm_xor_si128(m0, iv); iv = data[0]; data[0] = t;
\r
83 t = _mm_xor_si128(m1, iv); iv = data[1]; data[1] = t;
\r
84 t = _mm_xor_si128(m2, iv); iv = data[2]; data[2] = t;
\r
87 for (; numBlocks != 0; numBlocks--, data++)
\r
89 UInt32 numRounds2 = *(const UInt32 *)(p + 1);
\r
90 const __m128i *w = p + numRounds2 * 2;
\r
91 __m128i m = _mm_xor_si128(w[2], *data);
\r
95 m = _mm_aesdec_si128(m, w[1]);
\r
96 m = _mm_aesdec_si128(m, w[0]);
\r
99 while (--numRounds2 != 0);
\r
100 m = _mm_aesdec_si128(m, w[1]);
\r
101 m = _mm_aesdeclast_si128(m, w[0]);
\r
103 m = _mm_xor_si128(m, iv);
\r
110 void MY_FAST_CALL AesCtr_Code_Intel(__m128i *p, __m128i *data, size_t numBlocks)
\r
114 one.m128i_u64[0] = 1;
\r
115 one.m128i_u64[1] = 0;
\r
116 for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
\r
118 UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
\r
119 const __m128i *w = p;
\r
120 __m128i m0, m1, m2;
\r
122 const __m128i t = w[2];
\r
123 ctr = _mm_add_epi64(ctr, one); m0 = _mm_xor_si128(ctr, t);
\r
124 ctr = _mm_add_epi64(ctr, one); m1 = _mm_xor_si128(ctr, t);
\r
125 ctr = _mm_add_epi64(ctr, one); m2 = _mm_xor_si128(ctr, t);
\r
134 while (--numRounds2 != 0);
\r
137 data[0] = _mm_xor_si128(data[0], m0);
\r
138 data[1] = _mm_xor_si128(data[1], m1);
\r
139 data[2] = _mm_xor_si128(data[2], m2);
\r
141 for (; numBlocks != 0; numBlocks--, data++)
\r
143 UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
\r
144 const __m128i *w = p;
\r
146 ctr = _mm_add_epi64(ctr, one);
\r
147 m = _mm_xor_si128(ctr, p[2]);
\r
151 m = _mm_aesenc_si128(m, w[0]);
\r
152 m = _mm_aesenc_si128(m, w[1]);
\r
155 while (--numRounds2 != 0);
\r
156 m = _mm_aesenc_si128(m, w[0]);
\r
157 m = _mm_aesenclast_si128(m, w[1]);
\r
158 *data = _mm_xor_si128(*data, m);
\r
165 void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks);
\r
166 void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks);
\r
167 void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks);
\r
169 void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
\r
171 AesCbc_Encode(p, data, numBlocks);
\r
174 void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
\r
176 AesCbc_Decode(p, data, numBlocks);
\r
179 void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *p, Byte *data, size_t numBlocks)
\r
181 AesCtr_Code(p, data, numBlocks);
\r