VeraCrypt
path: root/src/Crypto
Diffstat (limited to 'src/Crypto')
-rw-r--r--  src/Crypto/Aes_hw_armv8.c            316
-rw-r--r--  src/Crypto/Aes_hw_cpu.h                4
-rw-r--r--  src/Crypto/Crypto.vcxproj              8
-rw-r--r--  src/Crypto/Crypto.vcxproj.filters     18
-rw-r--r--  src/Crypto/Sha2.c                     17
-rw-r--r--  src/Crypto/Sha2.h                      2
-rw-r--r--  src/Crypto/config.h                   52
-rw-r--r--  src/Crypto/cpu.c                      69
-rw-r--r--  src/Crypto/cpu.h                      20
-rw-r--r--  src/Crypto/sha256_armv8.c            184
10 files changed, 678 insertions, 12 deletions
diff --git a/src/Crypto/Aes_hw_armv8.c b/src/Crypto/Aes_hw_armv8.c
new file mode 100644
index 00000000..b67ed1a5
--- /dev/null
+++ b/src/Crypto/Aes_hw_armv8.c
@@ -0,0 +1,316 @@
+/*
+* AES using ARMv8
+* Contributed by Jeffrey Walton
+*
+* Further changes
+* (C) 2017,2018 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+/* Modified and adapted for VeraCrypt */
+
+#include "Common/Tcdefs.h"
+#include "Aes_hw_cpu.h"
+#if !defined(_UEFI)
+#include <memory.h>
+#include <stdlib.h>
+#endif
+#include "cpu.h"
+#include "misc.h"
+
+#if CRYPTOPP_ARM_AES_AVAILABLE
+
+#include <arm_neon.h>
+
+// Single block encryption operations
+VC_INLINE void aes_enc_block(uint8x16_t* B, uint8x16_t K)
+{
+ *B = vaesmcq_u8(vaeseq_u8(*B, K));
+}
+
+VC_INLINE void aes_enc_block_last(uint8x16_t* B, uint8x16_t K, uint8x16_t K2)
+{
+ *B = veorq_u8(vaeseq_u8(*B, K), K2);
+}
+
+// 4-block parallel encryption operations
+VC_INLINE void aes_enc_4_blocks(uint8x16_t* B0, uint8x16_t* B1,
+ uint8x16_t* B2, uint8x16_t* B3, uint8x16_t K)
+{
+ *B0 = vaesmcq_u8(vaeseq_u8(*B0, K));
+ *B1 = vaesmcq_u8(vaeseq_u8(*B1, K));
+ *B2 = vaesmcq_u8(vaeseq_u8(*B2, K));
+ *B3 = vaesmcq_u8(vaeseq_u8(*B3, K));
+}
+
+VC_INLINE void aes_enc_4_blocks_last(uint8x16_t* B0, uint8x16_t* B1,
+ uint8x16_t* B2, uint8x16_t* B3,
+ uint8x16_t K, uint8x16_t K2)
+{
+ *B0 = veorq_u8(vaeseq_u8(*B0, K), K2);
+ *B1 = veorq_u8(vaeseq_u8(*B1, K), K2);
+ *B2 = veorq_u8(vaeseq_u8(*B2, K), K2);
+ *B3 = veorq_u8(vaeseq_u8(*B3, K), K2);
+}
+
+// Single block decryption operations
+VC_INLINE void aes_dec_block(uint8x16_t* B, uint8x16_t K)
+{
+ *B = vaesimcq_u8(vaesdq_u8(*B, K));
+}
+
+VC_INLINE void aes_dec_block_last(uint8x16_t* B, uint8x16_t K, uint8x16_t K2)
+{
+ *B = veorq_u8(vaesdq_u8(*B, K), K2);
+}
+
+// 4-block parallel decryption operations
+VC_INLINE void aes_dec_4_blocks(uint8x16_t* B0, uint8x16_t* B1,
+ uint8x16_t* B2, uint8x16_t* B3, uint8x16_t K)
+{
+ *B0 = vaesimcq_u8(vaesdq_u8(*B0, K));
+ *B1 = vaesimcq_u8(vaesdq_u8(*B1, K));
+ *B2 = vaesimcq_u8(vaesdq_u8(*B2, K));
+ *B3 = vaesimcq_u8(vaesdq_u8(*B3, K));
+}
+
+VC_INLINE void aes_dec_4_blocks_last(uint8x16_t* B0, uint8x16_t* B1,
+ uint8x16_t* B2, uint8x16_t* B3,
+ uint8x16_t K, uint8x16_t K2)
+{
+ *B0 = veorq_u8(vaesdq_u8(*B0, K), K2);
+ *B1 = veorq_u8(vaesdq_u8(*B1, K), K2);
+ *B2 = veorq_u8(vaesdq_u8(*B2, K), K2);
+ *B3 = veorq_u8(vaesdq_u8(*B3, K), K2);
+}
+
+VC_INLINE void aes256_hw_encrypt_blocks(uint8 buffer[], size_t blocks, const uint8* ks)
+{
+ const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
+ const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
+ const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
+ const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
+ const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
+ const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
+ const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
+ const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
+ const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
+ const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
+ const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
+ const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
+ const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
+ const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
+ const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
+
+ while(blocks >= 4) {
+ uint8x16_t B0 = vld1q_u8(buffer);
+ uint8x16_t B1 = vld1q_u8(buffer + 16);
+ uint8x16_t B2 = vld1q_u8(buffer + 32);
+ uint8x16_t B3 = vld1q_u8(buffer + 48);
+
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K0);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K1);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K2);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K3);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K4);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K5);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K6);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K7);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K8);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K9);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K10);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K11);
+ aes_enc_4_blocks(&B0, &B1, &B2, &B3, K12);
+ aes_enc_4_blocks_last(&B0, &B1, &B2, &B3, K13, K14);
+
+ vst1q_u8(buffer, B0);
+ vst1q_u8(buffer + 16, B1);
+ vst1q_u8(buffer + 32, B2);
+ vst1q_u8(buffer + 48, B3);
+
+ buffer += 16 * 4;
+ blocks -= 4;
+ }
+
+ for(size_t i = 0; i != blocks; ++i) {
+ uint8x16_t B = vld1q_u8(buffer + 16 * i);
+ aes_enc_block(&B, K0);
+ aes_enc_block(&B, K1);
+ aes_enc_block(&B, K2);
+ aes_enc_block(&B, K3);
+ aes_enc_block(&B, K4);
+ aes_enc_block(&B, K5);
+ aes_enc_block(&B, K6);
+ aes_enc_block(&B, K7);
+ aes_enc_block(&B, K8);
+ aes_enc_block(&B, K9);
+ aes_enc_block(&B, K10);
+ aes_enc_block(&B, K11);
+ aes_enc_block(&B, K12);
+ aes_enc_block_last(&B, K13, K14);
+ vst1q_u8(buffer + 16 * i, B);
+ }
+}
+
+VC_INLINE void aes256_hw_encrypt_block(uint8 buffer[], const uint8* ks)
+{
+ const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
+ const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
+ const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
+ const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
+ const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
+ const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
+ const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
+ const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
+ const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
+ const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
+ const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
+ const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
+ const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
+ const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
+ const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
+
+ uint8x16_t B = vld1q_u8(buffer);
+ aes_enc_block(&B, K0);
+ aes_enc_block(&B, K1);
+ aes_enc_block(&B, K2);
+ aes_enc_block(&B, K3);
+ aes_enc_block(&B, K4);
+ aes_enc_block(&B, K5);
+ aes_enc_block(&B, K6);
+ aes_enc_block(&B, K7);
+ aes_enc_block(&B, K8);
+ aes_enc_block(&B, K9);
+ aes_enc_block(&B, K10);
+ aes_enc_block(&B, K11);
+ aes_enc_block(&B, K12);
+ aes_enc_block_last(&B, K13, K14);
+ vst1q_u8(buffer, B);
+}
+
+VC_INLINE void aes256_hw_decrypt_blocks(uint8 buffer[], size_t blocks, const uint8* ks)
+{
+ const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
+ const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
+ const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
+ const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
+ const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
+ const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
+ const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
+ const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
+ const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
+ const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
+ const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
+ const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
+ const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
+ const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
+ const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
+
+ while(blocks >= 4) {
+ uint8x16_t B0 = vld1q_u8(buffer);
+ uint8x16_t B1 = vld1q_u8(buffer + 16);
+ uint8x16_t B2 = vld1q_u8(buffer + 32);
+ uint8x16_t B3 = vld1q_u8(buffer + 48);
+
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K0);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K1);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K2);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K3);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K4);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K5);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K6);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K7);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K8);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K9);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K10);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K11);
+ aes_dec_4_blocks(&B0, &B1, &B2, &B3, K12);
+ aes_dec_4_blocks_last(&B0, &B1, &B2, &B3, K13, K14);
+
+ vst1q_u8(buffer, B0);
+ vst1q_u8(buffer + 16, B1);
+ vst1q_u8(buffer + 32, B2);
+ vst1q_u8(buffer + 48, B3);
+
+ buffer += 16 * 4;
+ blocks -= 4;
+ }
+
+ for(size_t i = 0; i != blocks; ++i) {
+ uint8x16_t B = vld1q_u8(buffer + 16 * i);
+ aes_dec_block(&B, K0);
+ aes_dec_block(&B, K1);
+ aes_dec_block(&B, K2);
+ aes_dec_block(&B, K3);
+ aes_dec_block(&B, K4);
+ aes_dec_block(&B, K5);
+ aes_dec_block(&B, K6);
+ aes_dec_block(&B, K7);
+ aes_dec_block(&B, K8);
+ aes_dec_block(&B, K9);
+ aes_dec_block(&B, K10);
+ aes_dec_block(&B, K11);
+ aes_dec_block(&B, K12);
+ aes_dec_block_last(&B, K13, K14);
+ vst1q_u8(buffer + 16 * i, B);
+ }
+}
+
+VC_INLINE void aes256_hw_decrypt_block(uint8 buffer[], const uint8* ks)
+{
+ const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
+ const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
+ const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
+ const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
+ const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
+ const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
+ const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
+ const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
+ const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
+ const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
+ const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
+ const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
+ const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
+ const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
+ const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
+
+ uint8x16_t B = vld1q_u8(buffer);
+ aes_dec_block(&B, K0);
+ aes_dec_block(&B, K1);
+ aes_dec_block(&B, K2);
+ aes_dec_block(&B, K3);
+ aes_dec_block(&B, K4);
+ aes_dec_block(&B, K5);
+ aes_dec_block(&B, K6);
+ aes_dec_block(&B, K7);
+ aes_dec_block(&B, K8);
+ aes_dec_block(&B, K9);
+ aes_dec_block(&B, K10);
+ aes_dec_block(&B, K11);
+ aes_dec_block(&B, K12);
+ aes_dec_block_last(&B, K13, K14);
+ vst1q_u8(buffer, B);
+}
+
+void aes_hw_cpu_decrypt (const uint8 *ks, uint8 *data)
+{
+ aes256_hw_decrypt_block(data, ks);
+}
+
+void aes_hw_cpu_decrypt_32_blocks (const uint8 *ks, uint8 *data)
+{
+ aes256_hw_decrypt_blocks(data, 32, ks);
+}
+
+void aes_hw_cpu_encrypt (const uint8 *ks, uint8 *data)
+{
+ aes256_hw_encrypt_block(data, ks);
+}
+
+void aes_hw_cpu_encrypt_32_blocks (const uint8 *ks, uint8 *data)
+{
+ aes256_hw_encrypt_blocks(data, 32, ks);
+}
+
+#endif
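
The four exported wrappers keep the interface declared in Aes_hw_cpu.h, so callers need not distinguish this ARMv8 path from the x86 AES-NI one. A minimal, hedged usage sketch follows (not part of the commit): ks is assumed to be the 240-byte AES-256 key schedule produced by VeraCrypt's existing key setup, laid out as 15 consecutive 16-byte round keys, which is what the vld1q_u8(ks + n * 16) loads above consume; all routines work in place.

#include "Aes_hw_cpu.h"

/* Encrypt one 512-byte data unit in place: 32 AES blocks, the granularity
   exposed by the *_32_blocks wrappers. */
static void encrypt_unit_sketch (const uint8 *enc_ks, uint8 *unit512)
{
    aes_hw_cpu_encrypt_32_blocks (enc_ks, unit512);
}

/* Encrypt a single 16-byte block in place. */
static void encrypt_block_sketch (const uint8 *enc_ks, uint8 block[16])
{
    aes_hw_cpu_encrypt (enc_ks, block);
}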
diff --git a/src/Crypto/Aes_hw_cpu.h b/src/Crypto/Aes_hw_cpu.h
index face0a0c..d9dda1af 100644
--- a/src/Crypto/Aes_hw_cpu.h
+++ b/src/Crypto/Aes_hw_cpu.h
@@ -4,7 +4,7 @@
by the TrueCrypt License 3.0.
Modifications and additions to the original source code (contained in this file)
- and all other portions of this file are Copyright (c) 2013-2017 IDRIX
+ and all other portions of this file are Copyright (c) 2013-2025 IDRIX
and are governed by the Apache License 2.0 the full text of which is
contained in the file License.txt included in VeraCrypt binary and source
code distribution packages.
@@ -22,8 +22,8 @@ extern "C"
#if defined (TC_WINDOWS_BOOT)
uint8 is_aes_hw_cpu_supported ();
-#endif
void aes_hw_cpu_enable_sse ();
+#endif
void aes_hw_cpu_decrypt (const uint8 *ks, uint8 *data);
void VC_CDECL aes_hw_cpu_decrypt_32_blocks (const uint8 *ks, uint8 *data);
void aes_hw_cpu_encrypt (const uint8 *ks, uint8 *data);
diff --git a/src/Crypto/Crypto.vcxproj b/src/Crypto/Crypto.vcxproj
index 4aebc084..cad50c06 100644
--- a/src/Crypto/Crypto.vcxproj
+++ b/src/Crypto/Crypto.vcxproj
@@ -226,6 +226,10 @@
</ClCompile>
<ClCompile Include="Aeskey.c" />
<ClCompile Include="Aestab.c" />
+ <ClCompile Include="Aes_hw_armv8.c">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </ClCompile>
<ClCompile Include="blake2s.c" />
<ClCompile Include="blake2s_SSE2.c" />
<ClCompile Include="blake2s_SSE41.c" />
@@ -251,6 +255,10 @@
<ClCompile Include="SerpentFast.c" />
<ClCompile Include="SerpentFast_simd.cpp" />
<ClCompile Include="Sha2.c" />
+ <ClCompile Include="sha256_armv8.c">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </ClCompile>
<ClCompile Include="Sha2Intel.c" />
<ClCompile Include="Streebog.c" />
<ClCompile Include="t1ha2.c" />
diff --git a/src/Crypto/Crypto.vcxproj.filters b/src/Crypto/Crypto.vcxproj.filters
index 3d384f97..099c3ce4 100644
--- a/src/Crypto/Crypto.vcxproj.filters
+++ b/src/Crypto/Crypto.vcxproj.filters
@@ -90,6 +90,15 @@
<ClCompile Include="Sha2Intel.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="Aescrypt.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="Aes_hw_armv8.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="sha256_armv8.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Aes.h">
@@ -167,15 +176,6 @@
<ClInclude Include="t1ha_selfcheck.h">
<Filter>Header Files</Filter>
</ClInclude>
- <ClInclude Include="blake2s-load-sse2.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="blake2s-load-sse41.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="blake2s-round.h">
- <Filter>Header Files</Filter>
- </ClInclude>
</ItemGroup>
<ItemGroup>
<CustomBuild Include="Aes_hw_cpu.asm">
diff --git a/src/Crypto/Sha2.c b/src/Crypto/Sha2.c
index 5ae9cae2..27e61c3d 100644
--- a/src/Crypto/Sha2.c
+++ b/src/Crypto/Sha2.c
@@ -315,6 +315,10 @@ extern "C"
void VC_CDECL sha256_compress_nayuki(uint_32t state[8], const uint_8t block[64]);
#endif
+#if CRYPTOPP_ARM_SHA2_AVAILABLE
+ void sha256_compress_digest_armv8(const void* input_data, uint_32t digest[8], uint_64t num_blks);
+#endif
+
#if defined(__cplusplus)
}
#endif
@@ -757,6 +761,13 @@ void SSE2Sha256Transform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
}
#endif
+#if CRYPTOPP_ARM_SHA2_AVAILABLE
+void ArmSha256Transform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
+{
+ sha256_compress_digest_armv8(mp, ctx->hash, num_blks);
+}
+#endif
+
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
void Sha256AsmTransform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
{
@@ -805,6 +816,12 @@ void sha256_begin(sha256_ctx* ctx)
else
#endif
+#if CRYPTOPP_ARM_SHA2_AVAILABLE
+ if (HasSHA256())
+ sha256transfunc = ArmSha256Transform;
+ else
+#endif
+
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
sha256transfunc = Sha256AsmTransform;
#else
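
The sha256_begin() hunk above plugs the ARMv8 routine into the existing one-time dispatch: a transform function pointer is chosen from runtime CPU detection, and ArmSha256Transform simply forwards ctx->hash and the block pointer to sha256_compress_digest_armv8. A hedged sketch of that selection logic, using only the branches visible in the hunk (the typedef name and the portable fallback StdSha256Transform are hypothetical):

typedef void (*sha256transfn)(sha256_ctx* ctx, void* mp, uint_64t num_blks);
static sha256transfn sha256transfunc;

static void select_sha256_transform_sketch (void)
{
#if CRYPTOPP_ARM_SHA2_AVAILABLE
    if (HasSHA256())                          /* g_hasSHA256ARM, set by DetectArmFeatures() */
        sha256transfunc = ArmSha256Transform; /* ARMv8 SHA-256 extension */
    else
#endif
#if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
        sha256transfunc = Sha256AsmTransform; /* existing x86 assembly path */
#else
        sha256transfunc = StdSha256Transform; /* hypothetical portable C fallback */
#endif
}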
diff --git a/src/Crypto/Sha2.h b/src/Crypto/Sha2.h
index 1fbcb8d1..42d067fa 100644
--- a/src/Crypto/Sha2.h
+++ b/src/Crypto/Sha2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 IDRIX
+ * Copyright (c) 2013-2025 IDRIX
* Governed by the Apache License 2.0 the full text of which is contained
* in the file License.txt included in VeraCrypt binary and source
* code distribution packages.
diff --git a/src/Crypto/config.h b/src/Crypto/config.h
index 1c2aff72..f485c07e 100644
--- a/src/Crypto/config.h
+++ b/src/Crypto/config.h
@@ -29,6 +29,11 @@
#define CRYPTOPP_CLANG_INTEGRATED_ASSEMBLER 1
#endif
+#if defined(_MSC_VER) && !defined(__clang__)
+# undef CRYPTOPP_LLVM_CLANG_VERSION
+# define CRYPTOPP_MSC_VERSION (_MSC_VER)
+#endif
+
// Clang due to "Inline assembly operands don't work with .intel_syntax", http://llvm.org/bugs/show_bug.cgi?id=24232
// TODO: supply the upper version when LLVM fixes it. We set it to 20.0 for compilation purposes.
#if (defined(CRYPTOPP_LLVM_CLANG_VERSION) && CRYPTOPP_LLVM_CLANG_VERSION <= 200000) || (defined(CRYPTOPP_APPLE_CLANG_VERSION) && CRYPTOPP_APPLE_CLANG_VERSION <= 200000) || defined(CRYPTOPP_CLANG_INTEGRATED_ASSEMBLER)
@@ -201,6 +206,53 @@
#define CRYPTOPP_BOOL_X64 0
#endif
+#if defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64)
+ #define CRYPTOPP_BOOL_ARMV8 1
+ #define CRYPTOPP_BOOL_ARM64 1
+#else
+ #define CRYPTOPP_BOOL_ARMV8 0
+ #define CRYPTOPP_BOOL_ARM64 0
+#endif
+
+// ARMv8 and ASIMD. -march=armv8-a or above must be present
+// Requires GCC 4.8, Clang 3.3 or Visual Studio 2017
+// Do not use APPLE_CLANG_VERSION; use __ARM_FEATURE_XXX instead.
+#if !defined(CRYPTOPP_ARM_ASIMD_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ARM_ASIMD)
+# if defined(__aarch32__) || defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64)
+# if defined(__ARM_NEON) || defined(__ARM_ASIMD) || defined(__ARM_FEATURE_NEON) || defined(__ARM_FEATURE_ASIMD) || \
+ (CRYPTOPP_GCC_VERSION >= 40800) || (CRYPTOPP_LLVM_CLANG_VERSION >= 30300) || \
+ (CRYPTOPP_APPLE_CLANG_VERSION >= 40000) || (CRYPTOPP_MSC_VERSION >= 1916)
+# define CRYPTOPP_ARM_NEON_AVAILABLE 1
+# define CRYPTOPP_ARM_ASIMD_AVAILABLE 1
+# endif // Compilers
+# endif // Platforms
+#endif
+
+// ARMv8 and AES. -march=armv8-a+crypto or above must be present
+// Requires GCC 4.8, Clang 3.3 or Visual Studio 2017
+#if !defined(CRYPTOPP_ARM_AES_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ARM_AES)
+# if defined(__aarch32__) || defined(__aarch64__) || defined(_M_ARM64)
+# if defined(__ARM_FEATURE_CRYPTO) || (CRYPTOPP_GCC_VERSION >= 40800) || \
+ (CRYPTOPP_LLVM_CLANG_VERSION >= 30300) || (CRYPTOPP_APPLE_CLANG_VERSION >= 40300) || \
+ (CRYPTOPP_MSC_VERSION >= 1916)
+# define CRYPTOPP_ARM_AES_AVAILABLE 1
+# endif // Compilers
+# endif // Platforms
+#endif
+
+// ARMv8 and SHA-1, SHA-256. -march=armv8-a+crypto or above must be present
+// Requires GCC 4.8, Clang 3.3 or Visual Studio 2017
+#if !defined(CRYPTOPP_ARM_SHA_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ARM_SHA)
+# if defined(__aarch32__) || defined(__aarch64__) || defined(_M_ARM64)
+# if defined(__ARM_FEATURE_CRYPTO) || (CRYPTOPP_GCC_VERSION >= 40800) || \
+ (CRYPTOPP_LLVM_CLANG_VERSION >= 30300) || (CRYPTOPP_APPLE_CLANG_VERSION >= 40300) || \
+ (CRYPTOPP_MSC_VERSION >= 1916)
+# define CRYPTOPP_ARM_SHA1_AVAILABLE 1
+# define CRYPTOPP_ARM_SHA2_AVAILABLE 1
+# endif // Compilers
+# endif // Platforms
+#endif
+
// Undo the ASM and Intrinsic related defines due to X32.
#if CRYPTOPP_BOOL_X32
# undef CRYPTOPP_BOOL_X64
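
These config.h additions are purely compile-time gates: they check compiler versions and the __ARM_FEATURE_* macros, so whether the intrinsics in Aes_hw_armv8.c and sha256_armv8.c actually compile still depends on the target flags (typically something like -march=armv8-a+crypto on GCC/Clang; the exact flags are a build-system detail not shown in this diff). A small hedged probe for checking what a given build enables:

/* Hedged sketch: prints which ARMv8 crypto paths this translation unit
   would compile.  Relies only on the macros defined in config.h above. */
#include <stdio.h>
#include "config.h"

int main (void)
{
#if CRYPTOPP_ARM_AES_AVAILABLE
    puts ("CRYPTOPP_ARM_AES_AVAILABLE:  yes");
#else
    puts ("CRYPTOPP_ARM_AES_AVAILABLE:  no");
#endif
#if CRYPTOPP_ARM_SHA2_AVAILABLE
    puts ("CRYPTOPP_ARM_SHA2_AVAILABLE: yes");
#else
    puts ("CRYPTOPP_ARM_SHA2_AVAILABLE: no");
#endif
    return 0;
}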
diff --git a/src/Crypto/cpu.c b/src/Crypto/cpu.c
index e611e9bb..a5b5bb19 100644
--- a/src/Crypto/cpu.c
+++ b/src/Crypto/cpu.c
@@ -469,3 +469,72 @@ void DisableCPUExtendedFeatures ()
#endif
+#if CRYPTOPP_BOOL_ARMV8
+#if defined(__linux__) && defined(__aarch64__)
+#include <sys/auxv.h>
+#ifndef HWCAP_AES
+# define HWCAP_AES (1 << 3)
+#endif
+#ifndef HWCAP_SHA2
+# define HWCAP_SHA2 (1 << 6)
+#endif
+#endif
+
+volatile int g_hasAESARM = 0;
+volatile int g_hasSHA256ARM = 0;
+
+inline int CPU_QueryAES()
+{
+#if defined(CRYPTOPP_ARM_AES_AVAILABLE)
+#if defined(__linux__) && defined(__aarch64__)
+ if ((getauxval(AT_HWCAP) & HWCAP_AES) != 0)
+ return 1;
+#elif defined(__APPLE__) && defined(__aarch64__)
+ // Apple Silicon (M1) and later
+ return 1;
+#elif defined(_WIN32) && defined(_M_ARM64)
+#ifdef TC_WINDOWS_DRIVER
+ if (ExIsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
+ return 1;
+#else
+ if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
+ return 1;
+#endif
+#endif
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+inline int CPU_QuerySHA2()
+{
+#if defined(CRYPTOPP_ARM_SHA2_AVAILABLE)
+#if defined(__linux__) && defined(__aarch64__)
+ if ((getauxval(AT_HWCAP) & HWCAP_SHA2) != 0)
+ return 1;
+#elif defined(__APPLE__) && defined(__aarch64__)
+ // Apple Silicon (M1) and later
+ return 1;
+#elif defined(_WIN32) && defined(_M_ARM64)
+#ifdef TC_WINDOWS_DRIVER
+ if (ExIsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
+ return 1;
+#else
+ if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
+ return 1;
+#endif
+#endif
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+void DetectArmFeatures()
+{
+ g_hasAESARM = CPU_QueryAES();
+ g_hasSHA256ARM = CPU_QuerySHA2();
+}
+
+#endif
\ No newline at end of file
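
On Linux/AArch64 the new detection relies on the kernel's auxiliary vector rather than probing instructions directly. A standalone, hedged sketch of that check (equivalent in spirit to CPU_QueryAES()/CPU_QuerySHA2() above, but not VeraCrypt code):

#if defined(__linux__) && defined(__aarch64__)
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_AES
# define HWCAP_AES  (1 << 3)
#endif
#ifndef HWCAP_SHA2
# define HWCAP_SHA2 (1 << 6)
#endif

int main (void)
{
    unsigned long hw = getauxval (AT_HWCAP);
    printf ("AES instructions:     %s\n", (hw & HWCAP_AES)  ? "yes" : "no");
    printf ("SHA-256 instructions: %s\n", (hw & HWCAP_SHA2) ? "yes" : "no");
    return 0;
}
#endif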
diff --git a/src/Crypto/cpu.h b/src/Crypto/cpu.h
index b0df6462..cb34ad1f 100644
--- a/src/Crypto/cpu.h
+++ b/src/Crypto/cpu.h
@@ -288,6 +288,26 @@ void DisableCPUExtendedFeatures ();
}
#endif
+#elif CRYPTOPP_BOOL_ARMV8
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(CRYPTOPP_DISABLE_AESNI) && !defined(WOLFCRYPT_BACKEND)
+#define TC_AES_HW_CPU
+#endif
+
+extern volatile int g_hasAESARM;
+extern volatile int g_hasSHA256ARM;
+void DetectArmFeatures();
+
+#define HasAESNI() g_hasAESARM
+#define HasSHA256() g_hasSHA256ARM
+
+#if defined(__cplusplus)
+}
+#endif
+
#else
#define HasSSE2() 0
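
With these definitions, ARM64 builds reuse the existing HasAESNI()/HasSHA256() call sites unchanged: the macros now report the ARMv8 extensions filled in by DetectArmFeatures(). A hedged sketch of how a caller is expected to gate the hardware path (encrypt_block_in_software() is a hypothetical stand-in for the existing portable implementation; DetectArmFeatures() is assumed to have run at startup):

#include "cpu.h"
#include "Aes_hw_cpu.h"

extern void encrypt_block_in_software (const uint8 *ks, uint8 *data);  /* hypothetical fallback */

static void encrypt_one_block (const uint8 *ks, uint8 block[16])
{
#ifdef TC_AES_HW_CPU
    if (HasAESNI ())                     /* expands to g_hasAESARM on ARM64 */
    {
        aes_hw_cpu_encrypt (ks, block);  /* ARMv8 AES (or AES-NI on x86) */
        return;
    }
#endif
    encrypt_block_in_software (ks, block);
}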
diff --git a/src/Crypto/sha256_armv8.c b/src/Crypto/sha256_armv8.c
new file mode 100644
index 00000000..1599350a
--- /dev/null
+++ b/src/Crypto/sha256_armv8.c
@@ -0,0 +1,184 @@
+/*
+* SHA-256 using CPU instructions in ARMv8
+*
+* Contributed by Jeffrey Walton. Based on public domain code by
+* Johannes Schneiders, Skip Hovsmith and Barry O'Rourke.
+*
+* Further changes (C) 2020 Jack Lloyd
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+/* Modified and adapted for VeraCrypt */
+
+#include "Common/Tcdefs.h"
+#if !defined(_UEFI)
+#include <memory.h>
+#include <stdlib.h>
+#endif
+#include "cpu.h"
+#include "misc.h"
+
+#if CRYPTOPP_ARM_SHA2_AVAILABLE
+
+#include <arm_neon.h>
+
+CRYPTOPP_ALIGN_DATA(64) static const uint32 K[] = {
+ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+ 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+ 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+ 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+ 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+ 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+ 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+ 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+};
+
+void sha256_compress_digest_armv8(const void* input_data, uint32 digest[8], uint64 num_blks) {
+
+ // Load initial values
+ uint32x4_t STATE0 = vld1q_u32(&digest[0]);
+ uint32x4_t STATE1 = vld1q_u32(&digest[4]);
+
+ // Intermediate void* cast due to https://llvm.org/bugs/show_bug.cgi?id=20670
+ const uint32* input32 = (const uint32*)(const void*)input_data;
+
+ while (num_blks > 0) {
+ // Save current state
+ const uint32x4_t ABCD_SAVE = STATE0;
+ const uint32x4_t EFGH_SAVE = STATE1;
+
+ uint32x4_t MSG0 = vld1q_u32(input32 + 0);
+ uint32x4_t MSG1 = vld1q_u32(input32 + 4);
+ uint32x4_t MSG2 = vld1q_u32(input32 + 8);
+ uint32x4_t MSG3 = vld1q_u32(input32 + 12);
+
+ MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
+ MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
+ MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
+ MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
+
+ uint32x4_t MSG_K, TSTATE;
+
+ // Rounds 0-3
+ MSG_K = vaddq_u32(MSG0, vld1q_u32(&K[4 * 0]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG0 = vsha256su1q_u32(vsha256su0q_u32(MSG0, MSG1), MSG2, MSG3);
+
+ // Rounds 4-7
+ MSG_K = vaddq_u32(MSG1, vld1q_u32(&K[4 * 1]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG1 = vsha256su1q_u32(vsha256su0q_u32(MSG1, MSG2), MSG3, MSG0);
+
+ // Rounds 8-11
+ MSG_K = vaddq_u32(MSG2, vld1q_u32(&K[4 * 2]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG2 = vsha256su1q_u32(vsha256su0q_u32(MSG2, MSG3), MSG0, MSG1);
+
+ // Rounds 12-15
+ MSG_K = vaddq_u32(MSG3, vld1q_u32(&K[4 * 3]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG3 = vsha256su1q_u32(vsha256su0q_u32(MSG3, MSG0), MSG1, MSG2);
+
+ // Rounds 16-19
+ MSG_K = vaddq_u32(MSG0, vld1q_u32(&K[4 * 4]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG0 = vsha256su1q_u32(vsha256su0q_u32(MSG0, MSG1), MSG2, MSG3);
+
+ // Rounds 20-23
+ MSG_K = vaddq_u32(MSG1, vld1q_u32(&K[4 * 5]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG1 = vsha256su1q_u32(vsha256su0q_u32(MSG1, MSG2), MSG3, MSG0);
+
+ // Rounds 24-27
+ MSG_K = vaddq_u32(MSG2, vld1q_u32(&K[4 * 6]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG2 = vsha256su1q_u32(vsha256su0q_u32(MSG2, MSG3), MSG0, MSG1);
+
+ // Rounds 28-31
+ MSG_K = vaddq_u32(MSG3, vld1q_u32(&K[4 * 7]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG3 = vsha256su1q_u32(vsha256su0q_u32(MSG3, MSG0), MSG1, MSG2);
+
+ // Rounds 32-35
+ MSG_K = vaddq_u32(MSG0, vld1q_u32(&K[4 * 8]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG0 = vsha256su1q_u32(vsha256su0q_u32(MSG0, MSG1), MSG2, MSG3);
+
+ // Rounds 36-39
+ MSG_K = vaddq_u32(MSG1, vld1q_u32(&K[4 * 9]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG1 = vsha256su1q_u32(vsha256su0q_u32(MSG1, MSG2), MSG3, MSG0);
+
+ // Rounds 40-43
+ MSG_K = vaddq_u32(MSG2, vld1q_u32(&K[4 * 10]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG2 = vsha256su1q_u32(vsha256su0q_u32(MSG2, MSG3), MSG0, MSG1);
+
+ // Rounds 44-47
+ MSG_K = vaddq_u32(MSG3, vld1q_u32(&K[4 * 11]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+ MSG3 = vsha256su1q_u32(vsha256su0q_u32(MSG3, MSG0), MSG1, MSG2);
+
+ // Rounds 48-51
+ MSG_K = vaddq_u32(MSG0, vld1q_u32(&K[4 * 12]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+
+ // Rounds 52-55
+ MSG_K = vaddq_u32(MSG1, vld1q_u32(&K[4 * 13]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+
+ // Rounds 56-59
+ MSG_K = vaddq_u32(MSG2, vld1q_u32(&K[4 * 14]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+
+ // Rounds 60-63
+ MSG_K = vaddq_u32(MSG3, vld1q_u32(&K[4 * 15]));
+ TSTATE = vsha256hq_u32(STATE0, STATE1, MSG_K);
+ STATE1 = vsha256h2q_u32(STATE1, STATE0, MSG_K);
+ STATE0 = TSTATE;
+
+ // Add back to state
+ STATE0 = vaddq_u32(STATE0, ABCD_SAVE);
+ STATE1 = vaddq_u32(STATE1, EFGH_SAVE);
+
+ input32 += 64 / 4;
+ num_blks--;
+ }
+
+ // Save state
+ vst1q_u32(&digest[0], STATE0);
+ vst1q_u32(&digest[4], STATE1);
+}
+#endif
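
A hedged self-test sketch for the routine above (not part of the commit): compressing the single padding block of the empty message from the SHA-256 initial state must reproduce the well-known digest e3b0c442...7852b855. uint8/uint32/uint64 are assumed to be VeraCrypt's fixed-width typedefs from Tcdefs.h.

#include <stdio.h>
#include "Common/Tcdefs.h"

extern void sha256_compress_digest_armv8 (const void* input_data, uint32 digest[8], uint64 num_blks);

int main (void)
{
    /* SHA-256 initial hash values (FIPS 180-4). */
    uint32 state[8] = {
        0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
    };
    /* Padding block of the empty message: 0x80 then zeros, 64-bit length = 0. */
    uint8 block[64] = { 0x80 };
    int i;

    sha256_compress_digest_armv8 (block, state, 1);

    for (i = 0; i < 8; i++)
        printf ("%08x", (unsigned) state[i]);
    printf ("\n");   /* expected: e3b0c44298fc1c14...a495991b7852b855 */
    return 0;
}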