author    | Mounir IDRASSI <mounir.idrassi@idrix.fr> | 2024-11-10 21:08:00 +0100
committer | Mounir IDRASSI <mounir.idrassi@idrix.fr> | 2024-11-10 21:08:00 +0100
commit    | 04c747fb2df007eddc27e515acaa91f8993a70af (patch)
tree      | f1ecf9078e500f022d78f2441834bcaf3985ccd1 /src/Crypto
parent    | fcc0c8283679c696a9938791bf6f7a3ea2921716 (diff)
download  | VeraCrypt-04c747fb2df007eddc27e515acaa91f8993a70af.tar.gz
          | VeraCrypt-04c747fb2df007eddc27e515acaa91f8993a70af.zip
Add support for SHA-256 x86 intrinsics to enhance performance of PBKDF2-HMAC-SHA256
Diffstat (limited to 'src/Crypto')
-rw-r--r-- | src/Crypto/Crypto.vcxproj         |   1
-rw-r--r-- | src/Crypto/Crypto.vcxproj.filters |   3
-rw-r--r-- | src/Crypto/Crypto_vs2019.vcxproj  |   1
-rw-r--r-- | src/Crypto/Sha2.c                 |  15
-rw-r--r-- | src/Crypto/Sha2Intel.c            | 218
-rw-r--r-- | src/Crypto/Sources                |   1
-rw-r--r-- | src/Crypto/config.h               |   9
-rw-r--r-- | src/Crypto/cpu.c                  |  36
-rw-r--r-- | src/Crypto/cpu.h                  |  18
9 files changed, 302 insertions, 0 deletions
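
Context for the change: PBKDF2-HMAC-SHA256 spends essentially all of its time in the SHA-256 compression function. Once the HMAC ipad/opad chaining states are precomputed from the password, each iteration costs exactly two compression calls, so a faster kernel such as sha256_intel speeds up key derivation almost one-to-one. The sketch below illustrates that inner loop; it is not VeraCrypt code, and sha256_compress is a hypothetical stand-in for whichever transform sha256_begin selects at runtime.

#include <stdint.h>
#include <string.h>

/* Hypothetical hook: processes one 64-byte block into the 8-word state;
   in this patch that role is played by sha256_intel on SHA-NI CPUs. */
extern void sha256_compress(uint32_t state[8], const uint8_t block[64]);

/* One PBKDF2-HMAC-SHA256 output block; u holds U1 = HMAC(P, salt||INT(1))
   computed by the caller. Every iteration below costs exactly two
   compression calls, so throughput tracks the kernel speed. */
static void pbkdf2_block(const uint32_t ipad_state[8],
                         const uint32_t opad_state[8],
                         uint8_t u[32], uint8_t out[32], uint32_t iterations)
{
    uint32_t i, j;
    memcpy(out, u, 32);
    for (i = 1; i < iterations; i++) {
        uint8_t blk[64] = { 0 };
        uint32_t st[8];

        /* inner hash: the 64-byte ipad block is already absorbed, so the
           32-byte U plus padding fits one block; length = 768 = 0x300 bits */
        memcpy(blk, u, 32);
        blk[32] = 0x80;
        blk[62] = 0x03;
        memcpy(st, ipad_state, sizeof st);
        sha256_compress(st, blk);

        /* outer hash: same shape, message is the big-endian inner digest */
        memset(blk, 0, 64);
        for (j = 0; j < 8; j++) {
            blk[4*j]   = (uint8_t)(st[j] >> 24);
            blk[4*j+1] = (uint8_t)(st[j] >> 16);
            blk[4*j+2] = (uint8_t)(st[j] >> 8);
            blk[4*j+3] = (uint8_t)st[j];
        }
        blk[32] = 0x80;
        blk[62] = 0x03;
        memcpy(st, opad_state, sizeof st);
        sha256_compress(st, blk);

        /* U_i, folded into the output block */
        for (j = 0; j < 8; j++) {
            u[4*j]   = (uint8_t)(st[j] >> 24);
            u[4*j+1] = (uint8_t)(st[j] >> 16);
            u[4*j+2] = (uint8_t)(st[j] >> 8);
            u[4*j+3] = (uint8_t)st[j];
        }
        for (j = 0; j < 32; j++)
            out[j] ^= u[j];
    }
}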
diff --git a/src/Crypto/Crypto.vcxproj b/src/Crypto/Crypto.vcxproj
index 97a472f7..0f79395f 100644
--- a/src/Crypto/Crypto.vcxproj
+++ b/src/Crypto/Crypto.vcxproj
@@ -223,6 +223,7 @@
     <ClCompile Include="SerpentFast.c" />
     <ClCompile Include="SerpentFast_simd.cpp" />
     <ClCompile Include="Sha2.c" />
+    <ClCompile Include="Sha2Intel.c" />
     <ClCompile Include="Streebog.c" />
     <ClCompile Include="t1ha2.c" />
     <ClCompile Include="t1ha2_selfcheck.c" />
diff --git a/src/Crypto/Crypto.vcxproj.filters b/src/Crypto/Crypto.vcxproj.filters
index 5d149bdd..3d384f97 100644
--- a/src/Crypto/Crypto.vcxproj.filters
+++ b/src/Crypto/Crypto.vcxproj.filters
@@ -87,6 +87,9 @@
     <ClCompile Include="blake2s_SSSE3.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="Sha2Intel.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="Aes.h">
diff --git a/src/Crypto/Crypto_vs2019.vcxproj b/src/Crypto/Crypto_vs2019.vcxproj
index ab3d5f3b..0abf740a 100644
--- a/src/Crypto/Crypto_vs2019.vcxproj
+++ b/src/Crypto/Crypto_vs2019.vcxproj
@@ -339,6 +339,7 @@
     <ClCompile Include="SerpentFast.c" />
     <ClCompile Include="SerpentFast_simd.cpp" />
     <ClCompile Include="Sha2.c" />
+    <ClCompile Include="Sha2Intel.c" />
     <ClCompile Include="Streebog.c" />
     <ClCompile Include="t1ha2.c" />
     <ClCompile Include="t1ha2_selfcheck.c" />
diff --git a/src/Crypto/Sha2.c b/src/Crypto/Sha2.c
index 3cce21d7..5ae9cae2 100644
--- a/src/Crypto/Sha2.c
+++ b/src/Crypto/Sha2.c
@@ -306,6 +306,9 @@ extern "C"
 	void sha256_sse4(void *input_data, uint_32t digest[8], uint_64t num_blks);
 	void sha256_rorx(void *input_data, uint_32t digest[8], uint_64t num_blks);
 	void sha256_avx(void *input_data, uint_32t digest[8], uint_64t num_blks);
+#if CRYPTOPP_SHANI_AVAILABLE
+	void sha256_intel(void *input_data, uint_32t digest[8], uint_64t num_blks);
+#endif
 #endif
 
 #if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32
@@ -717,6 +720,13 @@ void StdSha256Transform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
 #ifndef NO_OPTIMIZED_VERSIONS
 
 #if CRYPTOPP_BOOL_X64
+#if CRYPTOPP_SHANI_AVAILABLE
+void IntelSha256Transform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
+{
+	sha256_intel(mp, ctx->hash, num_blks);
+}
+#endif
+
 void Avx2Sha256Transform(sha256_ctx* ctx, void* mp, uint_64t num_blks)
 {
 	if (num_blks > 1)
@@ -775,6 +785,11 @@ void sha256_begin(sha256_ctx* ctx)
 {
 #ifndef NO_OPTIMIZED_VERSIONS
 #if CRYPTOPP_BOOL_X64
+#if CRYPTOPP_SHANI_AVAILABLE
+	if (HasSHA256())
+		sha256transfunc = IntelSha256Transform;
+	else
+#endif
 	if (g_isIntel && HasSAVX2() && HasSBMI2())
 		sha256transfunc = Avx2Sha256Transform;
 	else if (g_isIntel && HasSAVX())
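
Taken together, the Sha2.c hunks yield the selection ladder below, with the SHA-NI path checked first. This is a condensed sketch: the rungs after HasSAVX() are pre-existing code that the last hunk truncates, and AvxSha256Transform is an assumed name for the next rung.

#ifndef NO_OPTIMIZED_VERSIONS
#if CRYPTOPP_BOOL_X64
#if CRYPTOPP_SHANI_AVAILABLE
	if (HasSHA256())                            /* new: dedicated SHA-NI path */
		sha256transfunc = IntelSha256Transform;
	else
#endif
	if (g_isIntel && HasSAVX2() && HasSBMI2())  /* pre-existing rungs */
		sha256transfunc = Avx2Sha256Transform;
	else if (g_isIntel && HasSAVX())
		sha256transfunc = AvxSha256Transform;   /* assumed name */
	else
		sha256transfunc = StdSha256Transform;   /* portable fallback */
#endif
#endif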
diff --git a/src/Crypto/Sha2Intel.c b/src/Crypto/Sha2Intel.c
new file mode 100644
index 00000000..c926f76a
--- /dev/null
+++ b/src/Crypto/Sha2Intel.c
@@ -0,0 +1,218 @@
+/*
+* Support for SHA-256 x86 intrinsic
+* Based on public domain code by Sean Gulley
+* (https://github.com/mitls/hacl-star/tree/master/experimental/hash)
+*
+* Botan is released under the Simplified BSD License (see license.txt)
+*/
+
+/* November 10th 2024: Modified for VeraCrypt */
+
+#include "Sha2.h"
+#include "Common/Endian.h"
+#include "cpu.h"
+#include "misc.h"
+
+#if defined(_UEFI) || defined(CRYPTOPP_DISABLE_ASM)
+#define NO_OPTIMIZED_VERSIONS
+#endif
+
+#ifndef NO_OPTIMIZED_VERSIONS
+
+#if CRYPTOPP_SHANI_AVAILABLE
+
+void sha256_intel(void *mp, uint_32t state[8], uint_64t num_blks)
+{
+	// Constants table - align for better performance
+	CRYPTOPP_ALIGN_DATA(64)
+	static const uint_32t K[64] = {
+		0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
+		0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
+		0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
+		0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
+		0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
+		0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
+		0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
+		0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
+	};
+
+	const __m128i* K_mm = (const __m128i*)K;
+	const __m128i* input_mm = (const __m128i*)mp;
+
+	// Create byte shuffle mask for big-endian to little-endian conversion
+	const __m128i MASK = _mm_set_epi64x(0x0c0d0e0f08090a0b, 0x0405060700010203);
+
+	// Load initial values
+	__m128i STATE0 = _mm_loadu_si128((__m128i*)&state[0]);
+	__m128i STATE1 = _mm_loadu_si128((__m128i*)&state[4]);
+
+	// Adjust byte ordering
+	STATE0 = _mm_shuffle_epi32(STATE0, 0xB1); // CDAB
+	STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH
+
+	__m128i TMP = _mm_alignr_epi8(STATE0, STATE1, 8); // ABEF
+	STATE1 = _mm_blend_epi16(STATE1, STATE0, 0xF0); // CDGH
+	STATE0 = TMP;
+
+	while (num_blks > 0) {
+		// Save current state
+		const __m128i ABEF_SAVE = STATE0;
+		const __m128i CDGH_SAVE = STATE1;
+
+		__m128i MSG;
+
+		__m128i TMSG0 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm), MASK);
+		__m128i TMSG1 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 1), MASK);
+		__m128i TMSG2 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 2), MASK);
+		__m128i TMSG3 = _mm_shuffle_epi8(_mm_loadu_si128(input_mm + 3), MASK);
+
+		// Rounds 0-3
+		MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		// Rounds 4-7
+		MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 1));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+		// Rounds 8-11
+		MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 2));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+		// Rounds 12-15
+		MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 3));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
+		TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+		TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+		// Rounds 16-19
+		MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 4));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
+		TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+		TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+		// Rounds 20-23
+		MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 5));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
+		TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+		TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+		// Rounds 24-27
+		MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 6));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
+		TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+		TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+		// Rounds 28-31
+		MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 7));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
+		TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+		TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+		// Rounds 32-35
+		MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 8));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
+		TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+		TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+		// Rounds 36-39
+		MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 9));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
+		TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+		TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);
+
+		// Rounds 40-43
+		MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 10));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
+		TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+		TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);
+
+		// Rounds 44-47
+		MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 11));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG0 = _mm_add_epi32(TMSG0, _mm_alignr_epi8(TMSG3, TMSG2, 4));
+		TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
+		TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
+
+		// Rounds 48-51
+		MSG = _mm_add_epi32(TMSG0, _mm_load_si128(K_mm + 12));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG1 = _mm_add_epi32(TMSG1, _mm_alignr_epi8(TMSG0, TMSG3, 4));
+		TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
+		TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);
+
+		// Rounds 52-55
+		MSG = _mm_add_epi32(TMSG1, _mm_load_si128(K_mm + 13));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG2 = _mm_add_epi32(TMSG2, _mm_alignr_epi8(TMSG1, TMSG0, 4));
+		TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
+
+		// Rounds 56-59
+		MSG = _mm_add_epi32(TMSG2, _mm_load_si128(K_mm + 14));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		TMSG3 = _mm_add_epi32(TMSG3, _mm_alignr_epi8(TMSG2, TMSG1, 4));
+		TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
+
+		// Rounds 60-63
+		MSG = _mm_add_epi32(TMSG3, _mm_load_si128(K_mm + 15));
+		STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
+		STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, _mm_shuffle_epi32(MSG, 0x0E));
+
+		// Add values back to state
+		STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
+		STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);
+
+		input_mm += 4;
+		num_blks--;
+	}
+
+	// Shuffle state back to correct order
+	STATE0 = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA
+	STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG
+
+	// Save state
+	_mm_storeu_si128((__m128i*)&state[0], _mm_blend_epi16(STATE0, STATE1, 0xF0)); // DCBA
+	_mm_storeu_si128((__m128i*)&state[4], _mm_alignr_epi8(STATE1, STATE0, 8)); // HGFE
+}
+
+#endif
+#endif
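
A minimal self-test sketch for the new kernel, assuming a SHA-NI capable CPU and, with GCC/Clang, compilation with -msse4.1 -msha; the local typedefs assume the usual 32/64-bit widths. The state is passed in natural word order, since sha256_intel performs the endianness and ABEF/CDGH shuffles internally.

#include <stdio.h>
#include <string.h>

typedef unsigned int uint_32t;
typedef unsigned long long uint_64t;
void sha256_intel(void *mp, uint_32t state[8], uint_64t num_blks);

int main(void)
{
	/* SHA-256 initial hash value */
	uint_32t state[8] = {
		0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
		0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
	};
	/* "abc" padded to one 512-bit block; message length = 24 bits */
	unsigned char block[64] = { 'a', 'b', 'c', 0x80 };
	const uint_32t expected[8] = {   /* SHA-256("abc") */
		0xBA7816BF, 0x8F01CFEA, 0x414140DE, 0x5DAE2223,
		0xB00361A3, 0x96177A9C, 0xB410FF61, 0xF20015AD
	};
	block[63] = 24;

	sha256_intel(block, state, 1);
	printf(memcmp(state, expected, sizeof expected) == 0 ? "OK\n" : "FAIL\n");
	return 0;
}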
diff --git a/src/Crypto/Sources b/src/Crypto/Sources
index 9542d4b6..bd990382 100644
--- a/src/Crypto/Sources
+++ b/src/Crypto/Sources
@@ -39,6 +39,7 @@ SOURCES = \
 	SerpentFast.c \
 	SerpentFast_simd.cpp \
 	Sha2.c \
+	Sha2Intel.c \
 	t1ha_selfcheck.c \
 	t1ha2.c \
 	t1ha2_selfcheck.c \
diff --git a/src/Crypto/config.h b/src/Crypto/config.h
index d2a9cfea..774f7951 100644
--- a/src/Crypto/config.h
+++ b/src/Crypto/config.h
@@ -152,6 +152,15 @@
 	#define CRYPTOPP_BOOL_SSE41_INTRINSICS_AVAILABLE 0
 #endif
 
+#if !defined(CRYPTOPP_DISABLE_SHANI) && !defined(_M_ARM) && !defined(_M_ARM64) && !defined(__arm__) && !defined(__aarch64__) && !defined(__arm64__) && defined(CRYPTOPP_BOOL_SSE41_INTRINSICS_AVAILABLE) && \
+	(defined(__SHA__) || (_MSC_VER >= 1900) || (__SUNPRO_CC >= 0x5160) || \
+	(CRYPTOPP_GCC_VERSION >= 40900) || (__INTEL_COMPILER >= 1600) || \
+	(CRYPTOPP_LLVM_CLANG_VERSION >= 30400) || (CRYPTOPP_APPLE_CLANG_VERSION >= 50100))
+	#define CRYPTOPP_SHANI_AVAILABLE 1
+#else
+	#define CRYPTOPP_SHANI_AVAILABLE 0
+#endif
+
 // how to allocate 16-byte aligned memory (for SSE2)
 #if defined(_MSC_VER)
 	#define CRYPTOPP_MM_MALLOC_AVAILABLE
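
The leading !defined(CRYPTOPP_DISABLE_SHANI) clause provides a build-time opt-out alongside the compiler-version checks; a minimal illustration of the intended use, in a hypothetical translation unit:

/* Defining CRYPTOPP_DISABLE_SHANI (e.g. on the compiler command line)
   forces the gate to 0 even on a toolchain that supports the instructions. */
#define CRYPTOPP_DISABLE_SHANI
#include "config.h"

#if CRYPTOPP_SHANI_AVAILABLE
#error "never reached: SHA-NI support was explicitly disabled"
#endif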
diff --git a/src/Crypto/cpu.c b/src/Crypto/cpu.c
index effde6ba..174596eb 100644
--- a/src/Crypto/cpu.c
+++ b/src/Crypto/cpu.c
@@ -17,6 +17,10 @@
 
 #ifdef CRYPTOPP_CPUID_AVAILABLE
 
+#if defined(__GNUC__) || defined(__clang__)
+	#include <cpuid.h> // for __get_cpuid and __get_cpuid_count
+#endif
+
 #if _MSC_VER >= 1400 && CRYPTOPP_BOOL_X64
 
 int CpuId(uint32 input, uint32 output[4])
@@ -207,6 +211,7 @@ volatile int g_x86DetectionDone = 0;
 volatile int g_hasISSE = 0, g_hasSSE2 = 0, g_hasSSSE3 = 0, g_hasMMX = 0, g_hasAESNI = 0, g_hasCLMUL = 0, g_isP4 = 0;
 volatile int g_hasAVX = 0, g_hasAVX2 = 0, g_hasBMI2 = 0, g_hasSSE42 = 0, g_hasSSE41 = 0, g_isIntel = 0, g_isAMD = 0;
 volatile int g_hasRDRAND = 0, g_hasRDSEED = 0;
+volatile int g_hasSHA256 = 0;
 volatile uint32 g_cacheLineSize = CRYPTOPP_L1_CACHE_LINE_SIZE;
 
 VC_INLINE int IsIntel(const uint32 output[4])
@@ -306,6 +311,35 @@ static int Detect_MS_HyperV_AES ()
 
 #endif
 
+static BOOL CheckSHA256Support() {
+#if CRYPTOPP_BOOL_X64 && CRYPTOPP_SHANI_AVAILABLE
+#if defined(_MSC_VER) // Windows with MSVC
+	int cpuInfo[4] = { 0 };
+	__cpuidex(cpuInfo, 7, 0);
+	return (cpuInfo[1] & (1 << 29)) != 0 ? TRUE : FALSE;
+
+#elif defined(__GNUC__) || defined(__clang__) // Linux, FreeBSD, macOS with GCC/Clang
+	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+	// First check if CPUID leaf 7 is supported
+	if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
+		if (eax >= 7) {
+			// Now check SHA-256 support in leaf 7, sub-leaf 0
+			if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
+				return (ebx & (1 << 29)) != 0 ? TRUE : FALSE;
+			}
+		}
+	}
+	return FALSE;
+
+#else
+	#error "Unsupported compiler"
+#endif
+#else
+	return FALSE;
+#endif
+}
+
+
 void DetectX86Features()
 {
 	uint32 cpuid[4] = {0}, cpuid1[4] = {0}, cpuid2[4] = {0};
@@ -334,6 +368,7 @@ void DetectX86Features()
 	g_hasAESNI = g_hasSSE2 && (cpuid1[2] & (1<<25));
 #endif
 	g_hasCLMUL = g_hasSSE2 && (cpuid1[2] & (1<<1));
+	g_hasSHA256 = CheckSHA256Support();
 
 #if !defined (_UEFI) && ((defined(__AES__) && defined(__PCLMUL__)) || defined(__INTEL_COMPILER) || CRYPTOPP_BOOL_AESNI_INTRINSICS_AVAILABLE)
 	// Hypervisor = bit 31 of ECX of CPUID leaf 0x1
@@ -439,6 +474,7 @@ void DisableCPUExtendedFeatures ()
 	g_hasSSSE3 = 0;
 	g_hasAESNI = 0;
 	g_hasCLMUL = 0;
+	g_hasSHA256 = 0;
 }
 
 #endif
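
The GCC/Clang branch of CheckSHA256Support, extracted into a standalone probe for quick verification on a development machine. SHA extensions are reported in CPUID leaf 7, sub-leaf 0, EBX bit 29; leaf 0 must first confirm that leaf 7 exists.

#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang builtin CPUID wrappers */

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	int has_sha = 0;

	if (__get_cpuid(0, &eax, &ebx, &ecx, &edx) && eax >= 7) {
		if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
			has_sha = (ebx >> 29) & 1;
	}
	printf("SHA extensions: %s\n", has_sha ? "yes" : "no");
	return 0;
}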
diff --git a/src/Crypto/cpu.h b/src/Crypto/cpu.h
index 2661bf1c..b0df6462 100644
--- a/src/Crypto/cpu.h
+++ b/src/Crypto/cpu.h
@@ -207,6 +207,22 @@ extern __m128i _mm_aesdeclast_si128(__m128i v, __m128i rkey);
 #endif
 #endif
 
+#if CRYPTOPP_SHANI_AVAILABLE
+#if defined(TC_WINDOWS_DRIVER) || defined (_UEFI)
+#if defined(__cplusplus)
+extern "C" {
+#endif
+extern __m128i __cdecl _mm_sha256msg1_epu32(__m128i, __m128i);
+extern __m128i __cdecl _mm_sha256msg2_epu32(__m128i, __m128i);
+extern __m128i __cdecl _mm_sha256rnds2_epu32(__m128i, __m128i, __m128i);
+#if defined(__cplusplus)
+}
+#endif
+#else
+#include <immintrin.h>
+#endif
+#endif
+
 #if CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X64
 
 #if defined(__cplusplus)
@@ -234,6 +250,7 @@ extern volatile int g_hasCLMUL;
 extern volatile int g_isP4;
 extern volatile int g_hasRDRAND;
 extern volatile int g_hasRDSEED;
+extern volatile int g_hasSHA256;
 extern volatile int g_isIntel;
 extern volatile int g_isAMD;
 extern volatile uint32 g_cacheLineSize;
@@ -262,6 +279,7 @@ void DisableCPUExtendedFeatures ();
 #define IsP4() g_isP4
 #define HasRDRAND() g_hasRDRAND
 #define HasRDSEED() g_hasRDSEED
+#define HasSHA256() g_hasSHA256
 #define IsCpuIntel() g_isIntel
 #define IsCpuAMD() g_isAMD
 #define GetCacheLineSize() g_cacheLineSize
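
Callers are unaffected by the patch: once DetectX86Features has populated g_hasSHA256, sha256_begin installs the accelerated transform transparently. A usage sketch, assuming the customary sha256_hash/sha256_end companions declared in Sha2.h (their exact signatures are an assumption here):

#include <stdio.h>
#include "Sha2.h"
#include "cpu.h"

int main(void)
{
	sha256_ctx ctx;
	unsigned char digest[32];
	int i;

	DetectX86Features();          /* populates g_hasSHA256 and friends */

	sha256_begin(&ctx);           /* selects IntelSha256Transform if available */
	sha256_hash((unsigned char*)"abc", 3, &ctx);
	sha256_end(digest, &ctx);

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}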