Update slow-hash.c

1. Added huge pages support and optimized scratchpad twiddling (credits to dga).
2. Added AES-NI key expansion support.
3. Minor speedup to scratchpad initialization/finalization.
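The scratchpad now lives in a per-thread buffer (hp_state) that is allocated once, preferably from huge pages, and reused across hashes. Below is a minimal usage sketch of the new entry points; the header name and the main() harness are illustrative assumptions, not part of this commit.

/* Hypothetical harness; only slow_hash_allocate_state(), cn_slow_hash() and
 * slow_hash_free_state() come from this file. cn_slow_hash() also allocates
 * the scratchpad on demand if the caller forgets, but doing it once up front
 * avoids the check on the hot path. */
#include <stdio.h>
#include "hash-ops.h" /* assumed to declare the functions used below */

int main(void)
{
    char hash[32];
    const char data[] = "test input";

    slow_hash_allocate_state();                  /* huge pages if available, else malloc() */
    cn_slow_hash(data, sizeof(data) - 1, hash);  /* 2 MB scratchpad lives in hp_state */
    slow_hash_free_state();

    for (int i = 0; i < 32; i++)
        printf("%02x", (unsigned char) hash[i]);
    printf("\n");
    return 0;
}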
NoodleDoodleNoodleDoodleNoodleDoodleNoo 2014-06-16 01:58:17 -07:00
parent 9b7accb257
commit 37c5f393f1


@@ -11,143 +11,429 @@
#include "hash-ops.h" #include "hash-ops.h"
#include "oaes_lib.h" #include "oaes_lib.h"
static void (*const extra_hashes[4])(const void *, size_t, char *) = { #include <emmintrin.h>
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
#define MEMORY (1 << 21) /* 2 MiB */ #if defined(_MSC_VER)
#include <intrin.h>
#include <Windows.h>
#define STATIC
#define INLINE __inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __declspec(align(16))
#endif
#else
#include <wmmintrin.h>
#include <sys/mman.h>
#define STATIC static
#define INLINE inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
#endif
#endif
#if defined(__INTEL_COMPILER)
#define ASM __asm__
#elif !defined(_MSC_VER)
#define ASM __asm__
#else
#define ASM __asm
#endif
#define MEMORY (1 << 21) // 2MB scratchpad
#define ITER (1 << 20) #define ITER (1 << 20)
#define AES_BLOCK_SIZE 16 #define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 /*16*/ #define AES_KEY_SIZE 32
#define INIT_SIZE_BLK 8 #define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) #define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
static size_t e2i(const uint8_t* a, size_t count) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (count - 1); } #define U64(x) ((uint64_t *) (x))
#define R128(x) ((__m128i *) (x))
#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a)))
static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) { #define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)
uint64_t a0, b0; #if defined(_MSC_VER)
uint64_t hi, lo; #define __mul() lo = _umul128(c[0], b[0], &hi);
#else
#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc");
#endif
a0 = SWAP64LE(((uint64_t*)a)[0]); #define pre_aes() \
b0 = SWAP64LE(((uint64_t*)b)[0]); j = state_index(a); \
lo = mul128(a0, b0, &hi); _c = _mm_load_si128(R128(&hp_state[j])); \
((uint64_t*)res)[0] = SWAP64LE(hi); _a = _mm_load_si128(R128(a)); \
((uint64_t*)res)[1] = SWAP64LE(lo);
}
static void sum_half_blocks(uint8_t* a, const uint8_t* b) { // dga's optimized scratchpad twiddling
uint64_t a0, a1, b0, b1; #define post_aes() \
_mm_store_si128(R128(c), _c); \
_b = _mm_xor_si128(_b, _c); \
_mm_store_si128(R128(&hp_state[j]), _b); \
j = state_index(c); \
p = U64(&hp_state[j]); \
b[0] = p[0]; b[1] = p[1]; \
__mul(); \
a[0] += hi; a[1] += lo; \
p = U64(&hp_state[j]); \
p[0] = a[0]; p[1] = a[1]; \
a[0] ^= b[0]; a[1] ^= b[1]; \
_b = _c; \
a0 = SWAP64LE(((uint64_t*)a)[0]); #if defined(_MSC_VER)
a1 = SWAP64LE(((uint64_t*)a)[1]); #define THREADV __declspec(thread)
b0 = SWAP64LE(((uint64_t*)b)[0]); #else
b1 = SWAP64LE(((uint64_t*)b)[1]); #define THREADV __thread
a0 += b0; #endif
a1 += b1;
((uint64_t*)a)[0] = SWAP64LE(a0);
((uint64_t*)a)[1] = SWAP64LE(a1);
}
static void copy_block(uint8_t* dst, const uint8_t* src) { extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey);
memcpy(dst, src, AES_BLOCK_SIZE); extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey);
}
static void swap_blocks(uint8_t* a, uint8_t* b) {
size_t i;
uint8_t t;
for (i = 0; i < AES_BLOCK_SIZE; i++) {
t = a[i];
a[i] = b[i];
b[i] = t;
}
}
static void xor_blocks(uint8_t* a, const uint8_t* b) {
size_t i;
for (i = 0; i < AES_BLOCK_SIZE; i++) {
a[i] ^= b[i];
}
}
#pragma pack(push, 1) #pragma pack(push, 1)
union cn_slow_hash_state { union cn_slow_hash_state
union hash_state hs; {
struct { union hash_state hs;
uint8_t k[64]; struct
uint8_t init[INIT_SIZE_BYTE]; {
}; uint8_t k[64];
uint8_t init[INIT_SIZE_BYTE];
};
}; };
#pragma pack(pop) #pragma pack(pop)
void cn_slow_hash(const void *data, size_t length, char *hash) { THREADV uint8_t *hp_state = NULL;
uint8_t long_state[MEMORY]; THREADV int hp_allocated = 0;
union cn_slow_hash_state state;
uint8_t text[INIT_SIZE_BYTE];
uint8_t a[AES_BLOCK_SIZE];
uint8_t b[AES_BLOCK_SIZE];
uint8_t c[AES_BLOCK_SIZE];
uint8_t d[AES_BLOCK_SIZE];
size_t i, j;
uint8_t aes_key[AES_KEY_SIZE];
OAES_CTX* aes_ctx;
hash_process(&state.hs, data, length); #if defined(_MSC_VER)
memcpy(text, state.init, INIT_SIZE_BYTE); #define cpuid(info,x) __cpuidex(info,x,0)
memcpy(aes_key, state.hs.b, AES_KEY_SIZE); #else
aes_ctx = oaes_alloc(); void cpuid(int CPUInfo[4], int InfoType)
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { {
for (j = 0; j < INIT_SIZE_BLK; j++) { ASM __volatile__
oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE); (
oaes_pseudo_encrypt_ecb(aes_ctx, &text[AES_BLOCK_SIZE * j]); "cpuid":
/*memcpy(aes_key, &text[AES_BLOCK_SIZE * j], AES_KEY_SIZE);*/ "=a" (CPUInfo[0]),
memcpy(aes_key, state.hs.b, AES_KEY_SIZE); "=b" (CPUInfo[1]),
} "=c" (CPUInfo[2]),
memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); "=d" (CPUInfo[3]) :
} "a" (InfoType), "c" (0)
);
for (i = 0; i < 16; i++) { }
a[i] = state.k[ i] ^ state.k[32 + i]; #endif
b[i] = state.k[16 + i] ^ state.k[48 + i];
} STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
for (i = 0; i < ITER / 2; i++) { U64(a)[0] ^= U64(b)[0];
/* Dependency chain: address -> read value ------+ U64(a)[1] ^= U64(b)[1];
* written value <-+ hard function (AES or MUL) <+ }
* next address <-+
*/ STATIC INLINE int check_aes_hw(void)
/* Iteration 1 */ {
j = e2i(a, MEMORY / AES_BLOCK_SIZE); int cpuid_results[4];
copy_block(c, &long_state[j * AES_BLOCK_SIZE]); static int supported = -1;
oaes_encryption_round(a, c);
xor_blocks(b, c); if(supported >= 0)
swap_blocks(b, c); return supported;
copy_block(&long_state[j * AES_BLOCK_SIZE], c);
assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); cpuid(cpuid_results,1);
swap_blocks(a, b); return supported = cpuid_results[2] & (1 << 25);
/* Iteration 2 */ }
j = e2i(a, MEMORY / AES_BLOCK_SIZE);
copy_block(c, &long_state[j * AES_BLOCK_SIZE]); STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2)
mul(a, c, d); {
sum_half_blocks(b, d); __m128i t4;
swap_blocks(b, c); *t2 = _mm_shuffle_epi32(*t2, 0xff);
xor_blocks(b, c); t4 = _mm_slli_si128(*t1, 0x04);
copy_block(&long_state[j * AES_BLOCK_SIZE], c); *t1 = _mm_xor_si128(*t1, t4);
assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); t4 = _mm_slli_si128(t4, 0x04);
swap_blocks(a, b); *t1 = _mm_xor_si128(*t1, t4);
} t4 = _mm_slli_si128(t4, 0x04);
*t1 = _mm_xor_si128(*t1, t4);
memcpy(text, state.init, INIT_SIZE_BYTE); *t1 = _mm_xor_si128(*t1, *t2);
for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { }
for (j = 0; j < INIT_SIZE_BLK; j++) {
/*oaes_key_import_data(aes_ctx, &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE], AES_KEY_SIZE);*/ STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3)
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); {
xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); __m128i t2, t4;
oaes_pseudo_encrypt_ecb(aes_ctx, &text[j * AES_BLOCK_SIZE]); t4 = _mm_aeskeygenassist_si128(*t1, 0x00);
} t2 = _mm_shuffle_epi32(t4, 0xaa);
} t4 = _mm_slli_si128(*t3, 0x04);
memcpy(state.init, text, INIT_SIZE_BYTE); *t3 = _mm_xor_si128(*t3, t4);
hash_permutation(&state.hs); t4 = _mm_slli_si128(t4, 0x04);
/*memcpy(hash, &state, 32);*/ *t3 = _mm_xor_si128(*t3, t4);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash); t4 = _mm_slli_si128(t4, 0x04);
oaes_free(&aes_ctx); *t3 = _mm_xor_si128(*t3, t4);
*t3 = _mm_xor_si128(*t3, t2);
}
STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey)
{
__m128i *ek = R128(expandedKey);
__m128i t1, t2, t3;
t1 = _mm_loadu_si128(R128(key));
t3 = _mm_loadu_si128(R128(key + 16));
ek[0] = t1;
ek[1] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x01);
aes_256_assist1(&t1, &t2);
ek[2] = t1;
aes_256_assist2(&t1, &t3);
ek[3] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x02);
aes_256_assist1(&t1, &t2);
ek[4] = t1;
aes_256_assist2(&t1, &t3);
ek[5] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x04);
aes_256_assist1(&t1, &t2);
ek[6] = t1;
aes_256_assist2(&t1, &t3);
ek[7] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x08);
aes_256_assist1(&t1, &t2);
ek[8] = t1;
aes_256_assist2(&t1, &t3);
ek[9] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x10);
aes_256_assist1(&t1, &t2);
ek[10] = t1;
}
STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out,
const uint8_t *expandedKey, int nblocks)
{
__m128i *k = R128(expandedKey);
__m128i d;
int i;
for(i = 0; i < nblocks; i++)
{
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
d = _mm_aesenc_si128(d, *R128(&k[0]));
d = _mm_aesenc_si128(d, *R128(&k[1]));
d = _mm_aesenc_si128(d, *R128(&k[2]));
d = _mm_aesenc_si128(d, *R128(&k[3]));
d = _mm_aesenc_si128(d, *R128(&k[4]));
d = _mm_aesenc_si128(d, *R128(&k[5]));
d = _mm_aesenc_si128(d, *R128(&k[6]));
d = _mm_aesenc_si128(d, *R128(&k[7]));
d = _mm_aesenc_si128(d, *R128(&k[8]));
d = _mm_aesenc_si128(d, *R128(&k[9]));
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
}
}
STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out,
const uint8_t *expandedKey, const uint8_t *xor, int nblocks)
{
__m128i *k = R128(expandedKey);
__m128i *x = R128(xor);
__m128i d;
int i;
for(i = 0; i < nblocks; i++)
{
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
d = _mm_xor_si128(d, *R128(x++));
d = _mm_aesenc_si128(d, *R128(&k[0]));
d = _mm_aesenc_si128(d, *R128(&k[1]));
d = _mm_aesenc_si128(d, *R128(&k[2]));
d = _mm_aesenc_si128(d, *R128(&k[3]));
d = _mm_aesenc_si128(d, *R128(&k[4]));
d = _mm_aesenc_si128(d, *R128(&k[5]));
d = _mm_aesenc_si128(d, *R128(&k[6]));
d = _mm_aesenc_si128(d, *R128(&k[7]));
d = _mm_aesenc_si128(d, *R128(&k[8]));
d = _mm_aesenc_si128(d, *R128(&k[9]));
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
}
}
#if defined(_MSC_VER)
BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable)
{
struct
{
DWORD count;
LUID_AND_ATTRIBUTES privilege[1];
} info;
HANDLE token;
if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token))
return FALSE;
info.count = 1;
info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0;
if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid)))
return FALSE;
if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL))
return FALSE;
if (GetLastError() != ERROR_SUCCESS)
return FALSE;
CloseHandle(token);
return TRUE;
}
#endif
void slow_hash_allocate_state(void)
{
int state = 0;
if(hp_state != NULL)
return;
#if defined(_MSC_VER)
SetLockPagesPrivilege(GetCurrentProcess(), TRUE);
hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES |
MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);
if(hp_state == MAP_FAILED)
hp_state = NULL;
#endif
hp_allocated = 1;
if(hp_state == NULL)
{
hp_allocated = 0;
hp_state = (uint8_t *) malloc(MEMORY);
}
}
void slow_hash_free_state(void)
{
if(hp_state == NULL)
return;
if(!hp_allocated)
free(hp_state);
else
{
#if defined(_MSC_VER)
VirtualFree(hp_state, MEMORY, MEM_RELEASE);
#else
munmap(hp_state, MEMORY);
#endif
}
hp_state = NULL;
hp_allocated = 0;
}
void cn_slow_hash(const void *data, size_t length, char *hash)
{
RDATA_ALIGN16 uint8_t expandedKey[240];
uint8_t text[INIT_SIZE_BYTE];
RDATA_ALIGN16 uint64_t a[2];
RDATA_ALIGN16 uint64_t b[2];
RDATA_ALIGN16 uint64_t c[2];
RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE];
union cn_slow_hash_state state;
__m128i _a, _b, _c;
uint64_t hi, lo;
size_t i, j;
uint64_t *p = NULL;
oaes_ctx *aes_ctx;
int useAes = check_aes_hw();
static void (*const extra_hashes[4])(const void *, size_t, char *) =
{
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
// this isn't supposed to happen, but guard against it for now.
if(hp_state == NULL)
slow_hash_allocate_state();
hash_process(&state.hs, data, length);
memcpy(text, state.init, INIT_SIZE_BYTE);
if(useAes)
{
aes_expand_key(state.hs.b, expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK);
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
else
{
aes_ctx = (oaes_ctx *) oaes_alloc();
oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];
_b = _mm_load_si128(R128(b));
// this is ugly but the branching affects the loop somewhat so put it outside.
if(useAes)
{
for(i = 0; i < ITER / 2; i++)
{
pre_aes();
_c = _mm_aesenc_si128(_c, _a);
// post_aes(), optimized scratchpad twiddling (credits to dga)
post_aes();
}
}
else
{
for(i = 0; i < ITER / 2; i++)
{
pre_aes();
aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a);
post_aes();
}
}
memcpy(text, state.init, INIT_SIZE_BYTE);
if(useAes)
{
aes_expand_key(&state.hs.b[32], expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
// add the xor to the pseudo round
aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK);
}
}
else
{
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
{
xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
}
}
oaes_free((OAES_CTX **) &aes_ctx);
}
memcpy(state.init, text, INIT_SIZE_BYTE);
hash_permutation(&state.hs);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
}
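A side note on the dga scratchpad addressing used by pre_aes()/post_aes() above: state_index() turns the low 64-bit word of a register into a 16-byte-aligned byte offset inside the 2 MB scratchpad via a shift, mask, and shift. A standalone sketch with the constants repeated from the file; the sample input value is arbitrary.

/* Standalone illustration of the addressing; MEMORY/TOTALBLOCKS/state_index
 * are copied from slow-hash.c above, the input value is made up. */
#include <stdint.h>
#include <stdio.h>

#define MEMORY (1 << 21)                 /* 2 MB scratchpad */
#define TOTALBLOCKS (MEMORY / 16)        /* 131072 blocks of 16 bytes */
#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4)

int main(void)
{
    uint64_t a[2] = { 0x123456789abcdef0ULL, 0 };
    uint64_t j = state_index(a);

    /* keeps bits 4..20 of a[0], so the result is a multiple of 16 below MEMORY */
    printf("offset = %llu, 16-aligned = %d, in range = %d\n",
           (unsigned long long) j, (int)(j % 16 == 0), (int)(j < MEMORY));
    return 0;
}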