Revert "Update slow-hash.c"

This reverts commit 37c5f393f1.
NoodleDoodleNoodleDoodleNoodleDoodleNoo 2014-06-16 03:22:33 -07:00
parent 802d58054f
commit 81cf5650c4

slow-hash.c

@@ -11,429 +11,143 @@
#include "hash-ops.h" #include "hash-ops.h"
#include "oaes_lib.h" #include "oaes_lib.h"
#include <emmintrin.h> static void (*const extra_hashes[4])(const void *, size_t, char *) = {
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
#if defined(_MSC_VER) #define MEMORY (1 << 21) /* 2 MiB */
#include <intrin.h>
#include <Windows.h>
#define STATIC
#define INLINE __inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __declspec(align(16))
#endif
#else
#include <wmmintrin.h>
#include <sys/mman.h>
#define STATIC static
#define INLINE inline
#if !defined(RDATA_ALIGN16)
#define RDATA_ALIGN16 __attribute__ ((aligned(16)))
#endif
#endif
#if defined(__INTEL_COMPILER)
#define ASM __asm__
#elif !defined(_MSC_VER)
#define ASM __asm__
#else
#define ASM __asm
#endif
#define MEMORY (1 << 21) // 2MB scratchpad
#define ITER (1 << 20) #define ITER (1 << 20)
#define AES_BLOCK_SIZE 16 #define AES_BLOCK_SIZE 16
#define AES_KEY_SIZE 32 #define AES_KEY_SIZE 32 /*16*/
#define INIT_SIZE_BLK 8 #define INIT_SIZE_BLK 8
#define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) #define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE)
#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE)
#define U64(x) ((uint64_t *) (x)) static size_t e2i(const uint8_t* a, size_t count) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (count - 1); }
#define R128(x) ((__m128i *) (x))
#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a)))
#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) {
#if defined(_MSC_VER) uint64_t a0, b0;
#define __mul() lo = _umul128(c[0], b[0], &hi); uint64_t hi, lo;
#else
#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc");
#endif
#define pre_aes() \ a0 = SWAP64LE(((uint64_t*)a)[0]);
j = state_index(a); \ b0 = SWAP64LE(((uint64_t*)b)[0]);
_c = _mm_load_si128(R128(&hp_state[j])); \ lo = mul128(a0, b0, &hi);
_a = _mm_load_si128(R128(a)); \ ((uint64_t*)res)[0] = SWAP64LE(hi);
((uint64_t*)res)[1] = SWAP64LE(lo);
// dga's optimized scratchpad twiddling }
#define post_aes() \
_mm_store_si128(R128(c), _c); \
_b = _mm_xor_si128(_b, _c); \
_mm_store_si128(R128(&hp_state[j]), _b); \
j = state_index(c); \
p = U64(&hp_state[j]); \
b[0] = p[0]; b[1] = p[1]; \
__mul(); \
a[0] += hi; a[1] += lo; \
p = U64(&hp_state[j]); \
p[0] = a[0]; p[1] = a[1]; \
a[0] ^= b[0]; a[1] ^= b[1]; \
_b = _c; \
#if defined(_MSC_VER)
#define THREADV __declspec(thread)
#else
#define THREADV __thread
#endif
extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey); static void sum_half_blocks(uint8_t* a, const uint8_t* b) {
extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey); uint64_t a0, a1, b0, b1;
a0 = SWAP64LE(((uint64_t*)a)[0]);
a1 = SWAP64LE(((uint64_t*)a)[1]);
b0 = SWAP64LE(((uint64_t*)b)[0]);
b1 = SWAP64LE(((uint64_t*)b)[1]);
a0 += b0;
a1 += b1;
((uint64_t*)a)[0] = SWAP64LE(a0);
((uint64_t*)a)[1] = SWAP64LE(a1);
}
static void copy_block(uint8_t* dst, const uint8_t* src) {
memcpy(dst, src, AES_BLOCK_SIZE);
}
static void swap_blocks(uint8_t* a, uint8_t* b) {
size_t i;
uint8_t t;
for (i = 0; i < AES_BLOCK_SIZE; i++) {
t = a[i];
a[i] = b[i];
b[i] = t;
}
}
static void xor_blocks(uint8_t* a, const uint8_t* b) {
size_t i;
for (i = 0; i < AES_BLOCK_SIZE; i++) {
a[i] ^= b[i];
}
}
#pragma pack(push, 1) #pragma pack(push, 1)
union cn_slow_hash_state union cn_slow_hash_state {
{ union hash_state hs;
union hash_state hs; struct {
struct uint8_t k[64];
{ uint8_t init[INIT_SIZE_BYTE];
uint8_t k[64]; };
uint8_t init[INIT_SIZE_BYTE];
};
}; };
#pragma pack(pop) #pragma pack(pop)
THREADV uint8_t *hp_state = NULL; void cn_slow_hash(const void *data, size_t length, char *hash) {
THREADV int hp_allocated = 0; uint8_t long_state[MEMORY];
union cn_slow_hash_state state;
uint8_t text[INIT_SIZE_BYTE];
uint8_t a[AES_BLOCK_SIZE];
uint8_t b[AES_BLOCK_SIZE];
uint8_t c[AES_BLOCK_SIZE];
uint8_t d[AES_BLOCK_SIZE];
size_t i, j;
uint8_t aes_key[AES_KEY_SIZE];
OAES_CTX* aes_ctx;
#if defined(_MSC_VER) hash_process(&state.hs, data, length);
#define cpuid(info,x) __cpuidex(info,x,0) memcpy(text, state.init, INIT_SIZE_BYTE);
#else memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
void cpuid(int CPUInfo[4], int InfoType) aes_ctx = oaes_alloc();
{ for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
ASM __volatile__ for (j = 0; j < INIT_SIZE_BLK; j++) {
( oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE);
"cpuid": oaes_pseudo_encrypt_ecb(aes_ctx, &text[AES_BLOCK_SIZE * j]);
"=a" (CPUInfo[0]), /*memcpy(aes_key, &text[AES_BLOCK_SIZE * j], AES_KEY_SIZE);*/
"=b" (CPUInfo[1]), memcpy(aes_key, state.hs.b, AES_KEY_SIZE);
"=c" (CPUInfo[2]),
"=d" (CPUInfo[3]) :
"a" (InfoType), "c" (0)
);
}
#endif
STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b)
{
U64(a)[0] ^= U64(b)[0];
U64(a)[1] ^= U64(b)[1];
}
STATIC INLINE int check_aes_hw(void)
{
int cpuid_results[4];
static int supported = -1;
if(supported >= 0)
return supported;
cpuid(cpuid_results,1);
return supported = cpuid_results[2] & (1 << 25);
}
STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2)
{
__m128i t4;
*t2 = _mm_shuffle_epi32(*t2, 0xff);
t4 = _mm_slli_si128(*t1, 0x04);
*t1 = _mm_xor_si128(*t1, t4);
t4 = _mm_slli_si128(t4, 0x04);
*t1 = _mm_xor_si128(*t1, t4);
t4 = _mm_slli_si128(t4, 0x04);
*t1 = _mm_xor_si128(*t1, t4);
*t1 = _mm_xor_si128(*t1, *t2);
}
STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3)
{
__m128i t2, t4;
t4 = _mm_aeskeygenassist_si128(*t1, 0x00);
t2 = _mm_shuffle_epi32(t4, 0xaa);
t4 = _mm_slli_si128(*t3, 0x04);
*t3 = _mm_xor_si128(*t3, t4);
t4 = _mm_slli_si128(t4, 0x04);
*t3 = _mm_xor_si128(*t3, t4);
t4 = _mm_slli_si128(t4, 0x04);
*t3 = _mm_xor_si128(*t3, t4);
*t3 = _mm_xor_si128(*t3, t2);
}
STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey)
{
__m128i *ek = R128(expandedKey);
__m128i t1, t2, t3;
t1 = _mm_loadu_si128(R128(key));
t3 = _mm_loadu_si128(R128(key + 16));
ek[0] = t1;
ek[1] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x01);
aes_256_assist1(&t1, &t2);
ek[2] = t1;
aes_256_assist2(&t1, &t3);
ek[3] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x02);
aes_256_assist1(&t1, &t2);
ek[4] = t1;
aes_256_assist2(&t1, &t3);
ek[5] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x04);
aes_256_assist1(&t1, &t2);
ek[6] = t1;
aes_256_assist2(&t1, &t3);
ek[7] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x08);
aes_256_assist1(&t1, &t2);
ek[8] = t1;
aes_256_assist2(&t1, &t3);
ek[9] = t3;
t2 = _mm_aeskeygenassist_si128(t3, 0x10);
aes_256_assist1(&t1, &t2);
ek[10] = t1;
}
STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out,
const uint8_t *expandedKey, int nblocks)
{
__m128i *k = R128(expandedKey);
__m128i d;
int i;
for(i = 0; i < nblocks; i++)
{
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE));
d = _mm_aesenc_si128(d, *R128(&k[0]));
d = _mm_aesenc_si128(d, *R128(&k[1]));
d = _mm_aesenc_si128(d, *R128(&k[2]));
d = _mm_aesenc_si128(d, *R128(&k[3]));
d = _mm_aesenc_si128(d, *R128(&k[4]));
d = _mm_aesenc_si128(d, *R128(&k[5]));
d = _mm_aesenc_si128(d, *R128(&k[6]));
d = _mm_aesenc_si128(d, *R128(&k[7]));
d = _mm_aesenc_si128(d, *R128(&k[8]));
d = _mm_aesenc_si128(d, *R128(&k[9]));
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d);
} }
} memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out,
const uint8_t *expandedKey, const uint8_t *xor, int nblocks) for (i = 0; i < 16; i++) {
{ a[i] = state.k[ i] ^ state.k[32 + i];
__m128i *k = R128(expandedKey); b[i] = state.k[16 + i] ^ state.k[48 + i];
__m128i *x = R128(xor); }
__m128i d;
int i; for (i = 0; i < ITER / 2; i++) {
/* Dependency chain: address -> read value ------+
for(i = 0; i < nblocks; i++) * written value <-+ hard function (AES or MUL) <+
{ * next address <-+
d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); */
d = _mm_xor_si128(d, *R128(x++)); /* Iteration 1 */
d = _mm_aesenc_si128(d, *R128(&k[0])); j = e2i(a, MEMORY / AES_BLOCK_SIZE);
d = _mm_aesenc_si128(d, *R128(&k[1])); copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
d = _mm_aesenc_si128(d, *R128(&k[2])); oaes_encryption_round(a, c);
d = _mm_aesenc_si128(d, *R128(&k[3])); xor_blocks(b, c);
d = _mm_aesenc_si128(d, *R128(&k[4])); swap_blocks(b, c);
d = _mm_aesenc_si128(d, *R128(&k[5])); copy_block(&long_state[j * AES_BLOCK_SIZE], c);
d = _mm_aesenc_si128(d, *R128(&k[6])); assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
d = _mm_aesenc_si128(d, *R128(&k[7])); swap_blocks(a, b);
d = _mm_aesenc_si128(d, *R128(&k[8])); /* Iteration 2 */
d = _mm_aesenc_si128(d, *R128(&k[9])); j = e2i(a, MEMORY / AES_BLOCK_SIZE);
_mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); copy_block(c, &long_state[j * AES_BLOCK_SIZE]);
} mul(a, c, d);
} sum_half_blocks(b, d);
swap_blocks(b, c);
#if defined(_MSC_VER) xor_blocks(b, c);
BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable) copy_block(&long_state[j * AES_BLOCK_SIZE], c);
{ assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE));
struct swap_blocks(a, b);
{ }
DWORD count;
LUID_AND_ATTRIBUTES privilege[1]; memcpy(text, state.init, INIT_SIZE_BYTE);
} info; for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) {
for (j = 0; j < INIT_SIZE_BLK; j++) {
HANDLE token; /*oaes_key_import_data(aes_ctx, &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE], AES_KEY_SIZE);*/
if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token)) oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
return FALSE; xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
oaes_pseudo_encrypt_ecb(aes_ctx, &text[j * AES_BLOCK_SIZE]);
info.count = 1; }
info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0; }
memcpy(state.init, text, INIT_SIZE_BYTE);
if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid))) hash_permutation(&state.hs);
return FALSE; /*memcpy(hash, &state, 32);*/
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL)) oaes_free(&aes_ctx);
return FALSE;
if (GetLastError() != ERROR_SUCCESS)
return FALSE;
CloseHandle(token);
return TRUE;
}
#endif
void slow_hash_allocate_state(void)
{
int state = 0;
if(hp_state != NULL)
return;
#if defined(_MSC_VER)
SetLockPagesPrivilege(GetCurrentProcess(), TRUE);
hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES |
MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);
if(hp_state == MAP_FAILED)
hp_state = NULL;
#endif
hp_allocated = 1;
if(hp_state == NULL)
{
hp_allocated = 0;
hp_state = (uint8_t *) malloc(MEMORY);
}
}
void slow_hash_free_state(void)
{
if(hp_state == NULL)
return;
if(!hp_allocated)
free(hp_state);
else
{
#if defined(_MSC_VER)
VirtualFree(hp_state, MEMORY, MEM_RELEASE);
#else
munmap(hp_state, MEMORY);
#endif
}
hp_state = NULL;
hp_allocated = 0;
}
void cn_slow_hash(const void *data, size_t length, char *hash)
{
RDATA_ALIGN16 uint8_t expandedKey[240];
uint8_t text[INIT_SIZE_BYTE];
RDATA_ALIGN16 uint64_t a[2];
RDATA_ALIGN16 uint64_t b[2];
RDATA_ALIGN16 uint64_t c[2];
RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE];
union cn_slow_hash_state state;
__m128i _a, _b, _c;
uint64_t hi, lo;
size_t i, j;
uint64_t *p = NULL;
oaes_ctx *aes_ctx;
int useAes = check_aes_hw();
static void (*const extra_hashes[4])(const void *, size_t, char *) =
{
hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein
};
// this isn't supposed to happen, but guard against it for now.
if(hp_state == NULL)
slow_hash_allocate_state();
hash_process(&state.hs, data, length);
memcpy(text, state.init, INIT_SIZE_BYTE);
if(useAes)
{
aes_expand_key(state.hs.b, expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK);
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
else
{
aes_ctx = (oaes_ctx *) oaes_alloc();
oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE);
}
}
U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0];
U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1];
U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0];
U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1];
_b = _mm_load_si128(R128(b));
// this is ugly but the branching affects the loop somewhat so put it outside.
if(useAes)
{
for(i = 0; i < ITER / 2; i++)
{
pre_aes();
_c = _mm_aesenc_si128(_c, _a);
// post_aes(), optimized scratchpad twiddling (credits to dga)
post_aes();
}
}
else
{
for(i = 0; i < ITER / 2; i++)
{
pre_aes();
aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a);
post_aes();
}
}
memcpy(text, state.init, INIT_SIZE_BYTE);
if(useAes)
{
aes_expand_key(&state.hs.b[32], expandedKey);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
// add the xor to the pseudo round
aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK);
}
}
else
{
oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE);
for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++)
{
for(j = 0; j < INIT_SIZE_BLK; j++)
{
xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]);
aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data);
}
}
oaes_free((OAES_CTX **) &aes_ctx);
}
memcpy(state.init, text, INIT_SIZE_BYTE);
hash_permutation(&state.hs);
extra_hashes[state.hs.b[0] & 3](&state, 200, hash);
} }