From 37c5f393f1693da23416a2a294752f0c3e997c31 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 01:58:17 -0700 Subject: [PATCH 1/7] Update slow-hash.c 1. Added huge pages support and optimized scratchpad twiddling. (credits to dga). 2. Added aes-ni key expansion support. 3. Minor speedup to scratchpad initialization/finalization. --- src/crypto/slow-hash.c | 534 +++++++++++++++++++++++++++++++---------- 1 file changed, 410 insertions(+), 124 deletions(-) diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c index c7264bd9..cda66af5 100644 --- a/src/crypto/slow-hash.c +++ b/src/crypto/slow-hash.c @@ -11,143 +11,429 @@ #include "hash-ops.h" #include "oaes_lib.h" -static void (*const extra_hashes[4])(const void *, size_t, char *) = { - hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein -}; +#include -#define MEMORY (1 << 21) /* 2 MiB */ +#if defined(_MSC_VER) +#include +#include +#define STATIC +#define INLINE __inline +#if !defined(RDATA_ALIGN16) +#define RDATA_ALIGN16 __declspec(align(16)) +#endif +#else +#include +#include +#define STATIC static +#define INLINE inline +#if !defined(RDATA_ALIGN16) +#define RDATA_ALIGN16 __attribute__ ((aligned(16))) +#endif +#endif + +#if defined(__INTEL_COMPILER) +#define ASM __asm__ +#elif !defined(_MSC_VER) +#define ASM __asm__ +#else +#define ASM __asm +#endif + +#define MEMORY (1 << 21) // 2MB scratchpad #define ITER (1 << 20) #define AES_BLOCK_SIZE 16 -#define AES_KEY_SIZE 32 /*16*/ +#define AES_KEY_SIZE 32 #define INIT_SIZE_BLK 8 #define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) +#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE) -static size_t e2i(const uint8_t* a, size_t count) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (count - 1); } +#define U64(x) ((uint64_t *) (x)) +#define R128(x) ((__m128i *) (x)) +#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a))) -static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) { - uint64_t a0, b0; - uint64_t hi, lo; +#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) +#if defined(_MSC_VER) +#define __mul() lo = _umul128(c[0], b[0], &hi); +#else +#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc"); +#endif - a0 = SWAP64LE(((uint64_t*)a)[0]); - b0 = SWAP64LE(((uint64_t*)b)[0]); - lo = mul128(a0, b0, &hi); - ((uint64_t*)res)[0] = SWAP64LE(hi); - ((uint64_t*)res)[1] = SWAP64LE(lo); -} +#define pre_aes() \ + j = state_index(a); \ + _c = _mm_load_si128(R128(&hp_state[j])); \ + _a = _mm_load_si128(R128(a)); \ + +// dga's optimized scratchpad twiddling +#define post_aes() \ + _mm_store_si128(R128(c), _c); \ + _b = _mm_xor_si128(_b, _c); \ + _mm_store_si128(R128(&hp_state[j]), _b); \ + j = state_index(c); \ + p = U64(&hp_state[j]); \ + b[0] = p[0]; b[1] = p[1]; \ + __mul(); \ + a[0] += hi; a[1] += lo; \ + p = U64(&hp_state[j]); \ + p[0] = a[0]; p[1] = a[1]; \ + a[0] ^= b[0]; a[1] ^= b[1]; \ + _b = _c; \ + +#if defined(_MSC_VER) +#define THREADV __declspec(thread) +#else +#define THREADV __thread +#endif -static void sum_half_blocks(uint8_t* a, const uint8_t* b) { - uint64_t a0, a1, b0, b1; - - a0 = SWAP64LE(((uint64_t*)a)[0]); - a1 = SWAP64LE(((uint64_t*)a)[1]); - b0 = SWAP64LE(((uint64_t*)b)[0]); - b1 = SWAP64LE(((uint64_t*)b)[1]); - a0 += b0; - a1 += b1; - ((uint64_t*)a)[0] = SWAP64LE(a0); - ((uint64_t*)a)[1] = SWAP64LE(a1); -} - -static void copy_block(uint8_t* dst, const uint8_t* src) { - memcpy(dst, src, AES_BLOCK_SIZE); -} - -static void swap_blocks(uint8_t* 
a, uint8_t* b) { - size_t i; - uint8_t t; - for (i = 0; i < AES_BLOCK_SIZE; i++) { - t = a[i]; - a[i] = b[i]; - b[i] = t; - } -} - -static void xor_blocks(uint8_t* a, const uint8_t* b) { - size_t i; - for (i = 0; i < AES_BLOCK_SIZE; i++) { - a[i] ^= b[i]; - } -} +extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey); +extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey); #pragma pack(push, 1) -union cn_slow_hash_state { - union hash_state hs; - struct { - uint8_t k[64]; - uint8_t init[INIT_SIZE_BYTE]; - }; +union cn_slow_hash_state +{ + union hash_state hs; + struct + { + uint8_t k[64]; + uint8_t init[INIT_SIZE_BYTE]; + }; }; #pragma pack(pop) -void cn_slow_hash(const void *data, size_t length, char *hash) { - uint8_t long_state[MEMORY]; - union cn_slow_hash_state state; - uint8_t text[INIT_SIZE_BYTE]; - uint8_t a[AES_BLOCK_SIZE]; - uint8_t b[AES_BLOCK_SIZE]; - uint8_t c[AES_BLOCK_SIZE]; - uint8_t d[AES_BLOCK_SIZE]; - size_t i, j; - uint8_t aes_key[AES_KEY_SIZE]; - OAES_CTX* aes_ctx; +THREADV uint8_t *hp_state = NULL; +THREADV int hp_allocated = 0; - hash_process(&state.hs, data, length); - memcpy(text, state.init, INIT_SIZE_BYTE); - memcpy(aes_key, state.hs.b, AES_KEY_SIZE); - aes_ctx = oaes_alloc(); - for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { - for (j = 0; j < INIT_SIZE_BLK; j++) { - oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE); - oaes_pseudo_encrypt_ecb(aes_ctx, &text[AES_BLOCK_SIZE * j]); - /*memcpy(aes_key, &text[AES_BLOCK_SIZE * j], AES_KEY_SIZE);*/ - memcpy(aes_key, state.hs.b, AES_KEY_SIZE); - } - memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); - } - - for (i = 0; i < 16; i++) { - a[i] = state.k[ i] ^ state.k[32 + i]; - b[i] = state.k[16 + i] ^ state.k[48 + i]; - } - - for (i = 0; i < ITER / 2; i++) { - /* Dependency chain: address -> read value ------+ - * written value <-+ hard function (AES or MUL) <+ - * next address <-+ - */ - /* Iteration 1 */ - j = e2i(a, MEMORY / AES_BLOCK_SIZE); - copy_block(c, &long_state[j * AES_BLOCK_SIZE]); - oaes_encryption_round(a, c); - xor_blocks(b, c); - swap_blocks(b, c); - copy_block(&long_state[j * AES_BLOCK_SIZE], c); - assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); - swap_blocks(a, b); - /* Iteration 2 */ - j = e2i(a, MEMORY / AES_BLOCK_SIZE); - copy_block(c, &long_state[j * AES_BLOCK_SIZE]); - mul(a, c, d); - sum_half_blocks(b, d); - swap_blocks(b, c); - xor_blocks(b, c); - copy_block(&long_state[j * AES_BLOCK_SIZE], c); - assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); - swap_blocks(a, b); - } - - memcpy(text, state.init, INIT_SIZE_BYTE); - for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { - for (j = 0; j < INIT_SIZE_BLK; j++) { - /*oaes_key_import_data(aes_ctx, &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE], AES_KEY_SIZE);*/ - oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); - xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); - oaes_pseudo_encrypt_ecb(aes_ctx, &text[j * AES_BLOCK_SIZE]); - } - } - memcpy(state.init, text, INIT_SIZE_BYTE); - hash_permutation(&state.hs); - /*memcpy(hash, &state, 32);*/ - extra_hashes[state.hs.b[0] & 3](&state, 200, hash); - oaes_free(&aes_ctx); +#if defined(_MSC_VER) +#define cpuid(info,x) __cpuidex(info,x,0) +#else +void cpuid(int CPUInfo[4], int InfoType) +{ + ASM __volatile__ + ( + "cpuid": + "=a" (CPUInfo[0]), + "=b" (CPUInfo[1]), + "=c" (CPUInfo[2]), + "=d" (CPUInfo[3]) : + "a" (InfoType), "c" (0) + ); +} +#endif + +STATIC INLINE void 
xor_blocks(uint8_t *a, const uint8_t *b) +{ + U64(a)[0] ^= U64(b)[0]; + U64(a)[1] ^= U64(b)[1]; +} + +STATIC INLINE int check_aes_hw(void) +{ + int cpuid_results[4]; + static int supported = -1; + + if(supported >= 0) + return supported; + + cpuid(cpuid_results,1); + return supported = cpuid_results[2] & (1 << 25); +} + +STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2) +{ + __m128i t4; + *t2 = _mm_shuffle_epi32(*t2, 0xff); + t4 = _mm_slli_si128(*t1, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + *t1 = _mm_xor_si128(*t1, *t2); +} + +STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3) +{ + __m128i t2, t4; + t4 = _mm_aeskeygenassist_si128(*t1, 0x00); + t2 = _mm_shuffle_epi32(t4, 0xaa); + t4 = _mm_slli_si128(*t3, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + *t3 = _mm_xor_si128(*t3, t2); +} + +STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey) +{ + __m128i *ek = R128(expandedKey); + __m128i t1, t2, t3; + + t1 = _mm_loadu_si128(R128(key)); + t3 = _mm_loadu_si128(R128(key + 16)); + + ek[0] = t1; + ek[1] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x01); + aes_256_assist1(&t1, &t2); + ek[2] = t1; + aes_256_assist2(&t1, &t3); + ek[3] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x02); + aes_256_assist1(&t1, &t2); + ek[4] = t1; + aes_256_assist2(&t1, &t3); + ek[5] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x04); + aes_256_assist1(&t1, &t2); + ek[6] = t1; + aes_256_assist2(&t1, &t3); + ek[7] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x08); + aes_256_assist1(&t1, &t2); + ek[8] = t1; + aes_256_assist2(&t1, &t3); + ek[9] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x10); + aes_256_assist1(&t1, &t2); + ek[10] = t1; +} + +STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out, + const uint8_t *expandedKey, int nblocks) +{ + __m128i *k = R128(expandedKey); + __m128i d; + int i; + + for(i = 0; i < nblocks; i++) + { + d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); + d = _mm_aesenc_si128(d, *R128(&k[0])); + d = _mm_aesenc_si128(d, *R128(&k[1])); + d = _mm_aesenc_si128(d, *R128(&k[2])); + d = _mm_aesenc_si128(d, *R128(&k[3])); + d = _mm_aesenc_si128(d, *R128(&k[4])); + d = _mm_aesenc_si128(d, *R128(&k[5])); + d = _mm_aesenc_si128(d, *R128(&k[6])); + d = _mm_aesenc_si128(d, *R128(&k[7])); + d = _mm_aesenc_si128(d, *R128(&k[8])); + d = _mm_aesenc_si128(d, *R128(&k[9])); + _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); + } +} + +STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out, + const uint8_t *expandedKey, const uint8_t *xor, int nblocks) +{ + __m128i *k = R128(expandedKey); + __m128i *x = R128(xor); + __m128i d; + int i; + + for(i = 0; i < nblocks; i++) + { + d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); + d = _mm_xor_si128(d, *R128(x++)); + d = _mm_aesenc_si128(d, *R128(&k[0])); + d = _mm_aesenc_si128(d, *R128(&k[1])); + d = _mm_aesenc_si128(d, *R128(&k[2])); + d = _mm_aesenc_si128(d, *R128(&k[3])); + d = _mm_aesenc_si128(d, *R128(&k[4])); + d = _mm_aesenc_si128(d, *R128(&k[5])); + d = _mm_aesenc_si128(d, *R128(&k[6])); + d = _mm_aesenc_si128(d, *R128(&k[7])); + d = _mm_aesenc_si128(d, *R128(&k[8])); + d = _mm_aesenc_si128(d, *R128(&k[9])); + _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); + } +} + +#if defined(_MSC_VER) +BOOL 
SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable) +{ + struct + { + DWORD count; + LUID_AND_ATTRIBUTES privilege[1]; + } info; + + HANDLE token; + if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token)) + return FALSE; + + info.count = 1; + info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0; + + if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid))) + return FALSE; + + if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL)) + return FALSE; + + if (GetLastError() != ERROR_SUCCESS) + return FALSE; + + CloseHandle(token); + + return TRUE; + +} +#endif + +void slow_hash_allocate_state(void) +{ + int state = 0; + if(hp_state != NULL) + return; + +#if defined(_MSC_VER) + SetLockPagesPrivilege(GetCurrentProcess(), TRUE); + hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES | + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0); + if(hp_state == MAP_FAILED) + hp_state = NULL; +#endif + hp_allocated = 1; + if(hp_state == NULL) + { + hp_allocated = 0; + hp_state = (uint8_t *) malloc(MEMORY); + } +} + +void slow_hash_free_state(void) +{ + if(hp_state == NULL) + return; + + if(!hp_allocated) + free(hp_state); + else + { +#if defined(_MSC_VER) + VirtualFree(hp_state, MEMORY, MEM_RELEASE); +#else + munmap(hp_state, MEMORY); +#endif + } + + hp_state = NULL; + hp_allocated = 0; +} + +void cn_slow_hash(const void *data, size_t length, char *hash) +{ + RDATA_ALIGN16 uint8_t expandedKey[240]; + + uint8_t text[INIT_SIZE_BYTE]; + RDATA_ALIGN16 uint64_t a[2]; + RDATA_ALIGN16 uint64_t b[2]; + RDATA_ALIGN16 uint64_t c[2]; + RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE]; + union cn_slow_hash_state state; + __m128i _a, _b, _c; + uint64_t hi, lo; + + size_t i, j; + uint64_t *p = NULL; + oaes_ctx *aes_ctx; + int useAes = check_aes_hw(); + + static void (*const extra_hashes[4])(const void *, size_t, char *) = + { + hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein + }; + + // this isn't supposed to happen, but guard against it for now. + if(hp_state == NULL) + slow_hash_allocate_state(); + + hash_process(&state.hs, data, length); + memcpy(text, state.init, INIT_SIZE_BYTE); + + if(useAes) + { + aes_expand_key(state.hs.b, expandedKey); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK); + memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + } + } + else + { + aes_ctx = (oaes_ctx *) oaes_alloc(); + oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + for(j = 0; j < INIT_SIZE_BLK; j++) + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); + + memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + } + } + + U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0]; + U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1]; + U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0]; + U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1]; + + _b = _mm_load_si128(R128(b)); + // this is ugly but the branching affects the loop somewhat so put it outside. 
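+    // What follows is the main CryptoNight memory-hard loop. Each of the
+    // ITER / 2 passes runs pre_aes() (pick a scratchpad block from the low
+    // bits of a, load it into _c), one AES round of _c keyed with _a, then
+    // post_aes() (dga's twiddling: xor _b with _c and store it back, index a
+    // new block from c, 64-bit multiply c[0] by that block, add the 128-bit
+    // product into a, store a there, xor the block into a, rotate _b = _c).
+    // Hoisting the useAes branch out of the loop keeps the hot path free of
+    // a per-iteration conditional.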
+ if(useAes) + { + for(i = 0; i < ITER / 2; i++) + { + pre_aes(); + _c = _mm_aesenc_si128(_c, _a); + // post_aes(), optimized scratchpad twiddling (credits to dga) + post_aes(); + } + } + else + { + for(i = 0; i < ITER / 2; i++) + { + pre_aes(); + aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a); + post_aes(); + } + } + + memcpy(text, state.init, INIT_SIZE_BYTE); + if(useAes) + { + aes_expand_key(&state.hs.b[32], expandedKey); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + // add the xor to the pseudo round + aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK); + } + } + else + { + oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + for(j = 0; j < INIT_SIZE_BLK; j++) + { + xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); + } + } + oaes_free((OAES_CTX **) &aes_ctx); + } + + memcpy(state.init, text, INIT_SIZE_BYTE); + hash_permutation(&state.hs); + extra_hashes[state.hs.b[0] & 3](&state, 200, hash); } From c0520ad63d71f94bc78a119c61ade30876e95ed6 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 02:00:44 -0700 Subject: [PATCH 2/7] Update miner.cpp 1. Added scratchpad memory allocation support. --- src/cryptonote_core/miner.cpp | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/cryptonote_core/miner.cpp b/src/cryptonote_core/miner.cpp index 56b459d6..d021b05b 100644 --- a/src/cryptonote_core/miner.cpp +++ b/src/cryptonote_core/miner.cpp @@ -23,7 +23,8 @@ using namespace epee; #include "miner.h" - +extern "C" void slow_hash_allocate_state(); +extern "C" void slow_hash_free_state(); namespace cryptonote { @@ -188,10 +189,19 @@ namespace cryptonote return true; } //----------------------------------------------------------------------------------------------------- - bool miner::is_mining() + bool miner::is_mining() const { return !m_stop; } + //----------------------------------------------------------------------------------------------------- + const account_public_address& miner::get_mining_address() const + { + return m_mine_address; + } + //----------------------------------------------------------------------------------------------------- + uint32_t miner::get_threads_count() const { + return m_threads_total; + } //----------------------------------------------------------------------------------------------------- bool miner::start(const account_public_address& adr, size_t threads_count, const boost::thread::attributes& attrs) { @@ -226,12 +236,14 @@ namespace cryptonote return true; } //----------------------------------------------------------------------------------------------------- - uint64_t miner::get_speed() + uint64_t miner::get_speed() const { - if(is_mining()) + if(is_mining()) { return m_current_hash_rate; - else + } + else { return 0; + } } //----------------------------------------------------------------------------------------------------- void miner::send_stop_signal() @@ -309,6 +321,7 @@ namespace cryptonote difficulty_type local_diff = 0; uint32_t local_template_ver = 0; block b; + slow_hash_allocate_state(); while(!m_stop) { if(m_pausers_count)//anti split workaround @@ -357,6 +370,7 @@ namespace cryptonote nonce+=m_threads_total; ++m_hashes; } + slow_hash_free_state(); LOG_PRINT_L0("Miner thread stopped ["<< 
th_local_index << "]"); return true; } From 802d58054fd53a2907a03170cdb5c9b49098d0ad Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 03:22:27 -0700 Subject: [PATCH 3/7] Revert "Update miner.cpp" This reverts commit c0520ad63d71f94bc78a119c61ade30876e95ed6. --- src/cryptonote_core/miner.cpp | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/src/cryptonote_core/miner.cpp b/src/cryptonote_core/miner.cpp index d021b05b..56b459d6 100644 --- a/src/cryptonote_core/miner.cpp +++ b/src/cryptonote_core/miner.cpp @@ -23,8 +23,7 @@ using namespace epee; #include "miner.h" -extern "C" void slow_hash_allocate_state(); -extern "C" void slow_hash_free_state(); + namespace cryptonote { @@ -189,19 +188,10 @@ namespace cryptonote return true; } //----------------------------------------------------------------------------------------------------- - bool miner::is_mining() const + bool miner::is_mining() { return !m_stop; } - //----------------------------------------------------------------------------------------------------- - const account_public_address& miner::get_mining_address() const - { - return m_mine_address; - } - //----------------------------------------------------------------------------------------------------- - uint32_t miner::get_threads_count() const { - return m_threads_total; - } //----------------------------------------------------------------------------------------------------- bool miner::start(const account_public_address& adr, size_t threads_count, const boost::thread::attributes& attrs) { @@ -236,14 +226,12 @@ namespace cryptonote return true; } //----------------------------------------------------------------------------------------------------- - uint64_t miner::get_speed() const + uint64_t miner::get_speed() { - if(is_mining()) { + if(is_mining()) return m_current_hash_rate; - } - else { + else return 0; - } } //----------------------------------------------------------------------------------------------------- void miner::send_stop_signal() @@ -321,7 +309,6 @@ namespace cryptonote difficulty_type local_diff = 0; uint32_t local_template_ver = 0; block b; - slow_hash_allocate_state(); while(!m_stop) { if(m_pausers_count)//anti split workaround @@ -370,7 +357,6 @@ namespace cryptonote nonce+=m_threads_total; ++m_hashes; } - slow_hash_free_state(); LOG_PRINT_L0("Miner thread stopped ["<< th_local_index << "]"); return true; } From 81cf5650c4c2576c74317b40a6ca57d07645a6f3 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 03:22:33 -0700 Subject: [PATCH 4/7] Revert "Update slow-hash.c" This reverts commit 37c5f393f1693da23416a2a294752f0c3e997c31. 
--- src/crypto/slow-hash.c | 532 ++++++++++------------------------------- 1 file changed, 123 insertions(+), 409 deletions(-) diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c index cda66af5..c7264bd9 100644 --- a/src/crypto/slow-hash.c +++ b/src/crypto/slow-hash.c @@ -11,429 +11,143 @@ #include "hash-ops.h" #include "oaes_lib.h" -#include +static void (*const extra_hashes[4])(const void *, size_t, char *) = { + hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein +}; -#if defined(_MSC_VER) -#include -#include -#define STATIC -#define INLINE __inline -#if !defined(RDATA_ALIGN16) -#define RDATA_ALIGN16 __declspec(align(16)) -#endif -#else -#include -#include -#define STATIC static -#define INLINE inline -#if !defined(RDATA_ALIGN16) -#define RDATA_ALIGN16 __attribute__ ((aligned(16))) -#endif -#endif - -#if defined(__INTEL_COMPILER) -#define ASM __asm__ -#elif !defined(_MSC_VER) -#define ASM __asm__ -#else -#define ASM __asm -#endif - -#define MEMORY (1 << 21) // 2MB scratchpad +#define MEMORY (1 << 21) /* 2 MiB */ #define ITER (1 << 20) #define AES_BLOCK_SIZE 16 -#define AES_KEY_SIZE 32 +#define AES_KEY_SIZE 32 /*16*/ #define INIT_SIZE_BLK 8 #define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) -#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE) -#define U64(x) ((uint64_t *) (x)) -#define R128(x) ((__m128i *) (x)) -#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a))) +static size_t e2i(const uint8_t* a, size_t count) { return (*((uint64_t*)a) / AES_BLOCK_SIZE) & (count - 1); } -#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) -#if defined(_MSC_VER) -#define __mul() lo = _umul128(c[0], b[0], &hi); -#else -#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc"); -#endif +static void mul(const uint8_t* a, const uint8_t* b, uint8_t* res) { + uint64_t a0, b0; + uint64_t hi, lo; -#define pre_aes() \ - j = state_index(a); \ - _c = _mm_load_si128(R128(&hp_state[j])); \ - _a = _mm_load_si128(R128(a)); \ - -// dga's optimized scratchpad twiddling -#define post_aes() \ - _mm_store_si128(R128(c), _c); \ - _b = _mm_xor_si128(_b, _c); \ - _mm_store_si128(R128(&hp_state[j]), _b); \ - j = state_index(c); \ - p = U64(&hp_state[j]); \ - b[0] = p[0]; b[1] = p[1]; \ - __mul(); \ - a[0] += hi; a[1] += lo; \ - p = U64(&hp_state[j]); \ - p[0] = a[0]; p[1] = a[1]; \ - a[0] ^= b[0]; a[1] ^= b[1]; \ - _b = _c; \ - -#if defined(_MSC_VER) -#define THREADV __declspec(thread) -#else -#define THREADV __thread -#endif + a0 = SWAP64LE(((uint64_t*)a)[0]); + b0 = SWAP64LE(((uint64_t*)b)[0]); + lo = mul128(a0, b0, &hi); + ((uint64_t*)res)[0] = SWAP64LE(hi); + ((uint64_t*)res)[1] = SWAP64LE(lo); +} -extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey); -extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey); +static void sum_half_blocks(uint8_t* a, const uint8_t* b) { + uint64_t a0, a1, b0, b1; + + a0 = SWAP64LE(((uint64_t*)a)[0]); + a1 = SWAP64LE(((uint64_t*)a)[1]); + b0 = SWAP64LE(((uint64_t*)b)[0]); + b1 = SWAP64LE(((uint64_t*)b)[1]); + a0 += b0; + a1 += b1; + ((uint64_t*)a)[0] = SWAP64LE(a0); + ((uint64_t*)a)[1] = SWAP64LE(a1); +} + +static void copy_block(uint8_t* dst, const uint8_t* src) { + memcpy(dst, src, AES_BLOCK_SIZE); +} + +static void swap_blocks(uint8_t* a, uint8_t* b) { + size_t i; + uint8_t t; + for (i = 0; i < AES_BLOCK_SIZE; i++) { + t = a[i]; + a[i] = b[i]; + b[i] = t; + } +} + +static void xor_blocks(uint8_t* a, const uint8_t* b) { + 
size_t i; + for (i = 0; i < AES_BLOCK_SIZE; i++) { + a[i] ^= b[i]; + } +} #pragma pack(push, 1) -union cn_slow_hash_state -{ - union hash_state hs; - struct - { - uint8_t k[64]; - uint8_t init[INIT_SIZE_BYTE]; - }; +union cn_slow_hash_state { + union hash_state hs; + struct { + uint8_t k[64]; + uint8_t init[INIT_SIZE_BYTE]; + }; }; #pragma pack(pop) -THREADV uint8_t *hp_state = NULL; -THREADV int hp_allocated = 0; +void cn_slow_hash(const void *data, size_t length, char *hash) { + uint8_t long_state[MEMORY]; + union cn_slow_hash_state state; + uint8_t text[INIT_SIZE_BYTE]; + uint8_t a[AES_BLOCK_SIZE]; + uint8_t b[AES_BLOCK_SIZE]; + uint8_t c[AES_BLOCK_SIZE]; + uint8_t d[AES_BLOCK_SIZE]; + size_t i, j; + uint8_t aes_key[AES_KEY_SIZE]; + OAES_CTX* aes_ctx; -#if defined(_MSC_VER) -#define cpuid(info,x) __cpuidex(info,x,0) -#else -void cpuid(int CPUInfo[4], int InfoType) -{ - ASM __volatile__ - ( - "cpuid": - "=a" (CPUInfo[0]), - "=b" (CPUInfo[1]), - "=c" (CPUInfo[2]), - "=d" (CPUInfo[3]) : - "a" (InfoType), "c" (0) - ); -} -#endif - -STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b) -{ - U64(a)[0] ^= U64(b)[0]; - U64(a)[1] ^= U64(b)[1]; -} - -STATIC INLINE int check_aes_hw(void) -{ - int cpuid_results[4]; - static int supported = -1; - - if(supported >= 0) - return supported; - - cpuid(cpuid_results,1); - return supported = cpuid_results[2] & (1 << 25); -} - -STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2) -{ - __m128i t4; - *t2 = _mm_shuffle_epi32(*t2, 0xff); - t4 = _mm_slli_si128(*t1, 0x04); - *t1 = _mm_xor_si128(*t1, t4); - t4 = _mm_slli_si128(t4, 0x04); - *t1 = _mm_xor_si128(*t1, t4); - t4 = _mm_slli_si128(t4, 0x04); - *t1 = _mm_xor_si128(*t1, t4); - *t1 = _mm_xor_si128(*t1, *t2); -} - -STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3) -{ - __m128i t2, t4; - t4 = _mm_aeskeygenassist_si128(*t1, 0x00); - t2 = _mm_shuffle_epi32(t4, 0xaa); - t4 = _mm_slli_si128(*t3, 0x04); - *t3 = _mm_xor_si128(*t3, t4); - t4 = _mm_slli_si128(t4, 0x04); - *t3 = _mm_xor_si128(*t3, t4); - t4 = _mm_slli_si128(t4, 0x04); - *t3 = _mm_xor_si128(*t3, t4); - *t3 = _mm_xor_si128(*t3, t2); -} - -STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey) -{ - __m128i *ek = R128(expandedKey); - __m128i t1, t2, t3; - - t1 = _mm_loadu_si128(R128(key)); - t3 = _mm_loadu_si128(R128(key + 16)); - - ek[0] = t1; - ek[1] = t3; - - t2 = _mm_aeskeygenassist_si128(t3, 0x01); - aes_256_assist1(&t1, &t2); - ek[2] = t1; - aes_256_assist2(&t1, &t3); - ek[3] = t3; - - t2 = _mm_aeskeygenassist_si128(t3, 0x02); - aes_256_assist1(&t1, &t2); - ek[4] = t1; - aes_256_assist2(&t1, &t3); - ek[5] = t3; - - t2 = _mm_aeskeygenassist_si128(t3, 0x04); - aes_256_assist1(&t1, &t2); - ek[6] = t1; - aes_256_assist2(&t1, &t3); - ek[7] = t3; - - t2 = _mm_aeskeygenassist_si128(t3, 0x08); - aes_256_assist1(&t1, &t2); - ek[8] = t1; - aes_256_assist2(&t1, &t3); - ek[9] = t3; - - t2 = _mm_aeskeygenassist_si128(t3, 0x10); - aes_256_assist1(&t1, &t2); - ek[10] = t1; -} - -STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out, - const uint8_t *expandedKey, int nblocks) -{ - __m128i *k = R128(expandedKey); - __m128i d; - int i; - - for(i = 0; i < nblocks; i++) - { - d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); - d = _mm_aesenc_si128(d, *R128(&k[0])); - d = _mm_aesenc_si128(d, *R128(&k[1])); - d = _mm_aesenc_si128(d, *R128(&k[2])); - d = _mm_aesenc_si128(d, *R128(&k[3])); - d = _mm_aesenc_si128(d, *R128(&k[4])); - d = _mm_aesenc_si128(d, *R128(&k[5])); - d = _mm_aesenc_si128(d, *R128(&k[6])); 
- d = _mm_aesenc_si128(d, *R128(&k[7])); - d = _mm_aesenc_si128(d, *R128(&k[8])); - d = _mm_aesenc_si128(d, *R128(&k[9])); - _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); + hash_process(&state.hs, data, length); + memcpy(text, state.init, INIT_SIZE_BYTE); + memcpy(aes_key, state.hs.b, AES_KEY_SIZE); + aes_ctx = oaes_alloc(); + for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { + for (j = 0; j < INIT_SIZE_BLK; j++) { + oaes_key_import_data(aes_ctx, aes_key, AES_KEY_SIZE); + oaes_pseudo_encrypt_ecb(aes_ctx, &text[AES_BLOCK_SIZE * j]); + /*memcpy(aes_key, &text[AES_BLOCK_SIZE * j], AES_KEY_SIZE);*/ + memcpy(aes_key, state.hs.b, AES_KEY_SIZE); } -} - -STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out, - const uint8_t *expandedKey, const uint8_t *xor, int nblocks) -{ - __m128i *k = R128(expandedKey); - __m128i *x = R128(xor); - __m128i d; - int i; - - for(i = 0; i < nblocks; i++) - { - d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); - d = _mm_xor_si128(d, *R128(x++)); - d = _mm_aesenc_si128(d, *R128(&k[0])); - d = _mm_aesenc_si128(d, *R128(&k[1])); - d = _mm_aesenc_si128(d, *R128(&k[2])); - d = _mm_aesenc_si128(d, *R128(&k[3])); - d = _mm_aesenc_si128(d, *R128(&k[4])); - d = _mm_aesenc_si128(d, *R128(&k[5])); - d = _mm_aesenc_si128(d, *R128(&k[6])); - d = _mm_aesenc_si128(d, *R128(&k[7])); - d = _mm_aesenc_si128(d, *R128(&k[8])); - d = _mm_aesenc_si128(d, *R128(&k[9])); - _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); - } -} - -#if defined(_MSC_VER) -BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable) -{ - struct - { - DWORD count; - LUID_AND_ATTRIBUTES privilege[1]; - } info; - - HANDLE token; - if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token)) - return FALSE; - - info.count = 1; - info.privilege[0].Attributes = bEnable ? 
SE_PRIVILEGE_ENABLED : 0; - - if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid))) - return FALSE; - - if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL)) - return FALSE; - - if (GetLastError() != ERROR_SUCCESS) - return FALSE; - - CloseHandle(token); - - return TRUE; - -} -#endif - -void slow_hash_allocate_state(void) -{ - int state = 0; - if(hp_state != NULL) - return; - -#if defined(_MSC_VER) - SetLockPagesPrivilege(GetCurrentProcess(), TRUE); - hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES | - MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -#else - hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0); - if(hp_state == MAP_FAILED) - hp_state = NULL; -#endif - hp_allocated = 1; - if(hp_state == NULL) - { - hp_allocated = 0; - hp_state = (uint8_t *) malloc(MEMORY); - } -} - -void slow_hash_free_state(void) -{ - if(hp_state == NULL) - return; - - if(!hp_allocated) - free(hp_state); - else - { -#if defined(_MSC_VER) - VirtualFree(hp_state, MEMORY, MEM_RELEASE); -#else - munmap(hp_state, MEMORY); -#endif - } - - hp_state = NULL; - hp_allocated = 0; -} - -void cn_slow_hash(const void *data, size_t length, char *hash) -{ - RDATA_ALIGN16 uint8_t expandedKey[240]; - - uint8_t text[INIT_SIZE_BYTE]; - RDATA_ALIGN16 uint64_t a[2]; - RDATA_ALIGN16 uint64_t b[2]; - RDATA_ALIGN16 uint64_t c[2]; - RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE]; - union cn_slow_hash_state state; - __m128i _a, _b, _c; - uint64_t hi, lo; - - size_t i, j; - uint64_t *p = NULL; - oaes_ctx *aes_ctx; - int useAes = check_aes_hw(); - - static void (*const extra_hashes[4])(const void *, size_t, char *) = - { - hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein - }; - - // this isn't supposed to happen, but guard against it for now. - if(hp_state == NULL) - slow_hash_allocate_state(); - - hash_process(&state.hs, data, length); - memcpy(text, state.init, INIT_SIZE_BYTE); - - if(useAes) - { - aes_expand_key(state.hs.b, expandedKey); - for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) - { - aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK); - memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); - } - } - else - { - aes_ctx = (oaes_ctx *) oaes_alloc(); - oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE); - for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) - { - for(j = 0; j < INIT_SIZE_BLK; j++) - aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); - - memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); - } - } - - U64(a)[0] = U64(&state.k[0])[0] ^ U64(&state.k[32])[0]; - U64(a)[1] = U64(&state.k[0])[1] ^ U64(&state.k[32])[1]; - U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0]; - U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1]; - - _b = _mm_load_si128(R128(b)); - // this is ugly but the branching affects the loop somewhat so put it outside. 
- if(useAes) - { - for(i = 0; i < ITER / 2; i++) - { - pre_aes(); - _c = _mm_aesenc_si128(_c, _a); - // post_aes(), optimized scratchpad twiddling (credits to dga) - post_aes(); - } - } - else - { - for(i = 0; i < ITER / 2; i++) - { - pre_aes(); - aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a); - post_aes(); - } - } - - memcpy(text, state.init, INIT_SIZE_BYTE); - if(useAes) - { - aes_expand_key(&state.hs.b[32], expandedKey); - for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) - { - // add the xor to the pseudo round - aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK); - } - } - else - { - oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); - for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) - { - for(j = 0; j < INIT_SIZE_BLK; j++) - { - xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); - aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); - } - } - oaes_free((OAES_CTX **) &aes_ctx); - } - - memcpy(state.init, text, INIT_SIZE_BYTE); - hash_permutation(&state.hs); - extra_hashes[state.hs.b[0] & 3](&state, 200, hash); + memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + } + + for (i = 0; i < 16; i++) { + a[i] = state.k[ i] ^ state.k[32 + i]; + b[i] = state.k[16 + i] ^ state.k[48 + i]; + } + + for (i = 0; i < ITER / 2; i++) { + /* Dependency chain: address -> read value ------+ + * written value <-+ hard function (AES or MUL) <+ + * next address <-+ + */ + /* Iteration 1 */ + j = e2i(a, MEMORY / AES_BLOCK_SIZE); + copy_block(c, &long_state[j * AES_BLOCK_SIZE]); + oaes_encryption_round(a, c); + xor_blocks(b, c); + swap_blocks(b, c); + copy_block(&long_state[j * AES_BLOCK_SIZE], c); + assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); + swap_blocks(a, b); + /* Iteration 2 */ + j = e2i(a, MEMORY / AES_BLOCK_SIZE); + copy_block(c, &long_state[j * AES_BLOCK_SIZE]); + mul(a, c, d); + sum_half_blocks(b, d); + swap_blocks(b, c); + xor_blocks(b, c); + copy_block(&long_state[j * AES_BLOCK_SIZE], c); + assert(j == e2i(a, MEMORY / AES_BLOCK_SIZE)); + swap_blocks(a, b); + } + + memcpy(text, state.init, INIT_SIZE_BYTE); + for (i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { + for (j = 0; j < INIT_SIZE_BLK; j++) { + /*oaes_key_import_data(aes_ctx, &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE], AES_KEY_SIZE);*/ + oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); + xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); + oaes_pseudo_encrypt_ecb(aes_ctx, &text[j * AES_BLOCK_SIZE]); + } + } + memcpy(state.init, text, INIT_SIZE_BYTE); + hash_permutation(&state.hs); + /*memcpy(hash, &state, 32);*/ + extra_hashes[state.hs.b[0] & 3](&state, 200, hash); + oaes_free(&aes_ctx); } From 72643c47daa06a8fed739d7f7045d960e0cb69d6 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 03:35:09 -0700 Subject: [PATCH 5/7] Update slow-hash.c 1. Added huge pages support and optimized scratchpad twiddling. (credits to dga). 2. Added aes-ni key expansion support. 3. Minor speedup to scratchpad initialization/finalization. 
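In case the allocation strategy in item 1 isn't obvious from the diff alone: slow_hash_allocate_state() asks the kernel to back the 2 MiB scratchpad with a single huge page and falls back to plain heap memory when that fails. Below is a minimal standalone sketch of the Linux path only (the patch also has a VirtualAlloc/MEM_LARGE_PAGES path for MSVC). The name try_alloc_scratchpad is illustrative, not a function in the patch, and it passes the documented fd of -1 to the anonymous mmap where the patch passes 0 (which Linux happens to accept).

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define MEMORY (1 << 21) // 2MB scratchpad, as in slow-hash.c

    // Illustrative only: try to back the scratchpad with one 2 MiB huge page
    // (fewer TLB misses in the random-access loop), else fall back to malloc.
    // *huge tells the caller whether to munmap() or free() it later.
    static uint8_t *try_alloc_scratchpad(int *huge)
    {
        void *p = mmap(NULL, MEMORY, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if(p != MAP_FAILED)
        {
            *huge = 1;
            return (uint8_t *) p;
        }
        *huge = 0;
        return (uint8_t *) malloc(MEMORY);
    }

    int main(void)
    {
        int huge;
        uint8_t *pad = try_alloc_scratchpad(&huge);
        if(pad == NULL)
            return 1;
        printf("scratchpad backed by %s\n", huge ? "a huge page" : "the heap");
        if(huge)
            munmap(pad, MEMORY);
        else
            free(pad);
        return 0;
    }

Note that MAP_HUGETLB only succeeds when the system actually has huge pages reserved (e.g. via vm.nr_hugepages), so the malloc fallback is what keeps the hash usable on an unconfigured machine.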
--- src/crypto/slow-hash.c | 408 +++++++++++++++++++++++++++++------------ 1 file changed, 293 insertions(+), 115 deletions(-) diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c index 35438dca..cda66af5 100644 --- a/src/crypto/slow-hash.c +++ b/src/crypto/slow-hash.c @@ -13,8 +13,9 @@ #include -#if defined(_MSC_VER) || defined(__INTEL_COMPILER) +#if defined(_MSC_VER) #include +#include #define STATIC #define INLINE __inline #if !defined(RDATA_ALIGN16) @@ -22,6 +23,7 @@ #endif #else #include +#include #define STATIC static #define INLINE inline #if !defined(RDATA_ALIGN16) @@ -29,15 +31,58 @@ #endif #endif +#if defined(__INTEL_COMPILER) +#define ASM __asm__ +#elif !defined(_MSC_VER) +#define ASM __asm__ +#else +#define ASM __asm +#endif + #define MEMORY (1 << 21) // 2MB scratchpad #define ITER (1 << 20) #define AES_BLOCK_SIZE 16 #define AES_KEY_SIZE 32 #define INIT_SIZE_BLK 8 #define INIT_SIZE_BYTE (INIT_SIZE_BLK * AES_BLOCK_SIZE) +#define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE) #define U64(x) ((uint64_t *) (x)) #define R128(x) ((__m128i *) (x)) +#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a))) + +#define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) +#if defined(_MSC_VER) +#define __mul() lo = _umul128(c[0], b[0], &hi); +#else +#define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc"); +#endif + +#define pre_aes() \ + j = state_index(a); \ + _c = _mm_load_si128(R128(&hp_state[j])); \ + _a = _mm_load_si128(R128(a)); \ + +// dga's optimized scratchpad twiddling +#define post_aes() \ + _mm_store_si128(R128(c), _c); \ + _b = _mm_xor_si128(_b, _c); \ + _mm_store_si128(R128(&hp_state[j]), _b); \ + j = state_index(c); \ + p = U64(&hp_state[j]); \ + b[0] = p[0]; b[1] = p[1]; \ + __mul(); \ + a[0] += hi; a[1] += lo; \ + p = U64(&hp_state[j]); \ + p[0] = a[0]; p[1] = a[1]; \ + a[0] ^= b[0]; a[1] ^= b[1]; \ + _b = _c; \ + +#if defined(_MSC_VER) +#define THREADV __declspec(thread) +#else +#define THREADV __thread +#endif extern int aesb_single_round(const uint8_t *in, uint8_t*out, const uint8_t *expandedKey); extern int aesb_pseudo_round(const uint8_t *in, uint8_t *out, const uint8_t *expandedKey); @@ -54,59 +99,26 @@ union cn_slow_hash_state }; #pragma pack(pop) -#if defined(_MSC_VER) || defined(__INTEL_COMPILER) +THREADV uint8_t *hp_state = NULL; +THREADV int hp_allocated = 0; + +#if defined(_MSC_VER) #define cpuid(info,x) __cpuidex(info,x,0) #else void cpuid(int CPUInfo[4], int InfoType) { - __asm__ __volatile__ + ASM __volatile__ ( - "cpuid": + "cpuid": "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : - "a" (InfoType), "c" (0) - ); + "a" (InfoType), "c" (0) + ); } #endif -STATIC INLINE void mul(const uint8_t *a, const uint8_t *b, uint8_t *res) -{ - uint64_t a0, b0; - uint64_t hi, lo; - - a0 = U64(a)[0]; - b0 = U64(b)[0]; - lo = mul128(a0, b0, &hi); - U64(res)[0] = hi; - U64(res)[1] = lo; -} - -STATIC INLINE void sum_half_blocks(uint8_t *a, const uint8_t *b) -{ - uint64_t a0, a1, b0, b1; - a0 = U64(a)[0]; - a1 = U64(a)[1]; - b0 = U64(b)[0]; - b1 = U64(b)[1]; - a0 += b0; - a1 += b1; - U64(a)[0] = a0; - U64(a)[1] = a1; -} - -STATIC INLINE void swap_blocks(uint8_t *a, uint8_t *b) -{ - uint64_t t[2]; - U64(t)[0] = U64(a)[0]; - U64(t)[1] = U64(a)[1]; - U64(a)[0] = U64(b)[0]; - U64(a)[1] = U64(b)[1]; - U64(b)[0] = U64(t)[0]; - U64(b)[1] = U64(t)[1]; -} - STATIC INLINE void xor_blocks(uint8_t *a, const uint8_t *b) { U64(a)[0] ^= U64(b)[0]; @@ -125,74 +137,248 @@ STATIC INLINE int check_aes_hw(void) 
return supported = cpuid_results[2] & (1 << 25); } -STATIC INLINE void aesni_pseudo_round(const uint8_t *in, uint8_t *out, - const uint8_t *expandedKey) +STATIC INLINE void aes_256_assist1(__m128i* t1, __m128i * t2) +{ + __m128i t4; + *t2 = _mm_shuffle_epi32(*t2, 0xff); + t4 = _mm_slli_si128(*t1, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t1 = _mm_xor_si128(*t1, t4); + *t1 = _mm_xor_si128(*t1, *t2); +} + +STATIC INLINE void aes_256_assist2(__m128i* t1, __m128i * t3) +{ + __m128i t2, t4; + t4 = _mm_aeskeygenassist_si128(*t1, 0x00); + t2 = _mm_shuffle_epi32(t4, 0xaa); + t4 = _mm_slli_si128(*t3, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + t4 = _mm_slli_si128(t4, 0x04); + *t3 = _mm_xor_si128(*t3, t4); + *t3 = _mm_xor_si128(*t3, t2); +} + +STATIC INLINE void aes_expand_key(const uint8_t *key, uint8_t *expandedKey) +{ + __m128i *ek = R128(expandedKey); + __m128i t1, t2, t3; + + t1 = _mm_loadu_si128(R128(key)); + t3 = _mm_loadu_si128(R128(key + 16)); + + ek[0] = t1; + ek[1] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x01); + aes_256_assist1(&t1, &t2); + ek[2] = t1; + aes_256_assist2(&t1, &t3); + ek[3] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x02); + aes_256_assist1(&t1, &t2); + ek[4] = t1; + aes_256_assist2(&t1, &t3); + ek[5] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x04); + aes_256_assist1(&t1, &t2); + ek[6] = t1; + aes_256_assist2(&t1, &t3); + ek[7] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x08); + aes_256_assist1(&t1, &t2); + ek[8] = t1; + aes_256_assist2(&t1, &t3); + ek[9] = t3; + + t2 = _mm_aeskeygenassist_si128(t3, 0x10); + aes_256_assist1(&t1, &t2); + ek[10] = t1; +} + +STATIC INLINE void aes_pseudo_round(const uint8_t *in, uint8_t *out, + const uint8_t *expandedKey, int nblocks) { __m128i *k = R128(expandedKey); __m128i d; + int i; - d = _mm_loadu_si128(R128(in)); - d = _mm_aesenc_si128(d, *R128(&k[0])); - d = _mm_aesenc_si128(d, *R128(&k[1])); - d = _mm_aesenc_si128(d, *R128(&k[2])); - d = _mm_aesenc_si128(d, *R128(&k[3])); - d = _mm_aesenc_si128(d, *R128(&k[4])); - d = _mm_aesenc_si128(d, *R128(&k[5])); - d = _mm_aesenc_si128(d, *R128(&k[6])); - d = _mm_aesenc_si128(d, *R128(&k[7])); - d = _mm_aesenc_si128(d, *R128(&k[8])); - d = _mm_aesenc_si128(d, *R128(&k[9])); - _mm_storeu_si128((R128(out)), d); + for(i = 0; i < nblocks; i++) + { + d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); + d = _mm_aesenc_si128(d, *R128(&k[0])); + d = _mm_aesenc_si128(d, *R128(&k[1])); + d = _mm_aesenc_si128(d, *R128(&k[2])); + d = _mm_aesenc_si128(d, *R128(&k[3])); + d = _mm_aesenc_si128(d, *R128(&k[4])); + d = _mm_aesenc_si128(d, *R128(&k[5])); + d = _mm_aesenc_si128(d, *R128(&k[6])); + d = _mm_aesenc_si128(d, *R128(&k[7])); + d = _mm_aesenc_si128(d, *R128(&k[8])); + d = _mm_aesenc_si128(d, *R128(&k[9])); + _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); + } +} + +STATIC INLINE void aes_pseudo_round_xor(const uint8_t *in, uint8_t *out, + const uint8_t *expandedKey, const uint8_t *xor, int nblocks) +{ + __m128i *k = R128(expandedKey); + __m128i *x = R128(xor); + __m128i d; + int i; + + for(i = 0; i < nblocks; i++) + { + d = _mm_loadu_si128(R128(in + i * AES_BLOCK_SIZE)); + d = _mm_xor_si128(d, *R128(x++)); + d = _mm_aesenc_si128(d, *R128(&k[0])); + d = _mm_aesenc_si128(d, *R128(&k[1])); + d = _mm_aesenc_si128(d, *R128(&k[2])); + d = _mm_aesenc_si128(d, *R128(&k[3])); + d = _mm_aesenc_si128(d, *R128(&k[4])); + d = 
_mm_aesenc_si128(d, *R128(&k[5])); + d = _mm_aesenc_si128(d, *R128(&k[6])); + d = _mm_aesenc_si128(d, *R128(&k[7])); + d = _mm_aesenc_si128(d, *R128(&k[8])); + d = _mm_aesenc_si128(d, *R128(&k[9])); + _mm_storeu_si128((R128(out + i * AES_BLOCK_SIZE)), d); + } +} + +#if defined(_MSC_VER) +BOOL SetLockPagesPrivilege(HANDLE hProcess, BOOL bEnable) +{ + struct + { + DWORD count; + LUID_AND_ATTRIBUTES privilege[1]; + } info; + + HANDLE token; + if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &token)) + return FALSE; + + info.count = 1; + info.privilege[0].Attributes = bEnable ? SE_PRIVILEGE_ENABLED : 0; + + if(!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &(info.privilege[0].Luid))) + return FALSE; + + if(!AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &info, 0, NULL, NULL)) + return FALSE; + + if (GetLastError() != ERROR_SUCCESS) + return FALSE; + + CloseHandle(token); + + return TRUE; + +} +#endif + +void slow_hash_allocate_state(void) +{ + int state = 0; + if(hp_state != NULL) + return; + +#if defined(_MSC_VER) + SetLockPagesPrivilege(GetCurrentProcess(), TRUE); + hp_state = (uint8_t *) VirtualAlloc(hp_state, MEMORY, MEM_LARGE_PAGES | + MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); +#else + hp_state = mmap(0, MEMORY, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0); + if(hp_state == MAP_FAILED) + hp_state = NULL; +#endif + hp_allocated = 1; + if(hp_state == NULL) + { + hp_allocated = 0; + hp_state = (uint8_t *) malloc(MEMORY); + } +} + +void slow_hash_free_state(void) +{ + if(hp_state == NULL) + return; + + if(!hp_allocated) + free(hp_state); + else + { +#if defined(_MSC_VER) + VirtualFree(hp_state, MEMORY, MEM_RELEASE); +#else + munmap(hp_state, MEMORY); +#endif + } + + hp_state = NULL; + hp_allocated = 0; } void cn_slow_hash(const void *data, size_t length, char *hash) { - uint8_t long_state[MEMORY]; - uint8_t text[INIT_SIZE_BYTE]; - uint8_t a[AES_BLOCK_SIZE]; - uint8_t b[AES_BLOCK_SIZE]; - uint8_t d[AES_BLOCK_SIZE]; - uint8_t aes_key[AES_KEY_SIZE]; - RDATA_ALIGN16 uint8_t expandedKey[256]; + RDATA_ALIGN16 uint8_t expandedKey[240]; + uint8_t text[INIT_SIZE_BYTE]; + RDATA_ALIGN16 uint64_t a[2]; + RDATA_ALIGN16 uint64_t b[2]; + RDATA_ALIGN16 uint64_t c[2]; + RDATA_ALIGN16 uint8_t aes_key[AES_KEY_SIZE]; union cn_slow_hash_state state; + __m128i _a, _b, _c; + uint64_t hi, lo; size_t i, j; - uint8_t *p = NULL; + uint64_t *p = NULL; oaes_ctx *aes_ctx; - int useAes = check_aes_hw(); + static void (*const extra_hashes[4])(const void *, size_t, char *) = { hash_extra_blake, hash_extra_groestl, hash_extra_jh, hash_extra_skein }; + // this isn't supposed to happen, but guard against it for now. 
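+    // hp_state is thread-local (THREADV), so each hashing thread lazily gets
+    // its own scratchpad here even if the caller skipped the explicit
+    // slow_hash_allocate_state() / slow_hash_free_state() pairing.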
+ if(hp_state == NULL) + slow_hash_allocate_state(); + hash_process(&state.hs, data, length); memcpy(text, state.init, INIT_SIZE_BYTE); - aes_ctx = (oaes_ctx *) oaes_alloc(); - oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE); - - // use aligned data - memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len); - if(useAes) { + aes_expand_key(state.hs.b, expandedKey); for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { - for(j = 0; j < INIT_SIZE_BLK; j++) - aesni_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey); - memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + aes_pseudo_round(text, text, expandedKey, INIT_SIZE_BLK); + memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); } } else { + aes_ctx = (oaes_ctx *) oaes_alloc(); + oaes_key_import_data(aes_ctx, state.hs.b, AES_KEY_SIZE); for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { for(j = 0; j < INIT_SIZE_BLK; j++) - aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey); + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); - memcpy(&long_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); + memcpy(&hp_state[i * INIT_SIZE_BYTE], text, INIT_SIZE_BYTE); } } @@ -201,60 +387,52 @@ void cn_slow_hash(const void *data, size_t length, char *hash) U64(b)[0] = U64(&state.k[16])[0] ^ U64(&state.k[48])[0]; U64(b)[1] = U64(&state.k[16])[1] ^ U64(&state.k[48])[1]; - for(i = 0; i < ITER / 2; i++) - { - #define TOTALBLOCKS (MEMORY / AES_BLOCK_SIZE) - #define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) - - // Iteration 1 - p = &long_state[state_index(a)]; - - if(useAes) - _mm_storeu_si128(R128(p), _mm_aesenc_si128(_mm_loadu_si128(R128(p)), _mm_loadu_si128(R128(a)))); - else - aesb_single_round(p, p, a); - - xor_blocks(b, p); - swap_blocks(b, p); - swap_blocks(a, b); - - // Iteration 2 - p = &long_state[state_index(a)]; - - mul(a, p, d); - sum_half_blocks(b, d); - swap_blocks(b, p); - xor_blocks(b, p); - swap_blocks(a, b); - } - - memcpy(text, state.init, INIT_SIZE_BYTE); - oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); - memcpy(expandedKey, aes_ctx->key->exp_data, aes_ctx->key->exp_data_len); + _b = _mm_load_si128(R128(b)); + // this is ugly but the branching affects the loop somewhat so put it outside. 
if(useAes) { - for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + for(i = 0; i < ITER / 2; i++) { - for(j = 0; j < INIT_SIZE_BLK; j++) - { - xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); - aesni_pseudo_round(&text[j * AES_BLOCK_SIZE], &text[j * AES_BLOCK_SIZE], expandedKey); - } + pre_aes(); + _c = _mm_aesenc_si128(_c, _a); + // post_aes(), optimized scratchpad twiddling (credits to dga) + post_aes(); } } else { + for(i = 0; i < ITER / 2; i++) + { + pre_aes(); + aesb_single_round((uint8_t *) &_c, (uint8_t *) &_c, (uint8_t *) &_a); + post_aes(); + } + } + + memcpy(text, state.init, INIT_SIZE_BYTE); + if(useAes) + { + aes_expand_key(&state.hs.b[32], expandedKey); + for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) + { + // add the xor to the pseudo round + aes_pseudo_round_xor(text, text, expandedKey, &hp_state[i * INIT_SIZE_BYTE], INIT_SIZE_BLK); + } + } + else + { + oaes_key_import_data(aes_ctx, &state.hs.b[32], AES_KEY_SIZE); for(i = 0; i < MEMORY / INIT_SIZE_BYTE; i++) { for(j = 0; j < INIT_SIZE_BLK; j++) { - xor_blocks(&text[j * AES_BLOCK_SIZE], &long_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); - aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], expandedKey); + xor_blocks(&text[j * AES_BLOCK_SIZE], &hp_state[i * INIT_SIZE_BYTE + j * AES_BLOCK_SIZE]); + aesb_pseudo_round(&text[AES_BLOCK_SIZE * j], &text[AES_BLOCK_SIZE * j], aes_ctx->key->exp_data); } } + oaes_free((OAES_CTX **) &aes_ctx); } - oaes_free((OAES_CTX **) &aes_ctx); memcpy(state.init, text, INIT_SIZE_BYTE); hash_permutation(&state.hs); extra_hashes[state.hs.b[0] & 3](&state, 200, hash); From 5fcac268b7982051557b64a3d12dffcc5feb37e6 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 03:35:55 -0700 Subject: [PATCH 6/7] Update miner.cpp 1. Added support for scratchpad memory allocation. --- src/cryptonote_core/miner.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cryptonote_core/miner.cpp b/src/cryptonote_core/miner.cpp index 2055bb15..d021b05b 100644 --- a/src/cryptonote_core/miner.cpp +++ b/src/cryptonote_core/miner.cpp @@ -23,7 +23,8 @@ using namespace epee; #include "miner.h" - +extern "C" void slow_hash_allocate_state(); +extern "C" void slow_hash_free_state(); namespace cryptonote { @@ -320,6 +321,7 @@ namespace cryptonote difficulty_type local_diff = 0; uint32_t local_template_ver = 0; block b; + slow_hash_allocate_state(); while(!m_stop) { if(m_pausers_count)//anti split workaround @@ -368,6 +370,7 @@ namespace cryptonote nonce+=m_threads_total; ++m_hashes; } + slow_hash_free_state(); LOG_PRINT_L0("Miner thread stopped ["<< th_local_index << "]"); return true; } From 156312f64adae4a6bdca7b9fc01fa7b3eb8d2452 Mon Sep 17 00:00:00 2001 From: NoodleDoodleNoodleDoodleNoodleDoodleNoo Date: Mon, 16 Jun 2014 21:13:05 -0700 Subject: [PATCH 7/7] Update slow-hash.c 1. 
Added multiplication support in 32-bit mode --- src/crypto/slow-hash.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/crypto/slow-hash.c b/src/crypto/slow-hash.c index cda66af5..46a5b5f3 100644 --- a/src/crypto/slow-hash.c +++ b/src/crypto/slow-hash.c @@ -49,13 +49,20 @@ #define U64(x) ((uint64_t *) (x)) #define R128(x) ((__m128i *) (x)) -#define SWAP(a, b) (((a) -= (b)), ((b) += (a)), ((a) = (b) - (a))) #define state_index(x) (((*((uint64_t *)x) >> 4) & (TOTALBLOCKS - 1)) << 4) #if defined(_MSC_VER) -#define __mul() lo = _umul128(c[0], b[0], &hi); +#if !defined(_WIN64) +#define __mul() lo = mul128(c[0], b[0], &hi); #else +#define __mul() lo = _umul128(c[0], b[0], &hi); +#endif +#else +#if defined(__x86_64__) #define __mul() ASM("mulq %3\n\t" : "=d"(hi), "=a"(lo) : "%a" (c[0]), "rm" (b[0]) : "cc"); +#else +#define __mul() lo = mul128(c[0], b[0], &hi); +#endif #endif #define pre_aes() \