/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.

The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:

  source code distributions include the above copyright notice, this
  list of conditions and the following disclaimer;

  binary distributions include the above copyright notice, this list
  of conditions and the following disclaimer in their documentation.

This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/

#include <stdint.h>

#if defined(__cplusplus)
extern "C"
{
#endif

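/* Basic AES parameters: TABLE_ALIGN is the requested alignment for the lookup
   tables, WPOLY is the field polynomial x^8 + x^4 + x^3 + x + 1 used for the
   GF(2^8) reductions below, N_COLS is the number of 32-bit columns in the
   state, AES_BLOCK_SIZE is in bytes, and RC_LENGTH (10 here) is the number of
   key-schedule round constants listed in rc_data. */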
#define TABLE_ALIGN 32
#define WPOLY 0x011b
#define N_COLS 4
#define AES_BLOCK_SIZE 16
#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))

#if defined(_MSC_VER)
#define ALIGN __declspec(align(TABLE_ALIGN))
#elif defined(__GNUC__)
#define ALIGN __attribute__ ((aligned(16)))
#else
#define ALIGN
#endif

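/* The state is handled as four 32-bit words.  word_in/word_out access the
   c-th word of a byte buffer through a uint32_t cast, so as written the code
   expects the in/out/key pointers to be usable as 32-bit words and the host
   byte order to match the little-endian packing of bytes2word below.
   state_in/state_out load and store the whole four-word state, and round()
   applies a per-column round macro to each of the four columns. */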
#define rf1(r,c) (r)
#define word_in(x,c) (*((uint32_t*)(x)+(c)))
#define word_out(x,c,v) (*((uint32_t*)(x)+(c)) = (v))

#define s(x,c) x[c]
#define si(y,x,c) (s(y,c) = word_in(x, c))
#define so(y,x,c) word_out(y, c, s(x,c))
#define state_in(y,x) si(y,x,0); si(y,x,1); si(y,x,2); si(y,x,3)
#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
#define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3)
#define to_byte(x) ((x) & 0xff)
#define bval(x,n) to_byte((x) >> (8 * (n)))

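/* fwd_var(x,r,c) selects the state word that supplies row r of output column
   c, i.e. column (c + r) mod 4, which is exactly the ShiftRows byte routing.
   fwd_rnd computes one output column of a forward round as the round-key word
   XORed with four table lookups (the tables fold SubBytes and MixColumns
   together). */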
#define fwd_var(x,r,c)\
 ( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
 : r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
 : r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
 :          ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))

#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))

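/* sb_data(w) expands to the 256 entries of the AES S-box with the macro w
   applied to each entry; instantiating it with u0..u3 below produces the
   four forward "T" tables. */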
#define sb_data(w) {\
    w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
    w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
    w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
    w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
    w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
    w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
    w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
    w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
    w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
    w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
    w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
    w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
    w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
    w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
    w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
    w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
    w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
    w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
    w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
    w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
    w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
    w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
    w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
    w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
    w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
    w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
    w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
    w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
    w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
    w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
    w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
    w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }

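/* rc_data(w) lists the RC_LENGTH key-schedule round constants (0x01 .. 0x36).
   It is kept for completeness; no table is instantiated from it in this
   file. */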
#define rc_data(w) {\
    w(0x01), w(0x02), w(0x04), w(0x08), w(0x10), w(0x20), w(0x40), w(0x80),\
    w(0x1b), w(0x36) }

#define bytes2word(b0, b1, b2, b3) (((uint32_t)(b3) << 24) | \
    ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | (b0))

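/* Word builders: w0..w3 place a byte in byte lane 0..3 of a word.  u0..u3
   build the forward table entries, i.e. the MixColumns column {02,01,01,03}
   times the byte p, in its four rotations; v0..v3 carry the inverse
   coefficients {0e,09,0d,0b} and are not used by the forward-only code
   below. */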
#define h0(x) (x)

#define w0(p) bytes2word(p, 0, 0, 0)
#define w1(p) bytes2word(0, p, 0, 0)
#define w2(p) bytes2word(0, 0, p, 0)
#define w3(p) bytes2word(0, 0, 0, p)

#define u0(p) bytes2word(f2(p), p, p, f3(p))
#define u1(p) bytes2word(f3(p), f2(p), p, p)
#define u2(p) bytes2word(p, f3(p), f2(p), p)
#define u3(p) bytes2word(p, p, f3(p), f2(p))

#define v0(p) bytes2word(fe(p), f9(p), fd(p), fb(p))
#define v1(p) bytes2word(fb(p), fe(p), f9(p), fd(p))
#define v2(p) bytes2word(fd(p), fb(p), fe(p), f9(p))
#define v3(p) bytes2word(f9(p), fd(p), fb(p), fe(p))

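/* GF(2^8) multiplication helpers, reduced modulo WPOLY: f2, f4 and f8
   multiply by 2, 4 and 8 (any carried-out bits are folded back in by
   multiplying them by WPOLY), while f3, f9, fb, fd and fe give
   multiplication by 3, 9, 11, 13 and 14. */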
#define f2(x) ((x<<1) ^ (((x>>7) & 1) * WPOLY))
#define f4(x) ((x<<2) ^ (((x>>6) & 1) * WPOLY) ^ (((x>>6) & 2) * WPOLY))
#define f8(x) ((x<<3) ^ (((x>>5) & 1) * WPOLY) ^ (((x>>5) & 2) * WPOLY) ^ (((x>>5) & 4) * WPOLY))
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))

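/* t_dec/t_set/t_use paste table identifiers (all three resolve to t_fn for
   the arguments used here), and d_4 declares an ALIGN-ed set of four
   256-entry 32-bit tables initialised by applying a data macro such as
   sb_data to four word builders. */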
#define t_dec(m,n) t_##m##n
#define t_set(m,n) t_##m##n
#define t_use(m,n) t_##m##n

#define d_4(t,n,b,e,f,g,h) ALIGN const t n[4][256] = { b(e), b(f), b(g), b(h) }

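/* four_tables XORs one table lookup per state byte: vf picks the state word
   that feeds row r of column c (ShiftRows) and rf picks which byte of that
   word to use (rf1 simply selects byte r). */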
#define four_tables(x,tab,vf,rf,c) \
    (tab[0][bval(vf(x,0,c),rf(0,c))] \
   ^ tab[1][bval(vf(x,1,c),rf(1,c))] \
   ^ tab[2][bval(vf(x,2,c),rf(2,c))] \
   ^ tab[3][bval(vf(x,3,c),rf(3,c))])

d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3);

#if !defined(STATIC)
#define STATIC
#endif

#if !defined(INLINE)
#define INLINE
#endif

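/* Apply one forward AES round (SubBytes, ShiftRows, MixColumns and
   AddRoundKey with the first round key of expandedKey) to the 16-byte block
   at in, writing the result to out.  Unlike full AES there is no initial
   AddRoundKey before the round. */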
STATIC INLINE void aesb_single_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey)
{
    uint32_t b0[4], b1[4];
    const uint32_t *kp = (uint32_t *) expandedKey;
    state_in(b0, in);

    round(fwd_rnd, b1, b0, kp);

    state_out(out, b1);
}

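/* Apply ten forward AES rounds using round keys 0..9 of expandedKey
   (10 * AES_BLOCK_SIZE bytes).  This is a "pseudo" encryption: there is no
   initial AddRoundKey and the final round keeps MixColumns, so the result is
   not a standard AES-128 block encryption. */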
STATIC INLINE void aesb_pseudo_round(const uint8_t *in, uint8_t *out, uint8_t *expandedKey)
{
    uint32_t b0[4], b1[4];
    const uint32_t *kp = (uint32_t *) expandedKey;
    state_in(b0, in);

    round(fwd_rnd, b1, b0, kp);
    round(fwd_rnd, b0, b1, kp + 1 * N_COLS);
    round(fwd_rnd, b1, b0, kp + 2 * N_COLS);
    round(fwd_rnd, b0, b1, kp + 3 * N_COLS);
    round(fwd_rnd, b1, b0, kp + 4 * N_COLS);
    round(fwd_rnd, b0, b1, kp + 5 * N_COLS);
    round(fwd_rnd, b1, b0, kp + 6 * N_COLS);
    round(fwd_rnd, b0, b1, kp + 7 * N_COLS);
    round(fwd_rnd, b1, b0, kp + 8 * N_COLS);
    round(fwd_rnd, b0, b1, kp + 9 * N_COLS);

    state_out(out, b0);
}

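/* Usage sketch (illustrative only; the buffer names below are hypothetical
   and no key schedule is provided by this file).  The caller must supply an
   expanded key holding at least 10 round keys, i.e. 10 * AES_BLOCK_SIZE
   bytes:

       uint8_t round_keys[10 * AES_BLOCK_SIZE] = {0};   // filled by a key schedule elsewhere
       uint8_t block_in[AES_BLOCK_SIZE] = {0};
       uint8_t block_out[AES_BLOCK_SIZE];

       aesb_single_round(block_in, block_out, round_keys);   // one forward round, first round key
       aesb_pseudo_round(block_in, block_out, round_keys);   // ten forward rounds, keys 0..9
*/
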
#if defined(__cplusplus)
}
#endif