b649e03
Create new BlockPolicyEstimator for fee estimates (Alex Morcos)
@@ -1,5 +1,5 @@ | |||
#!/usr/bin/env python2 | |||
# Copyright (c) 2014 The Bitcoin Core developers | |||
# Copyright (c) 2014-2015 The Bitcoin Core developers | |||
# Distributed under the MIT software license, see the accompanying | |||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. | |||
@@ -11,82 +11,249 @@ from test_framework import BitcoinTestFramework | |||
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException | |||
from util import * | |||
# Construct 2 trivial P2SHs and the ScriptSigs that spend them
# so we can create many transactions without needing to spend
# time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP" | |||
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP" | |||
# Associated ScriptSigs that satisfy P2SH_1 and P2SH_2
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP" | |||
SCRIPT_SIG = ["0451025175", "0451025275"] | |||
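For reference, here is how the first scriptSig hex string decodes (a reviewer annotation in Python 3; the byte values come straight from the constants above):

# Decode SCRIPT_SIG[0] = "0451025175" (annotation only, not part of the test):
#   04        - scriptSig length prefix: 4 bytes follow
#   51        - OP_1 (pushes "true", which the redeem script leaves on the stack)
#   02 51 75  - push 2 bytes: the redeem script "OP_1 OP_DROP" (0x51 0x75)
sig = bytes.fromhex("0451025175")
assert sig[0] == len(sig) - 1 and sig[1] == 0x51 and sig[2:] == bytes.fromhex("025175")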
def satoshi_round(amount): | |||
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) | |||
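A quick illustration of satoshi_round's truncating behavior (reviewer example, not part of the test):

assert satoshi_round("0.123456789") == Decimal("0.12345678")  # truncates extra digits
assert satoshi_round("0.000000019") == Decimal("0.00000001")  # ROUND_DOWN, never up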
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): | |||
''' | |||
Create and send a transaction with a random fee. | |||
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form. | |||
The function takes a list of confirmed outputs and unconfirmed outputs | |||
and attempts to use the confirmed list first for its inputs. | |||
It adds the newly created outputs to the unconfirmed list. | |||
Returns (raw transaction, fee) | |||
''' | |||
# It's best to exponentially distribute our random fees | |||
# because the buckets are exponentially spaced. | |||
# Exponentially distributed from 1-128 * fee_increment | |||
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) | |||
# Total fee ranges from min_fee to min_fee + 127*fee_increment | |||
fee = min_fee - fee_increment + satoshi_round(rand_fee) | |||
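# (Reviewer note: 1.1892 is ~2**0.25, so randint(0,28) picks one of 29
# geometrically spaced fees covering fee_increment * [1 .. 2**7 = 128];
# with the min_fee - fee_increment offset above, the total fee therefore
# spans [min_fee, min_fee + 127*fee_increment], as the comment says.)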
inputs = [] | |||
total_in = Decimal("0.00000000") | |||
while total_in <= (amount + fee) and len(conflist) > 0: | |||
t = conflist.pop(0) | |||
total_in += t["amount"] | |||
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) | |||
if total_in <= amount + fee: | |||
while total_in <= (amount + fee) and len(unconflist) > 0: | |||
t = unconflist.pop(0) | |||
total_in += t["amount"] | |||
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) | |||
if total_in <= amount + fee: | |||
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) | |||
outputs = {} | |||
outputs[P2SH_1] = total_in - amount - fee | |||
outputs[P2SH_2] = amount | |||
rawtx = from_node.createrawtransaction(inputs, outputs) | |||
# createrawtransaction constructs a transaction that is ready to be signed.
# These transactions don't need to be signed, but we still have to insert the ScriptSig | |||
# that will satisfy the ScriptPubKey. | |||
completetx = rawtx[0:10] | |||
inputnum = 0 | |||
for inp in inputs: | |||
completetx += rawtx[10+82*inputnum:82+82*inputnum] | |||
completetx += SCRIPT_SIG[inp["vout"]] | |||
completetx += rawtx[84+82*inputnum:92+82*inputnum] | |||
inputnum += 1 | |||
completetx += rawtx[10+82*inputnum:] | |||
txid = from_node.sendrawtransaction(completetx, True) | |||
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) | |||
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) | |||
return (completetx, fee) | |||
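The hex offsets in the splice above assume the standard serialization of a raw transaction whose scriptSigs are empty; the following annotated layout (a reviewer sketch, two hex characters per byte) shows where the constants 10, 82, and 84 come from:

# rawtx layout assumed by the splice (reviewer annotation):
#   [0:8]     version (4 bytes)
#   [8:10]    input count (1-byte varint; assumes fewer than 0xfd inputs)
#   per input i, 82 hex chars starting at 10 + 82*i:
#     [+0:+64]   previous txid (32 bytes)
#     [+64:+72]  previous vout (4 bytes)
#     [+72:+74]  scriptSig length "00" (empty -- replaced by SCRIPT_SIG)
#     [+74:+82]  sequence (4 bytes)
# So rawtx[10+82*i : 82+82*i] copies txid+vout, SCRIPT_SIG supplies its own
# length byte plus the 4-byte script, and rawtx[84+82*i : 92+82*i] copies
# the sequence while skipping the original empty-script length byte.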
def split_inputs(from_node, txins, txouts, initial_split = False): | |||
''' | |||
We need to generate a lot of very small inputs so we can generate a ton of transactions | |||
and they will have low priority. | |||
This function takes an input from txins, and creates and sends a transaction | |||
which splits the value into 2 outputs which are appended to txouts. | |||
''' | |||
prevtxout = txins.pop() | |||
inputs = [] | |||
outputs = {} | |||
inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] }) | |||
half_change = satoshi_round(prevtxout["amount"]/2) | |||
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") | |||
outputs[P2SH_1] = half_change | |||
outputs[P2SH_2] = rem_change | |||
rawtx = from_node.createrawtransaction(inputs, outputs) | |||
# If this is the initial split we actually need to sign the transaction | |||
# Otherwise we just need to insert the proper ScriptSig
if (initial_split) : | |||
completetx = from_node.signrawtransaction(rawtx)["hex"] | |||
else : | |||
completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:] | |||
txid = from_node.sendrawtransaction(completetx, True) | |||
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) | |||
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) | |||
def check_estimates(node, fees_seen, max_invalid, print_estimates = True): | |||
''' | |||
This function calls estimatefee and verifies that the estimates | |||
meet certain invariants. | |||
''' | |||
all_estimates = [ node.estimatefee(i) for i in range(1,26) ] | |||
if print_estimates: | |||
print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) | |||
delta = 1.0e-6 # account for rounding error | |||
last_e = max(fees_seen) | |||
for e in filter(lambda x: x >= 0, all_estimates): | |||
# Estimates should be within the bounds of what transactions fees actually were: | |||
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen): | |||
raise AssertionError("Estimated fee (%f) out of range (%f,%f)" | |||
%(float(e), min(fees_seen), max(fees_seen))) | |||
# Estimates should be monotonically decreasing | |||
if float(e)-delta > last_e: | |||
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" | |||
%(float(e),float(last_e))) | |||
last_e = e | |||
valid_estimate = False | |||
invalid_estimates = 0 | |||
for e in all_estimates: | |||
if e >= 0: | |||
valid_estimate = True | |||
else: | |||
invalid_estimates += 1 | |||
# Once we're at a high enough confirmation count that we can give an estimate | |||
# we should have estimates for all higher confirmation counts
if valid_estimate and e < 0: | |||
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate") | |||
# Check on the expected number of different confirmation counts | |||
# that we might not have valid estimates for | |||
if invalid_estimates > max_invalid: | |||
raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) | |||
return all_estimates | |||
class EstimateFeeTest(BitcoinTestFramework): | |||
def setup_network(self): | |||
''' | |||
We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of small low priority outputs | |||
which we will use to generate our transactions. | |||
''' | |||
self.nodes = [] | |||
self.nodes.append(start_node(0, self.options.tmpdir, | |||
["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"])) | |||
# Node1 mines small-but-not-tiny blocks, and allows free transactions. | |||
# Use node0 to mine blocks for input splitting | |||
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", | |||
"-relaypriority=0", "-whitelist=127.0.0.1"])) | |||
print("This test is time consuming, please be patient") | |||
print("Splitting inputs to small size so we can generate low priority tx's") | |||
self.txouts = [] | |||
self.txouts2 = [] | |||
# Split a coinbase into two transaction puzzle outputs | |||
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True) | |||
# Mine | |||
while (len(self.nodes[0].getrawmempool()) > 0): | |||
self.nodes[0].generate(1) | |||
# Repeatedly split those 2 outputs, doubling twice for each rep | |||
# Use txouts to monitor the available utxo, since these won't be tracked in wallet | |||
reps = 0 | |||
while (reps < 5): | |||
#Double txouts to txouts2 | |||
while (len(self.txouts)>0): | |||
split_inputs(self.nodes[0], self.txouts, self.txouts2) | |||
while (len(self.nodes[0].getrawmempool()) > 0): | |||
self.nodes[0].generate(1) | |||
#Double txouts2 to txouts | |||
while (len(self.txouts2)>0): | |||
split_inputs(self.nodes[0], self.txouts2, self.txouts) | |||
while (len(self.nodes[0].getrawmempool()) > 0): | |||
self.nodes[0].generate(1) | |||
reps += 1 | |||
print("Finished splitting") | |||
# Now we can connect the other nodes; we didn't want to connect them earlier
# so that the estimates would not be affected by the splitting transactions
# Node1 mines blocks that are small but still bigger than the expected transaction rate,
# and allows free transactions. | |||
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, | |||
# so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for | |||
# 6 or 7 transactions) | |||
# (17k is room enough for 110 or so transactions) | |||
self.nodes.append(start_node(1, self.options.tmpdir, | |||
["-blockprioritysize=1500", "-blockmaxsize=2000", | |||
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"])) | |||
["-blockprioritysize=1500", "-blockmaxsize=18000", | |||
"-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"])) | |||
connect_nodes(self.nodes[1], 0) | |||
# Node2 is a stingy miner, that | |||
# produces very small blocks (room for only 3 or so transactions) | |||
node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500", | |||
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"] | |||
# produces too small blocks (room for only 70 or so transactions) | |||
node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"] | |||
self.nodes.append(start_node(2, self.options.tmpdir, node2args)) | |||
connect_nodes(self.nodes[2], 0) | |||
connect_nodes(self.nodes[0], 2) | |||
connect_nodes(self.nodes[2], 1) | |||
self.is_network_split = False | |||
self.sync_all() | |||
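The block-size comments above imply the following rough capacity arithmetic (a reviewer sketch; the ~150-byte average size of these test transactions is an assumption, not stated in the source):

RESERVED = 1000                            # CreateNewBlock starts counting at 1,000 bytes
AVG_TX_BYTES = 150.0                       # assumed size of the small puzzle txs
print((18000 - RESERVED) / AVG_TX_BYTES)   # node1: ~113 txs ("110 or so")
print((12000 - RESERVED) / AVG_TX_BYTES)   # node2: ~73 txs  ("70 or so")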
def transact_and_mine(self, numblocks, mining_node): | |||
min_fee = Decimal("0.00001") | |||
# We will now mine numblocks blocks generating on average 100 transactions between each block | |||
# We shuffle our confirmed txout set before each set of transactions | |||
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible,
# resorting to tx's that depend on the mempool when those run out | |||
for i in range(numblocks): | |||
random.shuffle(self.confutxo) | |||
for j in range(random.randrange(100-50,100+50)): | |||
from_index = random.randint(1,2) | |||
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, | |||
self.memutxo, Decimal("0.005"), min_fee, min_fee) | |||
tx_kbytes = (len(txhex)/2)/1000.0 | |||
self.fees_per_kb.append(float(fee)/tx_kbytes) | |||
sync_mempools(self.nodes[0:3],.1) | |||
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"] | |||
sync_blocks(self.nodes[0:3],.1) | |||
#update which txouts are confirmed | |||
newmem = [] | |||
for utx in self.memutxo: | |||
if utx["txid"] in mined: | |||
self.confutxo.append(utx) | |||
else: | |||
newmem.append(utx) | |||
self.memutxo = newmem | |||
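For clarity, the feerate bookkeeping in transact_and_mine works from the length of the hex string; a worked example with hypothetical numbers:

# Hypothetical example of the fee-per-kB bookkeeping above:
txhex_len, fee = 500, Decimal("0.00005")   # 500 hex chars = 250 bytes
tx_kbytes = (txhex_len / 2) / 1000.0       # -> 0.25 kB
assert abs(float(fee) / tx_kbytes - 0.0002) < 1e-12   # 0.0002 BTC per kB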
def run_test(self): | |||
# Prime the memory pool with pairs of transactions | |||
# (high-priority, random fee and zero-priority, random fee) | |||
min_fee = Decimal("0.001") | |||
fees_per_kb = []; | |||
for i in range(12): | |||
(txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"), | |||
min_fee, min_fee, 20) | |||
tx_kbytes = (len(txhex)/2)/1000.0 | |||
fees_per_kb.append(float(fee)/tx_kbytes) | |||
# Mine blocks with node2 until the memory pool clears: | |||
count_start = self.nodes[2].getblockcount() | |||
while len(self.nodes[2].getrawmempool()) > 0: | |||
self.nodes[2].generate(1) | |||
self.sync_all() | |||
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ] | |||
print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates])) | |||
self.fees_per_kb = [] | |||
self.memutxo = [] | |||
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting | |||
print("Checking estimates for 1/2/3/6/15/25 blocks") | |||
print("Creating transactions and mining them with a huge block size") | |||
# Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
self.transact_and_mine(30, self.nodes[0]) | |||
check_estimates(self.nodes[1], self.fees_per_kb, 1) | |||
# Estimates should be within the bounds of what transactions fees actually were: | |||
delta = 1.0e-6 # account for rounding error | |||
for e in filter(lambda x: x >= 0, all_estimates): | |||
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb): | |||
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb)) | |||
# Generate transactions while mining 30 more blocks, this time with node1: | |||
for i in range(30): | |||
for j in range(random.randrange(6-4,6+4)): | |||
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), | |||
Decimal("0.0"), min_fee, 20) | |||
tx_kbytes = (len(txhex)/2)/1000.0 | |||
fees_per_kb.append(float(fee)/tx_kbytes) | |||
self.nodes[1].generate(1) | |||
self.sync_all() | |||
print("Creating transactions and mining them with a block size that can't keep up") | |||
# Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(20, self.nodes[2]) | |||
check_estimates(self.nodes[1], self.fees_per_kb, 3) | |||
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ] | |||
print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates])) | |||
for e in filter(lambda x: x >= 0, all_estimates): | |||
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb): | |||
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb)) | |||
print("Creating transactions and mining them at a block size that is just big enough") | |||
# Generate transactions while mining 40 more blocks, this time with node1 | |||
# which mines blocks with capacity just above the rate that transactions are being created | |||
self.transact_and_mine(40, self.nodes[1]) | |||
check_estimates(self.nodes[1], self.fees_per_kb, 2) | |||
# Finish by mining a normal-sized block: | |||
while len(self.nodes[0].getrawmempool()) > 0: | |||
self.nodes[0].generate(1) | |||
self.sync_all() | |||
final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ] | |||
print("Final fee estimates: "+str([ str(e) for e in final_estimates])) | |||
while len(self.nodes[1].getrawmempool()) > 0: | |||
self.nodes[1].generate(1) | |||
sync_blocks(self.nodes[0:3],.1) | |||
print("Final estimates after emptying mempools") | |||
check_estimates(self.nodes[1], self.fees_per_kb, 2) | |||
if __name__ == '__main__': | |||
EstimateFeeTest().main() |
@@ -33,7 +33,7 @@ def check_json_precision(): | |||
if satoshis != 2000000000000003: | |||
raise RuntimeError("JSON encode/decode loses precision") | |||
def sync_blocks(rpc_connections): | |||
def sync_blocks(rpc_connections, wait=1): | |||
""" | |||
Wait until everybody has the same block count | |||
""" | |||
@@ -41,9 +41,9 @@ def sync_blocks(rpc_connections): | |||
counts = [ x.getblockcount() for x in rpc_connections ] | |||
if counts == [ counts[0] ]*len(counts): | |||
break | |||
time.sleep(1) | |||
time.sleep(wait) | |||
def sync_mempools(rpc_connections): | |||
def sync_mempools(rpc_connections, wait=1): | |||
""" | |||
Wait until everybody has the same transactions in their memory | |||
pools | |||
@@ -56,7 +56,7 @@ def sync_mempools(rpc_connections): | |||
num_match = num_match+1 | |||
if num_match == len(rpc_connections): | |||
break | |||
time.sleep(1) | |||
time.sleep(wait) | |||
bitcoind_processes = {} | |||
@@ -106,6 +106,7 @@ BITCOIN_CORE_H = \ | |||
netbase.h \ | |||
net.h \ | |||
noui.h \ | |||
policy/fees.h \ | |||
pow.h \ | |||
primitives/block.h \ | |||
primitives/transaction.h \ | |||
@@ -182,6 +183,7 @@ libbitcoin_server_a_SOURCES = \ | |||
miner.cpp \ | |||
net.cpp \ | |||
noui.cpp \ | |||
policy/fees.cpp \ | |||
pow.cpp \ | |||
rest.cpp \ | |||
rpcblockchain.cpp \ |
@@ -57,6 +57,7 @@ BITCOIN_TESTS =\ | |||
test/multisig_tests.cpp \ | |||
test/netbase_tests.cpp \ | |||
test/pmt_tests.cpp \ | |||
test/policyestimator_tests.cpp \ | |||
test/pow_tests.cpp \ | |||
test/rpc_tests.cpp \ | |||
test/sanity_tests.cpp \ |
@@ -987,7 +987,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa | |||
CAmount nFees = nValueIn-nValueOut; | |||
double dPriority = view.GetPriority(tx, chainActive.Height()); | |||
CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height()); | |||
CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx)); | |||
unsigned int nSize = entry.GetTxSize(); | |||
// Don't accept it if it can't get into a block | |||
@@ -1053,7 +1053,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa | |||
} | |||
// Store transaction in memory | |||
pool.addUnchecked(hash, entry); | |||
pool.addUnchecked(hash, entry, !IsInitialBlockDownload()); | |||
} | |||
SyncWithWallets(tx, NULL); | |||
@@ -2087,7 +2087,7 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock * | |||
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); | |||
// Remove conflicting transactions from the mempool. | |||
list<CTransaction> txConflicted; | |||
mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted); | |||
mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload()); | |||
mempool.check(pcoinsTip); | |||
// Update chainActive & related variables. | |||
UpdateTip(pindexNew); |
@@ -0,0 +1,529 @@ | |||
// Copyright (c) 2009-2010 Satoshi Nakamoto | |||
// Copyright (c) 2009-2015 The Bitcoin developers | |||
// Distributed under the MIT software license, see the accompanying | |||
// file COPYING or http://www.opensource.org/licenses/mit-license.php. | |||
#include "policy/fees.h" | |||
#include "amount.h" | |||
#include "primitives/transaction.h" | |||
#include "streams.h" | |||
#include "txmempool.h" | |||
#include "util.h" | |||
void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets, | |||
unsigned int maxConfirms, double _decay, std::string _dataTypeString) | |||
{ | |||
decay = _decay; | |||
dataTypeString = _dataTypeString; | |||
for (unsigned int i = 0; i < defaultBuckets.size(); i++) { | |||
buckets.push_back(defaultBuckets[i]); | |||
bucketMap[defaultBuckets[i]] = i; | |||
} | |||
confAvg.resize(maxConfirms); | |||
curBlockConf.resize(maxConfirms); | |||
unconfTxs.resize(maxConfirms); | |||
for (unsigned int i = 0; i < maxConfirms; i++) { | |||
confAvg[i].resize(buckets.size()); | |||
curBlockConf[i].resize(buckets.size()); | |||
unconfTxs[i].resize(buckets.size()); | |||
} | |||
oldUnconfTxs.resize(buckets.size()); | |||
curBlockTxCt.resize(buckets.size()); | |||
txCtAvg.resize(buckets.size()); | |||
curBlockVal.resize(buckets.size()); | |||
avg.resize(buckets.size()); | |||
} | |||
// Zero out the data for the current block | |||
void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight) | |||
{ | |||
for (unsigned int j = 0; j < buckets.size(); j++) { | |||
oldUnconfTxs[j] += unconfTxs[nBlockHeight%unconfTxs.size()][j]; | |||
unconfTxs[nBlockHeight%unconfTxs.size()][j] = 0; | |||
for (unsigned int i = 0; i < curBlockConf.size(); i++) | |||
curBlockConf[i][j] = 0; | |||
curBlockTxCt[j] = 0; | |||
curBlockVal[j] = 0; | |||
} | |||
} | |||
void TxConfirmStats::Record(int blocksToConfirm, double val) | |||
{ | |||
// blocksToConfirm is 1-based | |||
if (blocksToConfirm < 1) | |||
return; | |||
unsigned int bucketindex = bucketMap.lower_bound(val)->second; | |||
for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) { | |||
curBlockConf[i - 1][bucketindex]++; | |||
} | |||
curBlockTxCt[bucketindex]++; | |||
curBlockVal[bucketindex] += val; | |||
} | |||
void TxConfirmStats::UpdateMovingAverages() | |||
{ | |||
for (unsigned int j = 0; j < buckets.size(); j++) { | |||
for (unsigned int i = 0; i < confAvg.size(); i++) | |||
confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j]; | |||
avg[j] = avg[j] * decay + curBlockVal[j]; | |||
txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j]; | |||
} | |||
} | |||
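UpdateMovingAverages applies the same exponential decay every block, so the DEFAULT_DECAY of .998 declared in the header fixes the estimator's memory; a quick Python check (reviewer sketch) of the half-life quoted there:

import math
half_life_blocks = math.log(0.5) / math.log(0.998)
assert 345 < half_life_blocks < 347          # ~346 blocks
days = half_life_blocks * 10 / (60 * 24)     # at ~10 minutes per block
assert 2.3 < days < 2.5                      # ~2.4 days, as the header comment says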
// returns -1 on error conditions | |||
double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal, | |||
double successBreakPoint, bool requireGreater, | |||
unsigned int nBlockHeight) | |||
{ | |||
// Counters for a bucket (or range of buckets) | |||
double nConf = 0; // Number of tx's confirmed within the confTarget | |||
double totalNum = 0; // Total number of tx's that were ever confirmed | |||
int extraNum = 0; // Number of tx's still in mempool for confTarget or longer | |||
int maxbucketindex = buckets.size() - 1; | |||
// requireGreater means we are looking for the lowest fee/priority such that all higher | |||
// values pass, so we start at maxbucketindex (highest fee) and look at successively
// smaller buckets until we reach failure. Otherwise, we are looking for the highest | |||
// fee/priority such that all lower values fail, and we go in the opposite direction. | |||
unsigned int startbucket = requireGreater ? maxbucketindex : 0; | |||
int step = requireGreater ? -1 : 1; | |||
// We'll combine buckets until we have enough samples. | |||
// The near and far variables will define the range we've combined | |||
// The best variables are the last range we saw which still had a high | |||
// enough confirmation rate to count as success. | |||
// The cur variables are the current range we're counting. | |||
unsigned int curNearBucket = startbucket; | |||
unsigned int bestNearBucket = startbucket; | |||
unsigned int curFarBucket = startbucket; | |||
unsigned int bestFarBucket = startbucket; | |||
bool foundAnswer = false; | |||
unsigned int bins = unconfTxs.size(); | |||
// Start counting from highest(default) or lowest fee/pri transactions | |||
for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) { | |||
curFarBucket = bucket; | |||
nConf += confAvg[confTarget - 1][bucket]; | |||
totalNum += txCtAvg[bucket]; | |||
for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++) | |||
extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket]; | |||
extraNum += oldUnconfTxs[bucket]; | |||
// If we have enough transaction data points in this range of buckets, | |||
// we can test for success | |||
// (Only count the confirmed data points, so that each confirmation count | |||
// will be looking at the same amount of data and same bucket breaks) | |||
if (totalNum >= sufficientTxVal / (1 - decay)) { | |||
double curPct = nConf / (totalNum + extraNum); | |||
// Check to see if we are no longer getting confirmed at the success rate | |||
if (requireGreater && curPct < successBreakPoint) | |||
break; | |||
if (!requireGreater && curPct > successBreakPoint) | |||
break; | |||
// Otherwise update the cumulative stats, and the bucket variables | |||
// and reset the counters | |||
else { | |||
foundAnswer = true; | |||
nConf = 0; | |||
totalNum = 0; | |||
extraNum = 0; | |||
bestNearBucket = curNearBucket; | |||
bestFarBucket = curFarBucket; | |||
curNearBucket = bucket + step; | |||
} | |||
} | |||
} | |||
double median = -1; | |||
double txSum = 0; | |||
// Calculate the "average" fee of the best bucket range that met success conditions | |||
// Find the bucket with the median transaction and then report the average fee from that bucket | |||
// This is a compromise between finding the median which we can't since we don't save all tx's | |||
// and reporting the average which is less accurate | |||
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket; | |||
unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket; | |||
for (unsigned int j = minBucket; j <= maxBucket; j++) { | |||
txSum += txCtAvg[j]; | |||
} | |||
if (foundAnswer && txSum != 0) { | |||
txSum = txSum / 2; | |||
for (unsigned int j = minBucket; j <= maxBucket; j++) { | |||
if (txCtAvg[j] < txSum) | |||
txSum -= txCtAvg[j]; | |||
else { // we're in the right bucket | |||
median = avg[j] / txCtAvg[j]; | |||
break; | |||
} | |||
} | |||
} | |||
LogPrint("estimatefee", "%3d: For conf success %s %4.2f need %s %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n", | |||
confTarget, requireGreater ? ">" : "<", successBreakPoint, dataTypeString, | |||
requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket], | |||
100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum); | |||
return median; | |||
} | |||
void TxConfirmStats::Write(CAutoFile& fileout) | |||
{ | |||
fileout << decay; | |||
fileout << buckets; | |||
fileout << avg; | |||
fileout << txCtAvg; | |||
fileout << confAvg; | |||
} | |||
void TxConfirmStats::Read(CAutoFile& filein) | |||
{ | |||
// Read data file into temporary variables and do some very basic sanity checking | |||
std::vector<double> fileBuckets; | |||
std::vector<double> fileAvg; | |||
std::vector<std::vector<double> > fileConfAvg; | |||
std::vector<double> fileTxCtAvg; | |||
double fileDecay; | |||
size_t maxConfirms; | |||
size_t numBuckets; | |||
filein >> fileDecay; | |||
if (fileDecay <= 0 || fileDecay >= 1) | |||
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)"); | |||
filein >> fileBuckets; | |||
numBuckets = fileBuckets.size(); | |||
if (numBuckets <= 1 || numBuckets > 1000) | |||
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 fee/pri buckets"); | |||
filein >> fileAvg; | |||
if (fileAvg.size() != numBuckets) | |||
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri average bucket count"); | |||
filein >> fileTxCtAvg; | |||
if (fileTxCtAvg.size() != numBuckets) | |||
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count"); | |||
filein >> fileConfAvg; | |||
maxConfirms = fileConfAvg.size(); | |||
if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) // one week | |||
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms"); | |||
for (unsigned int i = 0; i < maxConfirms; i++) { | |||
if (fileConfAvg[i].size() != numBuckets) | |||
throw std::runtime_error("Corrupt estimates file. Mismatch in fee/pri conf average bucket count"); | |||
} | |||
// Now that we've processed the entire fee estimate data file and not | |||
// thrown any errors, we can copy it to our data structures | |||
decay = fileDecay; | |||
buckets = fileBuckets; | |||
avg = fileAvg; | |||
confAvg = fileConfAvg; | |||
txCtAvg = fileTxCtAvg; | |||
bucketMap.clear(); | |||
// Resize the current block variables which aren't stored in the data file | |||
// to match the number of confirms and buckets | |||
curBlockConf.resize(maxConfirms); | |||
for (unsigned int i = 0; i < maxConfirms; i++) { | |||
curBlockConf[i].resize(buckets.size()); | |||
} | |||
curBlockTxCt.resize(buckets.size()); | |||
curBlockVal.resize(buckets.size()); | |||
unconfTxs.resize(maxConfirms); | |||
for (unsigned int i = 0; i < maxConfirms; i++) { | |||
unconfTxs[i].resize(buckets.size()); | |||
} | |||
oldUnconfTxs.resize(buckets.size()); | |||
for (unsigned int i = 0; i < buckets.size(); i++) | |||
bucketMap[buckets[i]] = i; | |||
LogPrint("estimatefee", "Reading estimates: %u %s buckets counting confirms up to %u blocks\n", | |||
numBuckets, dataTypeString, maxConfirms); | |||
} | |||
unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val) | |||
{ | |||
unsigned int bucketindex = bucketMap.lower_bound(val)->second; | |||
unsigned int blockIndex = nBlockHeight % unconfTxs.size(); | |||
unconfTxs[blockIndex][bucketindex]++; | |||
LogPrint("estimatefee", "adding to %s\n", dataTypeString); | |||
return bucketindex; | |||
} | |||
void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex) | |||
{ | |||
//nBestSeenHeight is not updated yet for the new block | |||
int blocksAgo = nBestSeenHeight - entryHeight; | |||
if (nBestSeenHeight == 0) // the BlockPolicyEstimator hasn't seen any blocks yet | |||
blocksAgo = 0; | |||
if (blocksAgo < 0) { | |||
LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n"); | |||
return; //This can't happen because we call this with our best seen height; no entries can have a higher one
} | |||
if (blocksAgo >= (int)unconfTxs.size()) { | |||
if (oldUnconfTxs[bucketindex] > 0) | |||
oldUnconfTxs[bucketindex]--; | |||
else | |||
LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n", | |||
bucketindex); | |||
} | |||
else { | |||
unsigned int blockIndex = entryHeight % unconfTxs.size(); | |||
if (unconfTxs[blockIndex][bucketindex] > 0) | |||
unconfTxs[blockIndex][bucketindex]--; | |||
else | |||
LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n", | |||
blockIndex, bucketindex); | |||
} | |||
} | |||
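unconfTxs is effectively a circular buffer of per-bucket counters indexed by block height modulo the tracked confirmation depth; a minimal Python model of the indexing used by NewTx and removeTx above (a reviewer simplification, not the class itself):

MAX_CONFIRMS, N_BUCKETS = 25, 4                 # toy sizes
unconf = [[0] * N_BUCKETS for _ in range(MAX_CONFIRMS)]
def new_tx(height, bucket):                     # mirrors TxConfirmStats::NewTx
    unconf[height % MAX_CONFIRMS][bucket] += 1
def remove_tx(entry_height, bucket):            # mirrors the in-window branch above
    unconf[entry_height % MAX_CONFIRMS][bucket] -= 1
new_tx(1000, 2); remove_tx(1000, 2)
assert unconf[1000 % MAX_CONFIRMS][2] == 0      # rows are reused every 25 blocks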
void CBlockPolicyEstimator::removeTx(uint256 hash) | |||
{ | |||
std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash); | |||
if (pos == mapMemPoolTxs.end()) { | |||
LogPrint("estimatefee", "Blockpolicy error mempool tx %s not found for removeTx\n", | |||
hash.ToString().c_str()); | |||
return; | |||
} | |||
TxConfirmStats *stats = pos->second.stats; | |||
unsigned int entryHeight = pos->second.blockHeight; | |||
unsigned int bucketIndex = pos->second.bucketIndex; | |||
if (stats != NULL) | |||
stats->removeTx(entryHeight, nBestSeenHeight, bucketIndex); | |||
mapMemPoolTxs.erase(hash); | |||
} | |||
CBlockPolicyEstimator::CBlockPolicyEstimator(const CFeeRate& _minRelayFee) | |||
: nBestSeenHeight(0) | |||
{ | |||
minTrackedFee = _minRelayFee < CFeeRate(MIN_FEERATE) ? CFeeRate(MIN_FEERATE) : _minRelayFee; | |||
std::vector<double> vfeelist; | |||
for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_FEERATE; bucketBoundary *= FEE_SPACING) { | |||
vfeelist.push_back(bucketBoundary); | |||
} | |||
vfeelist.push_back(INF_FEERATE); | |||
feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "FeeRate"); | |||
minTrackedPriority = AllowFreeThreshold() < MIN_PRIORITY ? MIN_PRIORITY : AllowFreeThreshold(); | |||
std::vector<double> vprilist; | |||
for (double bucketBoundary = minTrackedPriority; bucketBoundary <= MAX_PRIORITY; bucketBoundary *= PRI_SPACING) { | |||
vprilist.push_back(bucketBoundary); | |||
} | |||
vprilist.push_back(INF_PRIORITY); | |||
priStats.Initialize(vprilist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY, "Priority"); | |||
feeUnlikely = CFeeRate(0); | |||
feeLikely = CFeeRate(INF_FEERATE); | |||
priUnlikely = 0; | |||
priLikely = INF_PRIORITY; | |||
} | |||
bool CBlockPolicyEstimator::isFeeDataPoint(const CFeeRate &fee, double pri) | |||
{ | |||
if ((pri < minTrackedPriority && fee >= minTrackedFee) || | |||
(pri < priUnlikely && fee > feeLikely)) { | |||
return true; | |||
} | |||
return false; | |||
} | |||
bool CBlockPolicyEstimator::isPriDataPoint(const CFeeRate &fee, double pri) | |||
{ | |||
if ((fee < minTrackedFee && pri >= minTrackedPriority) || | |||
(fee < feeUnlikely && pri > priLikely)) { | |||
return true; | |||
} | |||
return false; | |||
} | |||
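Taken together with processTransaction below, the two predicates route each transaction to exactly one data set; a condensed Python paraphrase of that decision (reviewer sketch, parameter names mine):

def classify(fee, pri, min_fee, min_pri, fee_likely, pri_likely,
             fee_unlikely, pri_unlikely):
    # priority data point: the fee clearly wasn't the reason it confirms
    if fee == 0 or (fee < min_fee and pri >= min_pri) or \
            (fee < fee_unlikely and pri > pri_likely):
        return "priority"
    # fee data point: the priority clearly wasn't the reason it confirms
    if (pri < min_pri and fee >= min_fee) or \
            (pri < pri_unlikely and fee > fee_likely):
        return "fee"
    return "neither"   # ambiguous transactions are not counted at all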
void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate) | |||
{ | |||
unsigned int txHeight = entry.GetHeight(); | |||
uint256 hash = entry.GetTx().GetHash(); | |||
if (mapMemPoolTxs[hash].stats != NULL) { | |||
LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n", | |||
hash.ToString().c_str()); | |||
return; | |||
} | |||
if (txHeight < nBestSeenHeight) { | |||
// Ignore side chains and re-orgs; assuming they are random they don't | |||
// affect the estimate. We'll potentially double count transactions in 1-block reorgs. | |||
return; | |||
} | |||
// Only want to be updating estimates when our blockchain is synced, | |||
// otherwise we'll miscalculate how many blocks it's taking to get included.
if (!fCurrentEstimate) | |||
return; | |||
if (!entry.WasClearAtEntry()) { | |||
// This transaction depends on other transactions in the mempool to | |||
// be included in a block before it will be able to be included, so | |||
// we shouldn't include it in our calculations | |||
return; | |||
} | |||
// Fees are stored and reported as BTC-per-kb: | |||
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); | |||
// Want the priority of the tx at confirmation. However we don't know | |||
// what that will be, and it's too hard to continue updating it,
// so use the starting priority as a proxy
double curPri = entry.GetPriority(txHeight); | |||
mapMemPoolTxs[hash].blockHeight = txHeight; | |||
LogPrint("estimatefee", "Blockpolicy mempool tx %s ", hash.ToString().substr(0,10)); | |||
// Record this as a priority estimate | |||
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) { | |||
mapMemPoolTxs[hash].stats = &priStats; | |||
mapMemPoolTxs[hash].bucketIndex = priStats.NewTx(txHeight, curPri); | |||
} | |||
// Record this as a fee estimate | |||
else if (isFeeDataPoint(feeRate, curPri)) { | |||
mapMemPoolTxs[hash].stats = &feeStats; | |||
mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK()); | |||
} | |||
else { | |||
LogPrint("estimatefee", "not adding\n"); | |||
} | |||
} | |||
void CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry) | |||
{ | |||
if (!entry.WasClearAtEntry()) { | |||
// This transaction depended on other transactions in the mempool to | |||
// be included in a block before it was able to be included, so | |||
// we shouldn't include it in our calculations | |||
return; | |||
} | |||
// How many blocks did it take for miners to include this transaction? | |||
// blocksToConfirm is 1-based, so a transaction included in the earliest | |||
// possible block has confirmation count of 1 | |||
int blocksToConfirm = nBlockHeight - entry.GetHeight(); | |||
if (blocksToConfirm <= 0) { | |||
// This can't happen because we don't process transactions from a block with a height | |||
// lower than our greatest seen height | |||
LogPrint("estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n"); | |||
return; | |||
} | |||
// Fees are stored and reported as BTC-per-kb: | |||
CFeeRate feeRate(entry.GetFee(), entry.GetTxSize()); | |||
// Want the priority of the tx at confirmation. The priority when it | |||
// entered the mempool could easily be very small and change quickly | |||
double curPri = entry.GetPriority(nBlockHeight); | |||
// Record this as a priority estimate | |||
if (entry.GetFee() == 0 || isPriDataPoint(feeRate, curPri)) { | |||
priStats.Record(blocksToConfirm, curPri); | |||
} | |||
// Record this as a fee estimate | |||
else if (isFeeDataPoint(feeRate, curPri)) { | |||
feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK()); | |||
} | |||
} | |||
void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight, | |||
std::vector<CTxMemPoolEntry>& entries, bool fCurrentEstimate) | |||
{ | |||
if (nBlockHeight <= nBestSeenHeight) { | |||
// Ignore side chains and re-orgs; assuming they are random | |||
// they don't affect the estimate. | |||
// And if an attacker can re-org the chain at will, then | |||
// you've got much bigger problems than "attacker can influence | |||
// transaction fees." | |||
return; | |||
} | |||
nBestSeenHeight = nBlockHeight; | |||
// Only want to be updating estimates when our blockchain is synced, | |||
// otherwise we'll miscalculate how many blocks it's taking to get included.
if (!fCurrentEstimate) | |||
return; | |||
// Update the dynamic cutoffs | |||
// a fee/priority is "likely" the reason your tx was included in a block if >85% of such tx's | |||
// were confirmed in 2 blocks and is "unlikely" if <50% were confirmed in 10 blocks | |||
LogPrint("estimatefee", "Blockpolicy recalculating dynamic cutoffs:\n"); | |||
priLikely = priStats.EstimateMedianVal(2, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBlockHeight); | |||
if (priLikely == -1) | |||
priLikely = INF_PRIORITY; | |||
double feeLikelyEst = feeStats.EstimateMedianVal(2, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBlockHeight); | |||
if (feeLikelyEst == -1) | |||
feeLikely = CFeeRate(INF_FEERATE); | |||
else | |||
feeLikely = CFeeRate(feeLikelyEst); | |||
priUnlikely = priStats.EstimateMedianVal(10, SUFFICIENT_PRITXS, UNLIKELY_PCT, false, nBlockHeight); | |||
if (priUnlikely == -1) | |||
priUnlikely = 0; | |||
double feeUnlikelyEst = feeStats.EstimateMedianVal(10, SUFFICIENT_FEETXS, UNLIKELY_PCT, false, nBlockHeight); | |||
if (feeUnlikelyEst == -1) | |||
feeUnlikely = CFeeRate(0); | |||
else | |||
feeUnlikely = CFeeRate(feeUnlikelyEst); | |||
// Clear the current block states | |||
feeStats.ClearCurrent(nBlockHeight); | |||
priStats.ClearCurrent(nBlockHeight); | |||
// Repopulate the current block states | |||
for (unsigned int i = 0; i < entries.size(); i++) | |||
processBlockTx(nBlockHeight, entries[i]); | |||
// Update all exponential averages with the current block states | |||
feeStats.UpdateMovingAverages(); | |||
priStats.UpdateMovingAverages(); | |||
LogPrint("estimatefee", "Blockpolicy after updating estimates for %u confirmed entries, new mempool map size %u\n", | |||
entries.size(), mapMemPoolTxs.size()); | |||
} | |||
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) | |||
{ | |||
// Return failure if trying to analyze a target we're not tracking | |||
if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms()) | |||
return CFeeRate(0); | |||
double median = feeStats.EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); | |||
if (median < 0) | |||
return CFeeRate(0); | |||
return CFeeRate(median); | |||
} | |||
double CBlockPolicyEstimator::estimatePriority(int confTarget) | |||
{ | |||
// Return failure if trying to analyze a target we're not tracking | |||
if (confTarget <= 0 || (unsigned int)confTarget > priStats.GetMaxConfirms()) | |||
return -1; | |||
return priStats.EstimateMedianVal(confTarget, SUFFICIENT_PRITXS, MIN_SUCCESS_PCT, true, nBestSeenHeight); | |||
} | |||
void CBlockPolicyEstimator::Write(CAutoFile& fileout) | |||
{ | |||
fileout << nBestSeenHeight; | |||
feeStats.Write(fileout); | |||
priStats.Write(fileout); | |||
} | |||
void CBlockPolicyEstimator::Read(CAutoFile& filein) | |||
{ | |||
int nFileBestSeenHeight; | |||
filein >> nFileBestSeenHeight; | |||
feeStats.Read(filein); | |||
priStats.Read(filein); | |||
nBestSeenHeight = nFileBestSeenHeight; | |||
} |
@@ -0,0 +1,276 @@ | |||
// Copyright (c) 2009-2010 Satoshi Nakamoto | |||
// Copyright (c) 2009-2015 The Bitcoin developers | |||
// Distributed under the MIT software license, see the accompanying | |||
// file COPYING or http://www.opensource.org/licenses/mit-license.php. | |||
#ifndef BITCOIN_POLICYESTIMATOR_H | |||
#define BITCOIN_POLICYESTIMATOR_H | |||
#include "amount.h" | |||
#include "uint256.h" | |||
#include <map> | |||
#include <string> | |||
#include <vector> | |||
class CAutoFile; | |||
class CFeeRate; | |||
class CTxMemPoolEntry; | |||
/** \class CBlockPolicyEstimator | |||
* The BlockPolicyEstimator is used for estimating the fee or priority needed | |||
* for a transaction to be included in a block within a certain number of | |||
* blocks. | |||
* | |||
* At a high level the algorithm works by grouping transactions into buckets | |||
* based on having similar priorities or fees and then tracking how long it | |||
* takes transactions in the various buckets to be mined. It operates under | |||
* the assumption that in general transactions of higher fee/priority will be | |||
* included in blocks before transactions of lower fee/priority. So for | |||
* example if you wanted to know what fee you should put on a transaction to | |||
* be included in a block within the next 5 blocks, you would start by looking | |||
* at the bucket with the highest fee transactions and verifying that a
* sufficiently high percentage of them were confirmed within 5 blocks and | |||
* then you would look at the next highest fee bucket, and so on, stopping at | |||
* the last bucket to pass the test. The average fee of transactions in this | |||
* bucket will give you an indication of the lowest fee you can put on a | |||
* transaction and still have a sufficiently high chance of being confirmed | |||
* within your desired 5 blocks. | |||
* | |||
* When a transaction enters the mempool or is included within a block we | |||
* decide whether it can be used as a data point for fee estimation, priority | |||
* estimation or neither. If the value of exactly one of those properties was | |||
* below the required minimum it can be used to estimate the other. In | |||
* addition, if a priori our estimation code would indicate that the | |||
* transaction would be much more quickly included in a block because of one | |||
* of the properties compared to the other, we can also decide to use it as | |||
* an estimate for that property. | |||
* | |||
* Here is a brief description of the implementation for fee estimation. | |||
* When a transaction that counts for fee estimation enters the mempool, we | |||
* track the height of the block chain at entry. Whenever a block comes in, | |||
* we count the number of transactions in each bucket and the total amount of fee | |||
* paid in each bucket. Then we calculate how many blocks Y it took each | |||
* transaction to be mined and we track an array of counters in each bucket | |||
* for how long it took transactions to get confirmed from 1 to a max of 25
* and we increment all the counters from Y up to 25. This is because for any | |||
* number Z>=Y the transaction was successfully mined within Z blocks. We | |||
* want to save a history of this information, so at any time we have a | |||
* counter of the total number of transactions that happened in a given fee | |||
* bucket and the total number that were confirmed in each number 1-25 blocks | |||
* or less for any bucket. We save this history by keeping an exponentially | |||
* decaying moving average of each one of these stats. Furthermore we also | |||
* keep track of the number of unmined (in mempool) transactions in each bucket
* and for how many blocks they have been outstanding and use that to increase | |||
* the number of transactions we've seen in that fee bucket when calculating | |||
* an estimate for any number of confirmations below the number of blocks | |||
* they've been outstanding. | |||
*/ | |||
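The bucket-walking search described above reduces to the following minimal Python model (a reviewer sketch; the real EstimateMedianVal additionally merges sparse buckets until it has enough samples and counts still-unconfirmed mempool transactions against the success rate):

# buckets ordered from highest fee to lowest; each entry is
# (avg_fee, n_confirmed_within_target, n_total_confirmed)
def estimate(buckets, min_success=0.85):
    best = None
    for avg_fee, n_conf, n_total in buckets:     # walk downward in fee
        if n_total == 0 or n_conf / float(n_total) < min_success:
            break                                # stop at the first failing bucket
        best = avg_fee                           # last bucket that passed
    return best                                  # None: even the top bucket fails

# 95% and 90% pass the 85% bar, 70% fails -> the answer is the 90% bucket
assert estimate([(50.0, 95, 100), (40.0, 90, 100), (30.0, 70, 100)]) == 40.0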
/** | |||
* We will instantiate two instances of this class, one to track transactions | |||
* that were included in a block due to fee, and one for tx's included due to | |||
* priority. We will lump transactions into a bucket according to their approximate | |||
* fee or priority and then track how long it took for those txs to be included in a block | |||
* | |||
* The tracking of unconfirmed (mempool) transactions is completely independent of the | |||
* historical tracking of transactions that have been confirmed in a block. | |||
*/ | |||
class TxConfirmStats | |||
{ | |||
private: | |||
//Define the buckets we will group transactions into (both fee buckets and priority buckets) | |||
std::vector<double> buckets; // The upper-bound of the range for the bucket (inclusive) | |||
std::map<double, unsigned int> bucketMap; // Map of bucket upper-bound to index into all vectors by bucket | |||
// For each bucket X: | |||
// Count the total # of txs in each bucket | |||
// Track the historical moving average of this total over blocks | |||
std::vector<double> txCtAvg; | |||
// and calculate the total for the current block to update the moving average
std::vector<int> curBlockTxCt; | |||
// Count the total # of txs confirmed within Y blocks in each bucket | |||
// Track the historical moving average of these totals over blocks
std::vector<std::vector<double> > confAvg; // confAvg[Y][X] | |||
// and calculate the totals for the current block to update the moving averages
std::vector<std::vector<int> > curBlockConf; // curBlockConf[Y][X] | |||
// Sum the total priority/fee of all tx's in each bucket | |||
// Track the historical moving average of this total over blocks | |||
std::vector<double> avg; | |||
// and calculate the total for the current block to update the moving average | |||
std::vector<double> curBlockVal; | |||
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X | |||
// Combine the total value with the tx counts to calculate the avg fee/priority per bucket | |||
std::string dataTypeString; | |||
double decay; | |||
// Mempool counts of outstanding transactions | |||
// For each bucket X, track the number of transactions in the mempool | |||
// that are unconfirmed for each possible confirmation value Y | |||
std::vector<std::vector<int> > unconfTxs; //unconfTxs[Y][X] | |||
// transactions still unconfirmed after MAX_CONFIRMS for each bucket | |||
std::vector<int> oldUnconfTxs; | |||
public: | |||
/** | |||
* Initialize the data structures. This is called by BlockPolicyEstimator's | |||
* constructor with default values. | |||
* @param defaultBuckets contains the upper limits for the bucket boundaries
* @param maxConfirms max number of confirms to track | |||
* @param decay how much to decay the historical moving average per block | |||
* @param dataTypeString for logging purposes | |||
*/ | |||
void Initialize(std::vector<double>& defaultBuckets, unsigned int maxConfirms, double decay, std::string dataTypeString); | |||
/** Clear the state of the curBlock variables to start counting for the new block */ | |||
void ClearCurrent(unsigned int nBlockHeight); | |||
/** | |||
* Record a new transaction data point in the current block stats | |||
* @param blocksToConfirm the number of blocks it took this transaction to confirm | |||
* @param val either the fee or the priority when entered of the transaction | |||
* @warning blocksToConfirm is 1-based and has to be >= 1 | |||
*/ | |||
void Record(int blocksToConfirm, double val); | |||
/** Record a new transaction entering the mempool*/ | |||
unsigned int NewTx(unsigned int nBlockHeight, double val); | |||
/** Remove a transaction from mempool tracking stats*/ | |||
void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, | |||
unsigned int bucketIndex); | |||
/** Update our estimates by decaying our historical moving average and updating | |||
with the data gathered from the current block */ | |||
void UpdateMovingAverages(); | |||
/** | |||
* Calculate a fee or priority estimate. Find the lowest value bucket (or range of buckets | |||
* to make sure we have enough data points) whose transactions still have sufficient likelihood | |||
* of being confirmed within the target number of confirmations | |||
* @param confTarget target number of confirmations | |||
* @param sufficientTxVal required average number of transactions per block in a bucket range | |||
* @param minSuccess the success probability we require | |||
* @param requireGreater return the lowest fee/pri such that all higher values pass minSuccess OR | |||
* return the highest fee/pri such that all lower values fail minSuccess | |||
* @param nBlockHeight the current block height | |||
*/ | |||
double EstimateMedianVal(int confTarget, double sufficientTxVal, | |||
double minSuccess, bool requireGreater, unsigned int nBlockHeight); | |||
/** Return the max number of confirms we're tracking */ | |||
unsigned int GetMaxConfirms() { return confAvg.size(); } | |||
/** Write state of estimation data to a file*/ | |||
void Write(CAutoFile& fileout); | |||
/** | |||
* Read saved state of estimation data from a file and replace all internal data structures and | |||
* variables with this state. | |||
*/ | |||
void Read(CAutoFile& filein); | |||
}; | |||
/** Track confirm delays up to 25 blocks, can't estimate beyond that */ | |||
static const unsigned int MAX_BLOCK_CONFIRMS = 25; | |||
/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */ | |||
static const double DEFAULT_DECAY = .998; | |||
/** Require greater than 85% of X fee transactions to be confirmed within Y blocks for X to be big enough */ | |||
static const double MIN_SUCCESS_PCT = .85; | |||
static const double UNLIKELY_PCT = .5; | |||
/** Require an avg of 1 tx in the combined fee bucket per block to have stat significance */ | |||
static const double SUFFICIENT_FEETXS = 1; | |||
/** Require only an avg of 1 tx every 5 blocks in the combined pri bucket (way less pri txs) */ | |||
static const double SUFFICIENT_PRITXS = .2; | |||
// Minimum and Maximum values for tracking fees and priorities | |||
static const double MIN_FEERATE = 10; | |||
static const double MAX_FEERATE = 1e7; | |||
static const double INF_FEERATE = MAX_MONEY; | |||
static const double MIN_PRIORITY = 10; | |||
static const double MAX_PRIORITY = 1e16; | |||
static const double INF_PRIORITY = 1e9 * MAX_MONEY; | |||
// We have to lump transactions into buckets based on fee or priority, but we want to be able
// to give accurate estimates over a large range of potential fees and priorities.
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets */ | |||
static const double FEE_SPACING = 1.1; | |||
/** Spacing of Priority buckets */ | |||
static const double PRI_SPACING = 2; | |||
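With these constants the number of buckets is fixed at construction; assuming the default minimum tracked feerate of 1,000 satoshis per kB (an assumption about the node's minRelayFee, not stated here), the fee side works out to roughly 97 buckets plus the INF_FEERATE catch-all (reviewer sketch):

import math
n_fee_buckets = int(math.log(1e7 / 1000.0) / math.log(1.1)) + 1
assert n_fee_buckets == 97   # boundaries 1000 * 1.1**k for k = 0..96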
/** | |||
* We want to be able to estimate fees or priorities that are needed on tx's to be included in | |||
* a certain number of blocks. Every time a block is added to the best chain, this class records | |||
* stats on the transactions included in that block | |||
*/ | |||
class CBlockPolicyEstimator | |||
{ | |||
public: | |||
/** Create new BlockPolicyEstimator and initialize stats tracking classes with default values */ | |||
CBlockPolicyEstimator(const CFeeRate& minRelayFee); | |||
/** Process all the transactions that have been included in a block */ | |||
void processBlock(unsigned int nBlockHeight, | |||
std::vector<CTxMemPoolEntry>& entries, bool fCurrentEstimate); | |||
/** Process a transaction confirmed in a block*/ | |||
void processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry& entry); | |||
/** Process a transaction accepted to the mempool*/ | |||
void processTransaction(const CTxMemPoolEntry& entry, bool fCurrentEstimate); | |||
/** Remove a transaction from the mempool tracking stats*/ | |||
void removeTx(uint256 hash); | |||
/** Is this transaction likely included in a block because of its fee?*/ | |||
bool isFeeDataPoint(const CFeeRate &fee, double pri); | |||
/** Is this transaction likely included in a block because of its priority?*/ | |||
bool isPriDataPoint(const CFeeRate &fee, double pri); | |||
/** Return a fee estimate */ | |||
CFeeRate estimateFee(int confTarget); | |||
/** Return a priority estimate */ | |||
double estimatePriority(int confTarget); | |||
/** Write estimation data to a file */ | |||
void Write(CAutoFile& fileout); | |||
/** Read estimation data from a file */ | |||
void Read(CAutoFile& filein); | |||
private: | |||
CFeeRate minTrackedFee; //! Passed to constructor to avoid dependency on main | |||
double minTrackedPriority; //! Set to AllowFreeThreshold | |||
unsigned int nBestSeenHeight; | |||
struct TxStatsInfo | |||
{ | |||
TxConfirmStats *stats; | |||
unsigned int blockHeight; | |||
unsigned int bucketIndex; | |||
TxStatsInfo() : stats(NULL), blockHeight(0), bucketIndex(0) {} | |||
}; | |||
// map of txids to information about that transaction | |||
std::map<uint256, TxStatsInfo> mapMemPoolTxs; | |||
/** Classes to track historical data on transaction confirmations */ | |||
TxConfirmStats feeStats, priStats; | |||
/** Breakpoints to help determine whether a transaction was confirmed by priority or fee */
CFeeRate feeLikely, feeUnlikely; | |||
double priLikely, priUnlikely; | |||
}; | |||
#endif /*BITCOIN_POLICYESTIMATOR_H */ |
@@ -0,0 +1,186 @@ | |||
// Copyright (c) 2011-2015 The Bitcoin Core developers | |||
// Distributed under the MIT software license, see the accompanying | |||
// file COPYING or http://www.opensource.org/licenses/mit-license.php. | |||
#include "policy/fees.h" | |||
#include "txmempool.h" | |||
#include "uint256.h" | |||
#include "util.h" | |||
#include "test/test_bitcoin.h" | |||
#include <boost/test/unit_test.hpp> | |||
BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup) | |||
BOOST_AUTO_TEST_CASE(BlockPolicyEstimates) | |||
{ | |||
CTxMemPool mpool(CFeeRate(1000)); | |||
CAmount basefee(2000); | |||
double basepri = 1e6; | |||
CAmount deltaFee(100); | |||
double deltaPri=5e5; | |||
std::vector<CAmount> feeV[2]; | |||
std::vector<double> priV[2]; | |||
// Populate vectors of increasing fees or priorities | |||
for (int j = 0; j < 10; j++) { | |||
//V[0] is for fee transactions | |||
feeV[0].push_back(basefee * (j+1)); | |||
priV[0].push_back(0); | |||
//V[1] is for priority transactions | |||
feeV[1].push_back(CAmount(0)); | |||
priV[1].push_back(basepri * pow(10, j+1)); | |||
} | |||
// Store the hashes of transactions that have been | |||
// added to the mempool by their associated fee/pri
// txHashes[j] is populated with transactions either of | |||
// fee = basefee * (j+1) OR pri = 10^6 * 10^(j+1) | |||
std::vector<uint256> txHashes[10]; | |||
// Create a transaction template | |||
CScript garbage; | |||
for (unsigned int i = 0; i < 128; i++) | |||
garbage.push_back('X'); | |||
CMutableTransaction tx; | |||
std::list<CTransaction> dummyConflicted; | |||
tx.vin.resize(1); | |||
tx.vin[0].scriptSig = garbage; | |||
tx.vout.resize(1); | |||
tx.vout[0].nValue=0LL; | |||
CFeeRate baseRate(basefee, ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION)); | |||
// Create a fake block | |||
std::vector<CTransaction> block; | |||
int blocknum = 0; | |||
// Loop through 200 blocks | |||
// With a decay of .998 and 4 fee transactions per block,
// this makes the tx count about 1.33 per bucket, above the 1 threshold
while (blocknum < 200) { | |||
for (int j = 0; j < 10; j++) { // For each fee/pri multiple | |||
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx | |||
tx.vin[0].prevout.n = 10000*blocknum+100*j+k; // make transaction unique | |||
uint256 hash = tx.GetHash(); | |||
mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx))); | |||
txHashes[j].push_back(hash); | |||
} | |||
} | |||
//Create blocks where higher fee/pri txs are included more often | |||
for (int h = 0; h <= blocknum%10; h++) { | |||
// 10/10 blocks add highest fee/pri transactions | |||
// 9/10 blocks add 2nd highest and so on until ... | |||
// 1/10 blocks add lowest fee/pri transactions | |||
while (txHashes[9-h].size()) { | |||
CTransaction btx; | |||
if (mpool.lookup(txHashes[9-h].back(), btx)) | |||
block.push_back(btx); | |||
txHashes[9-h].pop_back(); | |||
} | |||
} | |||
mpool.removeForBlock(block, ++blocknum, dummyConflicted); | |||
block.clear(); | |||
if (blocknum == 30) { | |||
// At this point we should need to combine 5 buckets to get enough data points | |||
// So estimateFee(1) should fail and estimateFee(2) should return somewhere around | |||
// 8*baserate | |||
BOOST_CHECK(mpool.estimateFee(1) == CFeeRate(0)); | |||
BOOST_CHECK(mpool.estimateFee(2).GetFeePerK() < 8*baseRate.GetFeePerK() + deltaFee); | |||
BOOST_CHECK(mpool.estimateFee(2).GetFeePerK() > 8*baseRate.GetFeePerK() - deltaFee); | |||
} | |||
} | |||
std::vector<CAmount> origFeeEst; | |||
std::vector<double> origPriEst; | |||
// Highest feerate is 10*baseRate and gets in all blocks, | |||
// second highest feerate is 9*baseRate and gets in 9/10 blocks = 90%, | |||
// third highest feerate is 8*base rate, and gets in 8/10 blocks = 80%, | |||
// so estimateFee(1) should return 9*baseRate. | |||
// Third highest feerate has 90% chance of being included by 2 blocks, | |||
// so estimateFee(2) should return 8*baseRate etc... | |||
for (int i = 1; i < 10;i++) { | |||
origFeeEst.push_back(mpool.estimateFee(i).GetFeePerK()); | |||
origPriEst.push_back(mpool.estimatePriority(i)); | |||
if (i > 1) { // Fee estimates should be monotonically decreasing | |||
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]); | |||
BOOST_CHECK(origPriEst[i-1] <= origPriEst[i-2]); | |||
} | |||
BOOST_CHECK(origFeeEst[i-1] < (10-i)*baseRate.GetFeePerK() + deltaFee); | |||
BOOST_CHECK(origFeeEst[i-1] > (10-i)*baseRate.GetFeePerK() - deltaFee); | |||
BOOST_CHECK(origPriEst[i-1] < pow(10,10-i) * basepri + deltaPri); | |||
BOOST_CHECK(origPriEst[i-1] > pow(10,10-i) * basepri - deltaPri); | |||
} | |||
// Mine 50 more blocks with no transactions happening; estimates shouldn't change.
// We haven't decayed the moving average enough, so we still have enough data points in every bucket.
while (blocknum < 250) | |||
mpool.removeForBlock(block, ++blocknum, dummyConflicted); | |||
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] + deltaFee); | |||
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee); | |||
BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] + deltaPri); | |||
BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); | |||
} | |||
// Mine 15 more blocks with lots of transactions entering the mempool and not getting mined.
// Estimates should go up.
while (blocknum < 265) { | |||
for (int j = 0; j < 10; j++) { // For each fee/pri multiple | |||
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx | |||
tx.vin[0].prevout.n = 10000*blocknum+100*j+k; | |||
uint256 hash = tx.GetHash(); | |||
mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx))); | |||
txHashes[j].push_back(hash); | |||
} | |||
} | |||
mpool.removeForBlock(block, ++blocknum, dummyConflicted); | |||
} | |||
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee); | |||
BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); | |||
} | |||
// Mine all those transactions | |||
// Estimates should still not be below original | |||
for (int j = 0; j < 10; j++) { | |||
while (txHashes[j].size()) {
CTransaction btx; | |||
if (mpool.lookup(txHashes[j].back(), btx)) | |||
block.push_back(btx); | |||
txHashes[j].pop_back(); | |||
} | |||
} | |||
mpool.removeForBlock(block, 265, dummyConflicted); | |||
block.clear(); | |||
for (int i = 1; i < 10; i++) {
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() > origFeeEst[i-1] - deltaFee); | |||
BOOST_CHECK(mpool.estimatePriority(i) > origPriEst[i-1] - deltaPri); | |||
} | |||
// Mine 100 more blocks where everything is mined every block.
// Estimates should fall below the original estimates (not possible for the
// last estimate, which was already at the lowest feerate level).
while (blocknum < 365) { | |||
for (int j = 0; j < 10; j++) { // For each fee/pri multiple | |||
for (int k = 0; k < 5; k++) { // add 4 fee txs for every priority tx | |||
tx.vin[0].prevout.n = 10000*blocknum+100*j+k; | |||
uint256 hash = tx.GetHash(); | |||
mpool.addUnchecked(hash, CTxMemPoolEntry(tx, feeV[k/4][j], GetTime(), priV[k/4][j], blocknum, mpool.HasNoInputsOf(tx))); | |||
CTransaction btx; | |||
if (mpool.lookup(hash, btx)) | |||
block.push_back(btx); | |||
} | |||
} | |||
mpool.removeForBlock(block, ++blocknum, dummyConflicted); | |||
block.clear(); | |||
} | |||
for (int i = 1; i < 9; i++) { | |||
BOOST_CHECK(mpool.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee); | |||
BOOST_CHECK(mpool.estimatePriority(i) < origPriEst[i-1] - deltaPri); | |||
} | |||
} | |||
BOOST_AUTO_TEST_SUITE_END() |
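// A self-contained sketch (not from the patch) of the decayed moving-average
// arithmetic behind the "decay .998" comment in the test above: each block,
// existing per-bucket counts are multiplied by the decay factor before new
// confirmations are added, so old observations fade geometrically while a
// steady stream of confirmations holds the count near a fixed level. The
// per-block rate of 1.33 is taken from the test comment; everything else is
// illustrative.
#include <cstdio>
int main()
{
    const double decay = 0.998;   // per-block decay, from the test comment
    const double perBlock = 1.33; // data points per bucket per block, ditto
    double count = 0.0;
    for (int block = 0; block < 200; block++)
        count = count * decay + perBlock; // decay old data, then add new
    // Prints ~219 after 200 blocks; the long-run limit is
    // perBlock / (1 - decay) = 665.
    std::printf("decayed count after 200 blocks: %g\n", count);
    return 0;
}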
@@ -8,28 +8,27 @@ | |||
#include "clientversion.h" | |||
#include "consensus/consensus.h" | |||
#include "main.h" | |||
#include "policy/fees.h" | |||
#include "streams.h" | |||
#include "util.h" | |||
#include "utilmoneystr.h" | |||
#include "version.h" | |||
#include <boost/circular_buffer.hpp> | |||
using namespace std; | |||
CTxMemPoolEntry::CTxMemPoolEntry(): | |||
nFee(0), nTxSize(0), nModSize(0), nTime(0), dPriority(0.0) | |||
nFee(0), nTxSize(0), nModSize(0), nTime(0), dPriority(0.0), hadNoDependencies(false) | |||
{ | |||
nHeight = MEMPOOL_HEIGHT; | |||
} | |||
CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee, | |||
int64_t _nTime, double _dPriority, | |||
unsigned int _nHeight): | |||
tx(_tx), nFee(_nFee), nTime(_nTime), dPriority(_dPriority), nHeight(_nHeight) | |||
unsigned int _nHeight, bool poolHasNoInputsOf): | |||
tx(_tx), nFee(_nFee), nTime(_nTime), dPriority(_dPriority), nHeight(_nHeight), | |||
hadNoDependencies(poolHasNoInputsOf) | |||
{ | |||
nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION); | |||
nModSize = tx.CalculateModifiedSize(nTxSize); | |||
} | |||
@@ -47,346 +46,15 @@ CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const | |||
return dResult; | |||
} | |||
/** | |||
* Keep track of fee/priority for transactions confirmed within N blocks | |||
*/ | |||
class CBlockAverage | |||
{ | |||
private: | |||
boost::circular_buffer<CFeeRate> feeSamples; | |||
boost::circular_buffer<double> prioritySamples; | |||
template<typename T> std::vector<T> buf2vec(boost::circular_buffer<T> buf) const | |||
{ | |||
std::vector<T> vec(buf.begin(), buf.end()); | |||
return vec; | |||
} | |||
public: | |||
CBlockAverage() : feeSamples(100), prioritySamples(100) { } | |||
void RecordFee(const CFeeRate& feeRate) { | |||
feeSamples.push_back(feeRate); | |||
} | |||
void RecordPriority(double priority) { | |||
prioritySamples.push_back(priority); | |||
} | |||
size_t FeeSamples() const { return feeSamples.size(); } | |||
size_t GetFeeSamples(std::vector<CFeeRate>& insertInto) const | |||
{ | |||
BOOST_FOREACH(const CFeeRate& f, feeSamples) | |||
insertInto.push_back(f); | |||
return feeSamples.size(); | |||
} | |||
size_t PrioritySamples() const { return prioritySamples.size(); } | |||
size_t GetPrioritySamples(std::vector<double>& insertInto) const | |||
{ | |||
BOOST_FOREACH(double d, prioritySamples) | |||
insertInto.push_back(d); | |||
return prioritySamples.size(); | |||
} | |||
/** | |||
* Used as a belt-and-suspenders check when reading, to detect
* file corruption
*/ | |||
static bool AreSane(const CFeeRate fee, const CFeeRate& minRelayFee) | |||
{ | |||
if (fee < CFeeRate(0)) | |||
return false; | |||
if (fee.GetFeePerK() > minRelayFee.GetFeePerK() * 10000) | |||
return false; | |||
return true; | |||
} | |||
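// Worked example of the bound above (default values assumed): with a minimum
// relay fee of 1000 satoshis per kB, any stored sample exceeding
// 1000 * 10000 = 10,000,000 satoshis per kB (0.1 BTC/kB) is rejected as
// file corruption, as is any negative feerate.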
static bool AreSane(const std::vector<CFeeRate>& vecFee, const CFeeRate& minRelayFee) | |||
{ | |||
BOOST_FOREACH(CFeeRate fee, vecFee) | |||
{ | |||
if (!AreSane(fee, minRelayFee)) | |||
return false; | |||
} | |||
return true; | |||
} | |||
static bool AreSane(const double priority) | |||
{ | |||
return priority >= 0; | |||
} | |||
static bool AreSane(const std::vector<double> vecPriority) | |||
{ | |||
BOOST_FOREACH(double priority, vecPriority) | |||
{ | |||
if (!AreSane(priority)) | |||
return false; | |||
} | |||
return true; | |||
} | |||
void Write(CAutoFile& fileout) const | |||
{ | |||
std::vector<CFeeRate> vecFee = buf2vec(feeSamples); | |||
fileout << vecFee; | |||
std::vector<double> vecPriority = buf2vec(prioritySamples); | |||
fileout << vecPriority; | |||
} | |||
void Read(CAutoFile& filein, const CFeeRate& minRelayFee) { | |||
std::vector<CFeeRate> vecFee; | |||
filein >> vecFee; | |||
if (AreSane(vecFee, minRelayFee)) | |||
feeSamples.insert(feeSamples.end(), vecFee.begin(), vecFee.end()); | |||
else | |||
throw runtime_error("Corrupt fee value in estimates file."); | |||
std::vector<double> vecPriority; | |||
filein >> vecPriority; | |||
if (AreSane(vecPriority)) | |||
prioritySamples.insert(prioritySamples.end(), vecPriority.begin(), vecPriority.end()); | |||
else | |||
throw runtime_error("Corrupt priority value in estimates file."); | |||
if (feeSamples.size() + prioritySamples.size() > 0) | |||
LogPrint("estimatefee", "Read %d fee samples and %d priority samples\n", | |||
feeSamples.size(), prioritySamples.size()); | |||
} | |||
}; | |||
class CMinerPolicyEstimator | |||
{ | |||
private: | |||
/** | |||
* Records observed averages of transactions that confirmed within one block, two blocks,
* three blocks, etc.
*/ | |||
std::vector<CBlockAverage> history; | |||
std::vector<CFeeRate> sortedFeeSamples; | |||
std::vector<double> sortedPrioritySamples; | |||
int nBestSeenHeight; | |||
/** | |||
* nBlocksAgo is 0 based, i.e. transactions that confirmed in the highest seen block are | |||
* nBlocksAgo == 0, transactions in the block before that are nBlocksAgo == 1 etc. | |||
*/ | |||
void seenTxConfirm(const CFeeRate& feeRate, const CFeeRate& minRelayFee, double dPriority, int nBlocksAgo) | |||
{ | |||
// Last entry records "everything else". | |||
int nBlocksTruncated = min(nBlocksAgo, (int) history.size() - 1); | |||
assert(nBlocksTruncated >= 0); | |||
// We need to guess why the transaction was included in a block-- either | |||
// because it is high-priority or because it has sufficient fees. | |||
bool sufficientFee = (feeRate > minRelayFee); | |||
bool sufficientPriority = AllowFree(dPriority); | |||
const char* assignedTo = "unassigned"; | |||
if (sufficientFee && !sufficientPriority && CBlockAverage::AreSane(feeRate, minRelayFee)) | |||
{ | |||
history[nBlocksTruncated].RecordFee(feeRate); | |||
assignedTo = "fee"; | |||
} | |||
else if (sufficientPriority && !sufficientFee && CBlockAverage::AreSane(dPriority)) | |||
{ | |||
history[nBlocksTruncated].RecordPriority(dPriority); | |||
assignedTo = "priority"; | |||
} | |||
else | |||
{ | |||
// Neither or both fee and priority were sufficient to get confirmed:
// we can't tell why this transaction got confirmed.
} | |||
LogPrint("estimatefee", "Seen TX confirm: %s: %s fee/%g priority, took %d blocks\n", | |||
assignedTo, feeRate.ToString(), dPriority, nBlocksAgo); | |||
} | |||
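// Classification sketch (illustrative numbers, not from the patch): a tx
// paying 5000 satoshis/kB against a 1000 satoshis/kB minRelayFee with
// near-zero priority is recorded under "fee"; a zero-fee tx whose inputs are
// old and valuable enough to pass AllowFree() is recorded under "priority";
// a tx satisfying both (or neither) is left "unassigned" because we can't
// tell which policy got it mined.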
public: | |||
CMinerPolicyEstimator(int nEntries) : nBestSeenHeight(0) | |||
{ | |||
history.resize(nEntries); | |||
} | |||
void seenBlock(const std::vector<CTxMemPoolEntry>& entries, int nBlockHeight, const CFeeRate minRelayFee) | |||
{ | |||
if (nBlockHeight <= nBestSeenHeight) | |||
{ | |||
// Ignore side chains and re-orgs; assuming they are random | |||
// they don't affect the estimate. | |||
// And if an attacker can re-org the chain at will, then | |||
// you've got much bigger problems than "attacker can influence | |||
// transaction fees." | |||
return; | |||
} | |||
nBestSeenHeight = nBlockHeight; | |||
// Fill up the history buckets based on how long transactions took | |||
// to confirm. | |||
std::vector<std::vector<const CTxMemPoolEntry*> > entriesByConfirmations; | |||
entriesByConfirmations.resize(history.size()); | |||
BOOST_FOREACH(const CTxMemPoolEntry& entry, entries) | |||
{ | |||
// How many blocks did it take for miners to include this transaction? | |||
int delta = nBlockHeight - entry.GetHeight(); | |||
if (delta <= 0) | |||
{ | |||
// A re-org made us lose height; this should only happen if we happen
// to re-org on a difficulty transition point: very rare! | |||
continue; | |||
} | |||
if ((delta-1) >= (int)history.size()) | |||
delta = history.size(); // Last bucket is catch-all | |||
entriesByConfirmations.at(delta-1).push_back(&entry); | |||
} | |||
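// Example of the catch-all clamp above: with the 25-entry history that
// CTxMemPool constructs, a tx entering at height 1000 and confirming at
// height 1030 has delta = 30, so (delta-1) >= 25 clamps it into
// entriesByConfirmations[24], the "25 blocks or more" bucket.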
for (size_t i = 0; i < entriesByConfirmations.size(); i++) | |||
{ | |||
std::vector<const CTxMemPoolEntry*> &e = entriesByConfirmations.at(i); | |||
// Insert at most 10 random entries per bucket, otherwise a single block | |||
// can dominate an estimate: | |||
if (e.size() > 10) { | |||
std::random_shuffle(e.begin(), e.end()); | |||
e.resize(10); | |||
} | |||
BOOST_FOREACH(const CTxMemPoolEntry* entry, e) | |||
{ | |||
// Fees are stored and reported as BTC-per-kb: | |||
CFeeRate feeRate(entry->GetFee(), entry->GetTxSize()); | |||
double dPriority = entry->GetPriority(entry->GetHeight()); // Want priority when it went IN | |||
seenTxConfirm(feeRate, minRelayFee, dPriority, i); | |||
} | |||
} | |||
// After new samples are added, we have to clear the sorted lists, | |||
// so they'll be resorted the next time someone asks for an estimate | |||
sortedFeeSamples.clear(); | |||
sortedPrioritySamples.clear(); | |||
for (size_t i = 0; i < history.size(); i++) { | |||
if (history[i].FeeSamples() + history[i].PrioritySamples() > 0) | |||
LogPrint("estimatefee", "estimates: for confirming within %d blocks based on %d/%d samples, fee=%s, prio=%g\n", | |||
i, | |||
history[i].FeeSamples(), history[i].PrioritySamples(), | |||
estimateFee(i+1).ToString(), estimatePriority(i+1)); | |||
} | |||
} | |||
/** | |||
* Can return CFeeRate(0) if we don't have any data for that many blocks back. nBlocksToConfirm is 1 based. | |||
*/ | |||
CFeeRate estimateFee(int nBlocksToConfirm) | |||
{ | |||
nBlocksToConfirm--; | |||
if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size()) | |||
return CFeeRate(0); | |||
if (sortedFeeSamples.size() == 0) | |||
{ | |||
for (size_t i = 0; i < history.size(); i++) | |||
history.at(i).GetFeeSamples(sortedFeeSamples); | |||
std::sort(sortedFeeSamples.begin(), sortedFeeSamples.end(), | |||
std::greater<CFeeRate>()); | |||
} | |||
if (sortedFeeSamples.size() < 11) | |||
{ | |||
// Eleven is Gavin's Favorite Number | |||
// ... but we also take a maximum of 10 samples per block so eleven means | |||
// we're getting samples from at least two different blocks | |||
return CFeeRate(0); | |||
} | |||
int nBucketSize = history.at(nBlocksToConfirm).FeeSamples(); | |||
// Estimates should not increase as number of confirmations goes up, | |||
// but the estimates are noisy because confirmations happen discretely | |||
// in blocks. To smooth out the estimates, use all samples in the history | |||
// and use the nth highest where n is (number of samples in previous bucket + | |||
// half the samples in nBlocksToConfirm bucket): | |||
size_t nPrevSize = 0; | |||
for (int i = 0; i < nBlocksToConfirm; i++) | |||
nPrevSize += history.at(i).FeeSamples(); | |||
size_t index = min(nPrevSize + nBucketSize/2, sortedFeeSamples.size()-1); | |||
return sortedFeeSamples[index]; | |||
} | |||
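// Worked example of the smoothing rule (sample counts assumed): if
// history[0] holds 20 fee samples and history[1] holds 30, estimateFee(2)
// computes nPrevSize = 20 and nBucketSize = 30, and returns the sample at
// index 20 + 30/2 = 35 of the descending sorted list, i.e. the 36th-highest
// feerate seen across all buckets.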
double estimatePriority(int nBlocksToConfirm) | |||
{ | |||
nBlocksToConfirm--; | |||
if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size()) | |||
return -1; | |||
if (sortedPrioritySamples.size() == 0) | |||
{ | |||
for (size_t i = 0; i < history.size(); i++) | |||
history.at(i).GetPrioritySamples(sortedPrioritySamples); | |||
std::sort(sortedPrioritySamples.begin(), sortedPrioritySamples.end(), | |||
std::greater<double>()); | |||
} | |||
if (sortedPrioritySamples.size() < 11) | |||
return -1.0; | |||
int nBucketSize = history.at(nBlocksToConfirm).PrioritySamples(); | |||
// Estimates should not increase as number of confirmations needed goes up, | |||
// but the estimates are noisy because confirmations happen discretely | |||
// in blocks. To smooth out the estimates, use all samples in the history | |||
// and use the nth highest where n is (number of samples in previous buckets + | |||
// half the samples in nBlocksToConfirm bucket). | |||
size_t nPrevSize = 0; | |||
for (int i = 0; i < nBlocksToConfirm; i++) | |||
nPrevSize += history.at(i).PrioritySamples(); | |||
size_t index = min(nPrevSize + nBucketSize/2, sortedPrioritySamples.size()-1); | |||
return sortedPrioritySamples[index]; | |||
} | |||
void Write(CAutoFile& fileout) const | |||
{ | |||
fileout << nBestSeenHeight; | |||
fileout << (uint32_t)history.size(); | |||
BOOST_FOREACH(const CBlockAverage& entry, history) | |||
{ | |||
entry.Write(fileout); | |||
} | |||
} | |||
void Read(CAutoFile& filein, const CFeeRate& minRelayFee) | |||
{ | |||
int nFileBestSeenHeight; | |||
filein >> nFileBestSeenHeight; | |||
uint32_t numEntries; | |||
filein >> numEntries; | |||
if (numEntries <= 0 || numEntries > 10000) | |||
throw runtime_error("Corrupt estimates file. Must have between 1 and 10k entries."); | |||
std::vector<CBlockAverage> fileHistory; | |||
for (size_t i = 0; i < numEntries; i++) | |||
{ | |||
CBlockAverage entry; | |||
entry.Read(filein, minRelayFee); | |||
fileHistory.push_back(entry); | |||
} | |||
// Now that we've processed the entire fee estimate data file and not | |||
// thrown any errors, we can copy it to our history | |||
nBestSeenHeight = nFileBestSeenHeight; | |||
history = fileHistory; | |||
assert(history.size() > 0); | |||
} | |||
}; | |||
CTxMemPool::CTxMemPool(const CFeeRate& _minRelayFee) : | |||
nTransactionsUpdated(0), | |||
minRelayFee(_minRelayFee) | |||
nTransactionsUpdated(0) | |||
{ | |||
// Sanity checks off by default for performance, because otherwise | |||
// accepting transactions becomes O(N^2) where N is the number | |||
// of transactions in the pool | |||
fSanityCheck = false; | |||
// 25 blocks is a compromise between using a lot of disk/memory and | |||
// trying to give accurate estimates to people who might be willing | |||
// to wait a day or two to save a fraction of a penny in fees. | |||
// Confirmation times for very-low-fee transactions that take more | |||
// than an hour or three to confirm are highly variable. | |||
minerPolicyEstimator = new CMinerPolicyEstimator(25); | |||
minerPolicyEstimator = new CBlockPolicyEstimator(_minRelayFee); | |||
} | |||
CTxMemPool::~CTxMemPool() | |||
@@ -420,20 +88,20 @@ void CTxMemPool::AddTransactionsUpdated(unsigned int n) | |||
} | |||
bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry) | |||
bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate) | |||
{ | |||
// Add to memory pool without checking anything. | |||
// Used by main.cpp AcceptToMemoryPool(), which DOES do | |||
// all the appropriate checks. | |||
LOCK(cs); | |||
{ | |||
mapTx[hash] = entry; | |||
const CTransaction& tx = mapTx[hash].GetTx(); | |||
for (unsigned int i = 0; i < tx.vin.size(); i++) | |||
mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i); | |||
nTransactionsUpdated++; | |||
totalTxSize += entry.GetTxSize(); | |||
} | |||
mapTx[hash] = entry; | |||
const CTransaction& tx = mapTx[hash].GetTx(); | |||
for (unsigned int i = 0; i < tx.vin.size(); i++) | |||
mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i); | |||
nTransactionsUpdated++; | |||
totalTxSize += entry.GetTxSize(); | |||
minerPolicyEstimator->processTransaction(entry, fCurrentEstimate); | |||
return true; | |||
} | |||
@@ -479,6 +147,7 @@ void CTxMemPool::remove(const CTransaction &origTx, std::list<CTransaction>& rem | |||
totalTxSize -= mapTx[hash].GetTxSize(); | |||
mapTx.erase(hash); | |||
nTransactionsUpdated++; | |||
minerPolicyEstimator->removeTx(hash); | |||
} | |||
} | |||
} | |||
@@ -529,7 +198,7 @@ void CTxMemPool::removeConflicts(const CTransaction &tx, std::list<CTransaction> | |||
* Called when a block is connected. Removes from mempool and updates the miner fee estimator. | |||
*/ | |||
void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight, | |||
std::list<CTransaction>& conflicts) | |||
std::list<CTransaction>& conflicts, bool fCurrentEstimate) | |||
{ | |||
LOCK(cs); | |||
std::vector<CTxMemPoolEntry> entries; | |||
@@ -539,7 +208,6 @@ void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned i | |||
if (mapTx.count(hash)) | |||
entries.push_back(mapTx[hash]); | |||
} | |||
minerPolicyEstimator->seenBlock(entries, nBlockHeight, minRelayFee); | |||
BOOST_FOREACH(const CTransaction& tx, vtx) | |||
{ | |||
std::list<CTransaction> dummy; | |||
@@ -547,9 +215,10 @@ void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned i | |||
removeConflicts(tx, conflicts); | |||
ClearPrioritisation(tx.GetHash()); | |||
} | |||
// After the txs in the new block have been removed from the mempool, update policy estimates | |||
minerPolicyEstimator->processBlock(nBlockHeight, entries, fCurrentEstimate); | |||
} | |||
void CTxMemPool::clear() | |||
{ | |||
LOCK(cs); | |||
@@ -666,7 +335,7 @@ CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const | |||
{ | |||
try { | |||
LOCK(cs); | |||
fileout << 99900; // version required to read: 0.9.99 or later | |||
fileout << 109900; // version required to read: 0.10.99 or later | |||
fileout << CLIENT_VERSION; // version that wrote the file | |||
minerPolicyEstimator->Write(fileout); | |||
} | |||
@@ -687,7 +356,7 @@ CTxMemPool::ReadFeeEstimates(CAutoFile& filein) | |||
return error("CTxMemPool::ReadFeeEstimates(): up-version (%d) fee estimate file", nVersionRequired); | |||
LOCK(cs); | |||
minerPolicyEstimator->Read(filein, minRelayFee); | |||
minerPolicyEstimator->Read(filein); | |||
} | |||
catch (const std::exception&) { | |||
LogPrintf("CTxMemPool::ReadFeeEstimates(): unable to read policy estimator data (non-fatal)"); | |||
@@ -724,6 +393,13 @@ void CTxMemPool::ClearPrioritisation(const uint256 hash) | |||
mapDeltas.erase(hash); | |||
} | |||
bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const | |||
{ | |||
for (unsigned int i = 0; i < tx.vin.size(); i++) | |||
if (exists(tx.vin[i].prevout.hash)) | |||
return false; | |||
return true; | |||
} | |||
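// Usage sketch (hypothetical transactions): if txB spends an output of txA
// while txA is still in the mempool, HasNoInputsOf(txB) returns false; once
// txA is mined and evicted, the same check on txB returns true. The fee
// estimator only tracks entries that were clear of in-mempool parents, since
// a child's confirmation time is gated by its parent's.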
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn, CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { } | |||
@@ -43,10 +43,11 @@ private: | |||
int64_t nTime; //! Local time when entering the mempool | |||
double dPriority; //! Priority when entering the mempool | |||
unsigned int nHeight; //! Chain height when entering the mempool | |||
bool hadNoDependencies; //! Not dependent on any other txs when it entered the mempool | |||
public: | |||
CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee, | |||
int64_t _nTime, double _dPriority, unsigned int _nHeight); | |||
int64_t _nTime, double _dPriority, unsigned int _nHeight, bool poolHasNoInputsOf = false); | |||
CTxMemPoolEntry(); | |||
CTxMemPoolEntry(const CTxMemPoolEntry& other); | |||
@@ -56,9 +57,10 @@ public: | |||
size_t GetTxSize() const { return nTxSize; } | |||
int64_t GetTime() const { return nTime; } | |||
unsigned int GetHeight() const { return nHeight; } | |||
bool WasClearAtEntry() const { return hadNoDependencies; } | |||
}; | |||
class CMinerPolicyEstimator; | |||
class CBlockPolicyEstimator; | |||
/** An inpoint - a combination of a transaction and an index n into its vin */ | |||
class CInPoint | |||
@@ -88,9 +90,8 @@ class CTxMemPool | |||
private: | |||
bool fSanityCheck; //! Normally false, true if -checkmempool or -regtest | |||
unsigned int nTransactionsUpdated; | |||
CMinerPolicyEstimator* minerPolicyEstimator; | |||
CBlockPolicyEstimator* minerPolicyEstimator; | |||
CFeeRate minRelayFee; //! Passed to constructor to avoid dependency on main | |||
uint64_t totalTxSize; //! sum of all mempool tx' byte sizes | |||
public: | |||
@@ -111,17 +112,22 @@ public: | |||
void check(const CCoinsViewCache *pcoins) const; | |||
void setSanityCheck(bool _fSanityCheck) { fSanityCheck = _fSanityCheck; } | |||
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry); | |||
bool addUnchecked(const uint256& hash, const CTxMemPoolEntry &entry, bool fCurrentEstimate = true); | |||
void remove(const CTransaction &tx, std::list<CTransaction>& removed, bool fRecursive = false); | |||
void removeCoinbaseSpends(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight); | |||
void removeConflicts(const CTransaction &tx, std::list<CTransaction>& removed); | |||
void removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight, | |||
std::list<CTransaction>& conflicts); | |||
std::list<CTransaction>& conflicts, bool fCurrentEstimate = true); | |||
void clear(); | |||
void queryHashes(std::vector<uint256>& vtxid); | |||
void pruneSpent(const uint256& hash, CCoins &coins); | |||
unsigned int GetTransactionsUpdated() const; | |||
void AddTransactionsUpdated(unsigned int n); | |||
/** | |||
* Check that none of this transaction's inputs are in the mempool, and thus
* the tx is not dependent on other mempool transactions in order to be included in a block.
*/ | |||
bool HasNoInputsOf(const CTransaction& tx) const; | |||
/** Affect CreateNewBlock prioritisation of transactions */ | |||
void PrioritiseTransaction(const uint256 hash, const std::string strHash, double dPriorityDelta, const CAmount& nFeeDelta); | |||
@@ -139,7 +145,7 @@ public: | |||
return totalTxSize; | |||
} | |||
bool exists(uint256 hash) | |||
bool exists(uint256 hash) const | |||
{ | |||
LOCK(cs); | |||
return (mapTx.count(hash) != 0); |
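// Caller-side sketch (call sites assumed, using the new default parameter
// above): ordinary block connection lets the estimator update, while a
// caller that knows its mempool view is stale can opt out, e.g.:
//
//   mempool.removeForBlock(block.vtx, nHeight, conflicts);        // fCurrentEstimate defaults to true
//   mempool.removeForBlock(block.vtx, nHeight, conflicts, false); // skip estimator update, e.g. while still syncing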