]>
Commit | Line | Data |
---|---|---|
1 | // Copyright (c) 2009-2010 Satoshi Nakamoto | |
2 | // Copyright (c) 2009-2014 The Bitcoin Core developers | |
3 | // Distributed under the MIT software license, see the accompanying | |
4 | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | |
5 | ||
6 | #include "main.h" | |
7 | ||
8 | #include "sodium.h" | |
9 | ||
10 | #include "addrman.h" | |
11 | #include "alert.h" | |
12 | #include "arith_uint256.h" | |
13 | #include "chainparams.h" | |
14 | #include "checkpoints.h" | |
15 | #include "checkqueue.h" | |
16 | #include "consensus/validation.h" | |
17 | #include "init.h" | |
18 | #include "merkleblock.h" | |
19 | #include "metrics.h" | |
20 | #include "net.h" | |
21 | #include "pow.h" | |
22 | #include "txdb.h" | |
23 | #include "txmempool.h" | |
24 | #include "ui_interface.h" | |
25 | #include "undo.h" | |
26 | #include "util.h" | |
27 | #include "utilmoneystr.h" | |
28 | #include "validationinterface.h" | |
29 | #include "wallet/asyncrpcoperation_sendmany.h" | |
30 | ||
31 | #include <sstream> | |
32 | ||
33 | #include <boost/algorithm/string/replace.hpp> | |
34 | #include <boost/filesystem.hpp> | |
35 | #include <boost/filesystem/fstream.hpp> | |
36 | #include <boost/math/distributions/poisson.hpp> | |
37 | #include <boost/thread.hpp> | |
38 | #include <boost/static_assert.hpp> | |
39 | ||
40 | using namespace std; | |
41 | ||
42 | #if defined(NDEBUG) | |
43 | # error "Zcash cannot be compiled without assertions." | |
44 | #endif | |
45 | ||
46 | /** | |
47 | * Global state | |
48 | */ | |
49 | ||
/** Main lock: protects the block index, chain state, the orphan maps and the
 *  per-node validation state declared in this file. */
CCriticalSection cs_main;

BlockMap mapBlockIndex;   // Index of all known block headers, keyed by hash.
CChain chainActive;       // The currently active best chain.
CBlockIndex *pindexBestHeader = NULL;   // Best header seen so far; NULL until first set.
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fExperimentalMode = false;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = true;   // Whether bare multisig outputs count as standard (see IsStandardTx).
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = true;
bool fCoinbaseEnforcedProtectionEnabled = true;
size_t nCoinCacheUsage = 5000 * 300;   // NOTE(review): presumably a byte budget for the coins cache — confirm against usage.
uint64_t nPruneTarget = 0;
bool fAlerts = DEFAULT_ALERTS;

/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);

/** The global transaction memory pool, constructed with minRelayTxFee as its fee floor. */
CTxMemPool mempool(::minRelayTxFee);
77 | ||
78 | struct COrphanTx { | |
79 | CTransaction tx; | |
80 | NodeId fromPeer; | |
81 | }; | |
82 | map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);; | |
83 | map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);; | |
84 | void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | |
85 | ||
/**
 * Returns true if there are nRequired or more blocks of minVersion or above
 * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
 */
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
/** Consistency checker for the block index (forward declaration; defined later in this file,
 *  gated by fCheckBlockIndex). */
static void CheckBlockIndex();

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

/** Magic prefix for signed messages, binding signatures to the Zcash network. */
const string strMessageMagic = "Zcash Signed Message:\n";
97 | ||
98 | // Internal stuff | |
namespace {

/**
 * Strict weak ordering for block index candidates: an entry with MORE chain
 * work (or, on ties, an EARLIER sequence id) compares greater, so the best
 * candidate sorts last in an ordered set (see setBlockIndexCandidates).
 */
struct CBlockIndexWorkComparator
{
    bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};

/** Tip of the best-known invalid chain seen so far.
 *  NOTE(review): maintained by validation code outside this chunk. */
CBlockIndex *pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
 * missing the data for the block.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
 * Pruned nodes may have entries where B is missing data.
 */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

/** Protects the block-file bookkeeping below (vinfoBlockFile, nLastBlockFile). */
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
 * block/undo files that should be deleted. Set on startup
 * or if we allocate more file space when we're in prune mode
 */
bool fCheckForPruning = false;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;

/**
 * Sources of received blocks, saved to be able to send them reject
 * messages or ban them when processing happens afterwards. Protected by
 * cs_main.
 */
map<uint256, NodeId> mapBlockSource;

/**
 * Filter for transactions that were recently rejected by
 * AcceptToMemoryPool. These are not rerequested until the chain tip
 * changes, at which point the entire filter is reset. Protected by
 * cs_main.
 *
 * Without this filter we'd be re-requesting txs from each of our peers,
 * increasing bandwidth consumption considerably. For instance, with 100
 * peers, half of which relay a tx we don't accept, that might be a 50x
 * bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60byte, transactions might manage to send
 * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
 * two minute window to send invs to us.
 *
 * Decreasing the false positive rate is fairly cheap, so we pick one in a
 * million to make it highly unlikely for users to have issues with this
 * filter.
 *
 * Memory used: 1.7MB
 */
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;

/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex *pindex;     //!< Optional.
    int64_t nTime;           //!< Time of "getdata" request in microseconds.
    bool fValidatedHeaders;  //!< Whether this block has validated headers at the time of request.
    int64_t nTimeDisconnect; //!< The timeout for this block request (for disconnecting a slow peer)
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

/** Number of blocks in flight with validated headers. */
int nQueuedValidatedHeaders = 0;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries (pending flush to disk). */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries (pending flush to disk). */
set<int> setDirtyFileInfo;
} // anon namespace
206 | ||
207 | ////////////////////////////////////////////////////////////////////////////// | |
208 | // | |
209 | // Registration of network node signals. | |
210 | // | |
211 | ||
212 | namespace { | |
213 | ||
/** A queued block rejection, to be delivered to the peer as a "reject" message later. */
struct CBlockReject {
    unsigned char chRejectCode;   // Numeric rejection code.
    string strRejectReason;       // Short human-readable reason.
    uint256 hashBlock;            // Hash of the rejected block.
};
219 | ||
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    //! Blocks we have requested from this peer and not yet received.
    list<QueuedBlock> vBlocksInFlight;
    //! Number of entries in vBlocksInFlight.
    int nBlocksInFlight;
    //! How many of the in-flight blocks had validated headers when requested.
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;

    // Note: `name` and `address` are not reset here; InitializeNode fills them
    // in right after construction. `rejects` and `vBlocksInFlight` start empty
    // via their own default constructors.
    CNodeState() {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        fSyncStarted = false;
        nStallingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
    }
};
269 | ||
270 | /** Map maintaining per-node state. Requires cs_main. */ | |
271 | map<NodeId, CNodeState> mapNodeState; | |
272 | ||
273 | // Requires cs_main. | |
274 | CNodeState *State(NodeId pnode) { | |
275 | map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode); | |
276 | if (it == mapNodeState.end()) | |
277 | return NULL; | |
278 | return &it->second; | |
279 | } | |
280 | ||
281 | int GetHeight() | |
282 | { | |
283 | LOCK(cs_main); | |
284 | return chainActive.Height(); | |
285 | } | |
286 | ||
287 | void UpdatePreferredDownload(CNode* node, CNodeState* state) | |
288 | { | |
289 | nPreferredDownload -= state->fPreferredDownload; | |
290 | ||
291 | // Whether this node should be marked as a preferred download node. | |
292 | state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient; | |
293 | ||
294 | nPreferredDownload += state->fPreferredDownload; | |
295 | } | |
296 | ||
297 | // Returns time at which to timeout block request (nTime in microseconds) | |
298 | int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams) | |
299 | { | |
300 | return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore); | |
301 | } | |
302 | ||
303 | void InitializeNode(NodeId nodeid, const CNode *pnode) { | |
304 | LOCK(cs_main); | |
305 | CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second; | |
306 | state.name = pnode->addrName; | |
307 | state.address = pnode->addr; | |
308 | } | |
309 | ||
310 | void FinalizeNode(NodeId nodeid) { | |
311 | LOCK(cs_main); | |
312 | CNodeState *state = State(nodeid); | |
313 | ||
314 | if (state->fSyncStarted) | |
315 | nSyncStarted--; | |
316 | ||
317 | if (state->nMisbehavior == 0 && state->fCurrentlyConnected) { | |
318 | AddressCurrentlyConnected(state->address); | |
319 | } | |
320 | ||
321 | BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) | |
322 | mapBlocksInFlight.erase(entry.hash); | |
323 | EraseOrphansFor(nodeid); | |
324 | nPreferredDownload -= state->fPreferredDownload; | |
325 | ||
326 | mapNodeState.erase(nodeid); | |
327 | } | |
328 | ||
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
// Side effect: if it was requested, removes it from both the global and the
// per-node in-flight bookkeeping.
bool MarkBlockAsReceived(const uint256& hash) {
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        // Undo the validated-headers accounting for this request.
        nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
        state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        // The peer delivered something, so it is no longer considered stalling.
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }
    return false;
}
345 | ||
// Requires cs_main.
// Record that `hash` has been requested from `nodeid`, updating both the
// per-node list and the global mapBlocksInFlight index.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    int64_t nNow = GetTimeMicros();
    // A non-NULL pindex means we had validated headers for this block at
    // request time (fValidatedHeaders = pindex != NULL).
    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
    nQueuedValidatedHeaders += newentry.fValidatedHeaders;
    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
    mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
362 | ||
/** Check whether the last unknown block a peer advertized is not yet known.
 *  If that block has since appeared in mapBlockIndex with nonzero chain work,
 *  promote it to the peer's pindexBestKnownBlock and clear the pending hash. */
void ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    if (!state->hashLastUnknownBlock.IsNull()) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            // Only replace the best known block if the newly-known one has at
            // least as much work.
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock.SetNull();
        }
    }
}
377 | ||
/** Update tracking information about which blocks a peer is assumed to have.
 *  Called when the peer announces `hash`; either promotes it to the peer's
 *  best known block or records it as the pending unknown block. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Resolve any previously-unknown announcement first.
    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}
395 | ||
396 | /** Find the last common ancestor two blocks have. | |
397 | * Both pa and pb must be non-NULL. */ | |
398 | CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) { | |
399 | if (pa->nHeight > pb->nHeight) { | |
400 | pa = pa->GetAncestor(pb->nHeight); | |
401 | } else if (pb->nHeight > pa->nHeight) { | |
402 | pb = pb->GetAncestor(pa->nHeight); | |
403 | } | |
404 | ||
405 | while (pa != pb && pa && pb) { | |
406 | pa = pa->pprev; | |
407 | pb = pb->pprev; | |
408 | } | |
409 | ||
410 | // Eventually all chain branches meet at the genesis block. | |
411 | assert(pa == pb); | |
412 | return pa; | |
413 | } | |
414 | ||
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries.
 *  On return, vBlocks holds blocks to request from `nodeid` in height order; if we could fetch
 *  nothing at all because another peer fills the window, nodeStaller is set to that peer. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        // Fill the batch back-to-front by following pprev links from pindexWalk.
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
498 | ||
499 | } // anon namespace | |
500 | ||
/** Copy validation statistics for `nodeid` into `stats`.
 *  Returns false if we have no state for that node. Takes cs_main. */
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
    LOCK(cs_main);
    CNodeState *state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    // -1 signals "unknown" for both heights.
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}
515 | ||
/** Hook this file's validation handlers into the network layer's signals. */
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.connect(&GetHeight);
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
524 | ||
/** Disconnect the handlers registered by RegisterNodeSignals (same set, same order). */
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.disconnect(&GetHeight);
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
533 | ||
534 | CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) | |
535 | { | |
536 | // Find the first block the caller has in the main chain | |
537 | BOOST_FOREACH(const uint256& hash, locator.vHave) { | |
538 | BlockMap::iterator mi = mapBlockIndex.find(hash); | |
539 | if (mi != mapBlockIndex.end()) | |
540 | { | |
541 | CBlockIndex* pindex = (*mi).second; | |
542 | if (chain.Contains(pindex)) | |
543 | return pindex; | |
544 | } | |
545 | } | |
546 | return chain.Genesis(); | |
547 | } | |
548 | ||
/** Global coins view cache for the chain tip; NULL until set up elsewhere (startup code). */
CCoinsViewCache *pcoinsTip = NULL;
/** Global block tree database handle; NULL until set up elsewhere (startup code). */
CBlockTreeDB *pblocktree = NULL;
551 | ||
552 | ////////////////////////////////////////////////////////////////////////////// | |
553 | // | |
554 | // mapOrphanTransactions | |
555 | // | |
556 | ||
/** Store `tx` as an orphan received from `peer`.
 *  Returns true if it was added, false if already present or too large.
 *  Requires cs_main (guards both orphan maps). */
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 10,000 orphans, each of which is at most 5,000 bytes big is
    // at most 50 megabytes of orphans:
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, tx.nVersion);
    if (sz > 5000)
    {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    mapOrphanTransactions[hash].tx = tx;
    mapOrphanTransactions[hash].fromPeer = peer;
    // Index the orphan by each parent txid so it can be found when a parent arrives.
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);

    LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}
586 | ||
/** Remove one orphan (by hash) from both orphan maps; no-op if unknown.
 *  Requires cs_main. */
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return;
    // Drop this hash from every parent's reverse-index entry, and drop the
    // entry itself once it becomes empty.
    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
    {
        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(hash);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
}
603 | ||
/** Remove all orphan transactions originally received from `peer`.
 *  Requires cs_main (declared with EXCLUSIVE_LOCKS_REQUIRED above). */
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
    {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer)
        {
            EraseOrphanTx(maybeErase->second.tx.GetHash());
            ++nErased;
        }
    }
    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
619 | ||
620 | ||
/** Evict random orphans until at most nMaxOrphans remain; returns the number evicted.
 *  Requires cs_main. */
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
    unsigned int nEvicted = 0;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        // lower_bound on a random hash picks a uniformly-ish random map entry;
        // wrap around to begin() if the random hash sorts past the last entry.
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
636 | ||
637 | ||
638 | ||
639 | ||
640 | ||
641 | ||
642 | ||
/** Check whether `tx` follows "standard" form for relay and mining:
 *  acceptable version range, push-only and size-bounded scriptSigs,
 *  standard scriptPubKeys, no dust outputs, and at most one OP_RETURN output.
 *  On failure, `reason` is set to a short rejection keyword and false is returned. */
bool IsStandardTx(const CTransaction& tx, string& reason)
{
    if (tx.nVersion > CTransaction::MAX_CURRENT_VERSION || tx.nVersion < CTransaction::MIN_CURRENT_VERSION) {
        reason = "version";
        return false;
    }

    BOOST_FOREACH(const CTxIn& txin, tx.vin)
    {
        // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
        // keys. (remember the 520 byte limit on redeemScript size) That works
        // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
        // bytes of scriptSig, which we round off to 1650 bytes for some minor
        // future-proofing. That's also enough to spend a 20-of-20
        // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not
        // considered standard)
        if (txin.scriptSig.size() > 1650) {
            reason = "scriptsig-size";
            return false;
        }
        if (!txin.scriptSig.IsPushOnly()) {
            reason = "scriptsig-not-pushonly";
            return false;
        }
    }

    unsigned int nDataOut = 0;
    txnouttype whichType;
    BOOST_FOREACH(const CTxOut& txout, tx.vout) {
        if (!::IsStandard(txout.scriptPubKey, whichType)) {
            reason = "scriptpubkey";
            return false;
        }

        if (whichType == TX_NULL_DATA)
            nDataOut++;
        // Bare multisig outputs are only standard when fIsBareMultisigStd is set.
        else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
            reason = "bare-multisig";
            return false;
        } else if (txout.IsDust(::minRelayTxFee)) {
            reason = "dust";
            return false;
        }
    }

    // only one OP_RETURN txout is permitted
    if (nDataOut > 1) {
        reason = "multi-op-return";
        return false;
    }

    return true;
}
696 | ||
697 | bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) | |
698 | { | |
699 | if (tx.nLockTime == 0) | |
700 | return true; | |
701 | if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime)) | |
702 | return true; | |
703 | BOOST_FOREACH(const CTxIn& txin, tx.vin) | |
704 | if (!txin.IsFinal()) | |
705 | return false; | |
706 | return true; | |
707 | } | |
708 | ||
/** Check whether `tx` would be final in the *next* block on the active chain.
 *  `flags` selects lock-time evaluation rules (negative = current consensus
 *  rules). Requires cs_main. */
bool CheckFinalTx(const CTransaction &tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // Timestamps on the other hand don't get any special treatment,
    // because we can't know what timestamp the next block will have,
    // and there aren't timestamp applications where it matters.
    // However this changes once median past time-locks are enforced:
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                             ? chainActive.Tip()->GetMedianTimePast()
                             : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
739 | ||
740 | /** | |
741 | * Check transaction inputs to mitigate two | |
742 | * potential denial-of-service attacks: | |
743 | * | |
744 | * 1. scriptSigs with extra data stuffed into them, | |
745 | * not consumed by scriptPubKey (or P2SH script) | |
746 | * 2. P2SH scripts with a crazy number of expensive | |
747 | * CHECKSIG/CHECKMULTISIG operations | |
748 | */ | |
bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
{
    if (tx.IsCoinBase())
        return true; // Coinbases don't use vin normally

    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]);

        vector<vector<unsigned char> > vSolutions;
        txnouttype whichType;
        // get the scriptPubKey corresponding to this input:
        const CScript& prevScript = prev.scriptPubKey;
        // A previous output whose script we cannot classify is non-standard.
        if (!Solver(prevScript, whichType, vSolutions))
            return false;
        // Number of stack items this scriptPubKey consumes; negative means
        // the type has no fixed argument count and is non-standard here.
        int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
        if (nArgsExpected < 0)
            return false;

        // Transactions with extra stuff in their scriptSigs are
        // non-standard. Note that this EvalScript() call will
        // be quick, because if there are any operations
        // beside "push data" in the scriptSig
        // IsStandardTx() will have already returned false
        // and this method isn't called.
        vector<vector<unsigned char> > stack;
        if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker()))
            return false;

        if (whichType == TX_SCRIPTHASH)
        {
            if (stack.empty())
                return false;
            // For P2SH the last stack element is the serialized redeem
            // script; its expected arguments count on top of the outer ones.
            CScript subscript(stack.back().begin(), stack.back().end());
            vector<vector<unsigned char> > vSolutions2;
            txnouttype whichType2;
            if (Solver(subscript, whichType2, vSolutions2))
            {
                int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
                if (tmpExpected < 0)
                    return false;
                nArgsExpected += tmpExpected;
            }
            else
            {
                // Any other Script with less than 15 sigops OK:
                unsigned int sigops = subscript.GetSigOpCount(true);
                // ... extra data left on the stack after execution is OK, too:
                return (sigops <= MAX_P2SH_SIGOPS);
            }
        }

        // The scriptSig must push exactly as many items as the
        // scriptPubKey consumes; anything extra is non-standard.
        if (stack.size() != (unsigned int)nArgsExpected)
            return false;
    }

    return true;
}
807 | ||
808 | unsigned int GetLegacySigOpCount(const CTransaction& tx) | |
809 | { | |
810 | unsigned int nSigOps = 0; | |
811 | BOOST_FOREACH(const CTxIn& txin, tx.vin) | |
812 | { | |
813 | nSigOps += txin.scriptSig.GetSigOpCount(false); | |
814 | } | |
815 | BOOST_FOREACH(const CTxOut& txout, tx.vout) | |
816 | { | |
817 | nSigOps += txout.scriptPubKey.GetSigOpCount(false); | |
818 | } | |
819 | return nSigOps; | |
820 | } | |
821 | ||
822 | unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs) | |
823 | { | |
824 | if (tx.IsCoinBase()) | |
825 | return 0; | |
826 | ||
827 | unsigned int nSigOps = 0; | |
828 | for (unsigned int i = 0; i < tx.vin.size(); i++) | |
829 | { | |
830 | const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]); | |
831 | if (prevout.scriptPubKey.IsPayToScriptHash()) | |
832 | nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig); | |
833 | } | |
834 | return nSigOps; | |
835 | } | |
836 | ||
837 | bool CheckTransaction(const CTransaction& tx, CValidationState &state, | |
838 | libzcash::ProofVerifier& verifier) | |
839 | { | |
840 | // Don't count coinbase transactions because mining skews the count | |
841 | if (!tx.IsCoinBase()) { | |
842 | transactionsValidated.increment(); | |
843 | } | |
844 | ||
845 | if (!CheckTransactionWithoutProofVerification(tx, state)) { | |
846 | return false; | |
847 | } else { | |
848 | // Ensure that zk-SNARKs verify | |
849 | BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) { | |
850 | if (!joinsplit.Verify(*pzcashParams, verifier, tx.joinSplitPubKey)) { | |
851 | return state.DoS(100, error("CheckTransaction(): joinsplit does not verify"), | |
852 | REJECT_INVALID, "bad-txns-joinsplit-verification-failed"); | |
853 | } | |
854 | } | |
855 | return true; | |
856 | } | |
857 | } | |
858 | ||
859 | bool CheckTransactionWithoutProofVerification(const CTransaction& tx, CValidationState &state) | |
860 | { | |
861 | // Basic checks that don't depend on any context | |
862 | ||
863 | // Check transaction version | |
864 | if (tx.nVersion < MIN_TX_VERSION) { | |
865 | return state.DoS(100, error("CheckTransaction(): version too low"), | |
866 | REJECT_INVALID, "bad-txns-version-too-low"); | |
867 | } | |
868 | ||
869 | // Transactions can contain empty `vin` and `vout` so long as | |
870 | // `vjoinsplit` is non-empty. | |
871 | if (tx.vin.empty() && tx.vjoinsplit.empty()) | |
872 | return state.DoS(10, error("CheckTransaction(): vin empty"), | |
873 | REJECT_INVALID, "bad-txns-vin-empty"); | |
874 | if (tx.vout.empty() && tx.vjoinsplit.empty()) | |
875 | return state.DoS(10, error("CheckTransaction(): vout empty"), | |
876 | REJECT_INVALID, "bad-txns-vout-empty"); | |
877 | ||
878 | // Size limits | |
879 | BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE); // sanity | |
880 | if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE) | |
881 | return state.DoS(100, error("CheckTransaction(): size limits failed"), | |
882 | REJECT_INVALID, "bad-txns-oversize"); | |
883 | ||
884 | // Check for negative or overflow output values | |
885 | CAmount nValueOut = 0; | |
886 | BOOST_FOREACH(const CTxOut& txout, tx.vout) | |
887 | { | |
888 | if (txout.nValue < 0) | |
889 | return state.DoS(100, error("CheckTransaction(): txout.nValue negative"), | |
890 | REJECT_INVALID, "bad-txns-vout-negative"); | |
891 | if (txout.nValue > MAX_MONEY) | |
892 | return state.DoS(100, error("CheckTransaction(): txout.nValue too high"), | |
893 | REJECT_INVALID, "bad-txns-vout-toolarge"); | |
894 | nValueOut += txout.nValue; | |
895 | if (!MoneyRange(nValueOut)) | |
896 | return state.DoS(100, error("CheckTransaction(): txout total out of range"), | |
897 | REJECT_INVALID, "bad-txns-txouttotal-toolarge"); | |
898 | } | |
899 | ||
900 | // Ensure that joinsplit values are well-formed | |
901 | BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit) | |
902 | { | |
903 | if (joinsplit.vpub_old < 0) { | |
904 | return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old negative"), | |
905 | REJECT_INVALID, "bad-txns-vpub_old-negative"); | |
906 | } | |
907 | ||
908 | if (joinsplit.vpub_new < 0) { | |
909 | return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new negative"), | |
910 | REJECT_INVALID, "bad-txns-vpub_new-negative"); | |
911 | } | |
912 | ||
913 | if (joinsplit.vpub_old > MAX_MONEY) { | |
914 | return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old too high"), | |
915 | REJECT_INVALID, "bad-txns-vpub_old-toolarge"); | |
916 | } | |
917 | ||
918 | if (joinsplit.vpub_new > MAX_MONEY) { | |
919 | return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new too high"), | |
920 | REJECT_INVALID, "bad-txns-vpub_new-toolarge"); | |
921 | } | |
922 | ||
923 | if (joinsplit.vpub_new != 0 && joinsplit.vpub_old != 0) { | |
924 | return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new and joinsplit.vpub_old both nonzero"), | |
925 | REJECT_INVALID, "bad-txns-vpubs-both-nonzero"); | |
926 | } | |
927 | ||
928 | nValueOut += joinsplit.vpub_old; | |
929 | if (!MoneyRange(nValueOut)) { | |
930 | return state.DoS(100, error("CheckTransaction(): txout total out of range"), | |
931 | REJECT_INVALID, "bad-txns-txouttotal-toolarge"); | |
932 | } | |
933 | } | |
934 | ||
935 | // Ensure input values do not exceed MAX_MONEY | |
936 | // We have not resolved the txin values at this stage, | |
937 | // but we do know what the joinsplits claim to add | |
938 | // to the value pool. | |
939 | { | |
940 | CAmount nValueIn = 0; | |
941 | for (std::vector<JSDescription>::const_iterator it(tx.vjoinsplit.begin()); it != tx.vjoinsplit.end(); ++it) | |
942 | { | |
943 | nValueIn += it->vpub_new; | |
944 | ||
945 | if (!MoneyRange(it->vpub_new) || !MoneyRange(nValueIn)) { | |
946 | return state.DoS(100, error("CheckTransaction(): txin total out of range"), | |
947 | REJECT_INVALID, "bad-txns-txintotal-toolarge"); | |
948 | } | |
949 | } | |
950 | } | |
951 | ||
952 | ||
953 | // Check for duplicate inputs | |
954 | set<COutPoint> vInOutPoints; | |
955 | BOOST_FOREACH(const CTxIn& txin, tx.vin) | |
956 | { | |
957 | if (vInOutPoints.count(txin.prevout)) | |
958 | return state.DoS(100, error("CheckTransaction(): duplicate inputs"), | |
959 | REJECT_INVALID, "bad-txns-inputs-duplicate"); | |
960 | vInOutPoints.insert(txin.prevout); | |
961 | } | |
962 | ||
963 | // Check for duplicate joinsplit nullifiers in this transaction | |
964 | set<uint256> vJoinSplitNullifiers; | |
965 | BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit) | |
966 | { | |
967 | BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers) | |
968 | { | |
969 | if (vJoinSplitNullifiers.count(nf)) | |
970 | return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"), | |
971 | REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate"); | |
972 | ||
973 | vJoinSplitNullifiers.insert(nf); | |
974 | } | |
975 | } | |
976 | ||
977 | if (tx.IsCoinBase()) | |
978 | { | |
979 | // There should be no joinsplits in a coinbase transaction | |
980 | if (tx.vjoinsplit.size() > 0) | |
981 | return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"), | |
982 | REJECT_INVALID, "bad-cb-has-joinsplits"); | |
983 | ||
984 | if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) | |
985 | return state.DoS(100, error("CheckTransaction(): coinbase script size"), | |
986 | REJECT_INVALID, "bad-cb-length"); | |
987 | } | |
988 | else | |
989 | { | |
990 | BOOST_FOREACH(const CTxIn& txin, tx.vin) | |
991 | if (txin.prevout.IsNull()) | |
992 | return state.DoS(10, error("CheckTransaction(): prevout is null"), | |
993 | REJECT_INVALID, "bad-txns-prevout-null"); | |
994 | ||
995 | if (tx.vjoinsplit.size() > 0) { | |
996 | // Empty output script. | |
997 | CScript scriptCode; | |
998 | uint256 dataToBeSigned; | |
999 | try { | |
1000 | dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL); | |
1001 | } catch (std::logic_error ex) { | |
1002 | return state.DoS(100, error("CheckTransaction(): error computing signature hash"), | |
1003 | REJECT_INVALID, "error-computing-signature-hash"); | |
1004 | } | |
1005 | ||
1006 | BOOST_STATIC_ASSERT(crypto_sign_PUBLICKEYBYTES == 32); | |
1007 | ||
1008 | // We rely on libsodium to check that the signature is canonical. | |
1009 | // https://github.com/jedisct1/libsodium/commit/62911edb7ff2275cccd74bf1c8aefcc4d76924e0 | |
1010 | if (crypto_sign_verify_detached(&tx.joinSplitSig[0], | |
1011 | dataToBeSigned.begin(), 32, | |
1012 | tx.joinSplitPubKey.begin() | |
1013 | ) != 0) { | |
1014 | return state.DoS(100, error("CheckTransaction(): invalid joinsplit signature"), | |
1015 | REJECT_INVALID, "bad-txns-invalid-joinsplit-signature"); | |
1016 | } | |
1017 | } | |
1018 | } | |
1019 | ||
1020 | return true; | |
1021 | } | |
1022 | ||
1023 | CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree) | |
1024 | { | |
1025 | { | |
1026 | LOCK(mempool.cs); | |
1027 | uint256 hash = tx.GetHash(); | |
1028 | double dPriorityDelta = 0; | |
1029 | CAmount nFeeDelta = 0; | |
1030 | mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta); | |
1031 | if (dPriorityDelta > 0 || nFeeDelta > 0) | |
1032 | return 0; | |
1033 | } | |
1034 | ||
1035 | CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes); | |
1036 | ||
1037 | if (fAllowFree) | |
1038 | { | |
1039 | // There is a free transaction area in blocks created by most miners, | |
1040 | // * If we are relaying we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000 | |
1041 | // to be considered to fall into this category. We don't want to encourage sending | |
1042 | // multiple transactions instead of one big transaction to avoid fees. | |
1043 | if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000)) | |
1044 | nMinFee = 0; | |
1045 | } | |
1046 | ||
1047 | if (!MoneyRange(nMinFee)) | |
1048 | nMinFee = MAX_MONEY; | |
1049 | return nMinFee; | |
1050 | } | |
1051 | ||
1052 | ||
1053 | bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, | |
1054 | bool* pfMissingInputs, bool fRejectAbsurdFee) | |
1055 | { | |
1056 | AssertLockHeld(cs_main); | |
1057 | if (pfMissingInputs) | |
1058 | *pfMissingInputs = false; | |
1059 | ||
1060 | auto verifier = libzcash::ProofVerifier::Strict(); | |
1061 | if (!CheckTransaction(tx, state, verifier)) | |
1062 | return error("AcceptToMemoryPool: CheckTransaction failed"); | |
1063 | ||
1064 | // Coinbase is only valid in a block, not as a loose transaction | |
1065 | if (tx.IsCoinBase()) | |
1066 | return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"), | |
1067 | REJECT_INVALID, "coinbase"); | |
1068 | ||
1069 | // Rather not work on nonstandard transactions (unless -testnet/-regtest) | |
1070 | string reason; | |
1071 | if (Params().RequireStandard() && !IsStandardTx(tx, reason)) | |
1072 | return state.DoS(0, | |
1073 | error("AcceptToMemoryPool: nonstandard transaction: %s", reason), | |
1074 | REJECT_NONSTANDARD, reason); | |
1075 | ||
1076 | // Only accept nLockTime-using transactions that can be mined in the next | |
1077 | // block; we don't want our mempool filled up with transactions that can't | |
1078 | // be mined yet. | |
1079 | if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) | |
1080 | return state.DoS(0, false, REJECT_NONSTANDARD, "non-final"); | |
1081 | ||
1082 | // is it already in the memory pool? | |
1083 | uint256 hash = tx.GetHash(); | |
1084 | if (pool.exists(hash)) | |
1085 | return false; | |
1086 | ||
1087 | // Check for conflicts with in-memory transactions | |
1088 | { | |
1089 | LOCK(pool.cs); // protect pool.mapNextTx | |
1090 | for (unsigned int i = 0; i < tx.vin.size(); i++) | |
1091 | { | |
1092 | COutPoint outpoint = tx.vin[i].prevout; | |
1093 | if (pool.mapNextTx.count(outpoint)) | |
1094 | { | |
1095 | // Disable replacement feature for now | |
1096 | return false; | |
1097 | } | |
1098 | } | |
1099 | BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) { | |
1100 | BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) { | |
1101 | if (pool.mapNullifiers.count(nf)) | |
1102 | { | |
1103 | return false; | |
1104 | } | |
1105 | } | |
1106 | } | |
1107 | } | |
1108 | ||
1109 | { | |
1110 | CCoinsView dummy; | |
1111 | CCoinsViewCache view(&dummy); | |
1112 | ||
1113 | CAmount nValueIn = 0; | |
1114 | { | |
1115 | LOCK(pool.cs); | |
1116 | CCoinsViewMemPool viewMemPool(pcoinsTip, pool); | |
1117 | view.SetBackend(viewMemPool); | |
1118 | ||
1119 | // do we already have it? | |
1120 | if (view.HaveCoins(hash)) | |
1121 | return false; | |
1122 | ||
1123 | // do all inputs exist? | |
1124 | // Note that this does not check for the presence of actual outputs (see the next check for that), | |
1125 | // and only helps with filling in pfMissingInputs (to determine missing vs spent). | |
1126 | BOOST_FOREACH(const CTxIn txin, tx.vin) { | |
1127 | if (!view.HaveCoins(txin.prevout.hash)) { | |
1128 | if (pfMissingInputs) | |
1129 | *pfMissingInputs = true; | |
1130 | return false; | |
1131 | } | |
1132 | } | |
1133 | ||
1134 | // are the actual inputs available? | |
1135 | if (!view.HaveInputs(tx)) | |
1136 | return state.Invalid(error("AcceptToMemoryPool: inputs already spent"), | |
1137 | REJECT_DUPLICATE, "bad-txns-inputs-spent"); | |
1138 | ||
1139 | // are the joinsplit's requirements met? | |
1140 | if (!view.HaveJoinSplitRequirements(tx)) | |
1141 | return state.Invalid(error("AcceptToMemoryPool: joinsplit requirements not met"), | |
1142 | REJECT_DUPLICATE, "bad-txns-joinsplit-requirements-not-met"); | |
1143 | ||
1144 | // Bring the best block into scope | |
1145 | view.GetBestBlock(); | |
1146 | ||
1147 | nValueIn = view.GetValueIn(tx); | |
1148 | ||
1149 | // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool | |
1150 | view.SetBackend(dummy); | |
1151 | } | |
1152 | ||
1153 | // Check for non-standard pay-to-script-hash in inputs | |
1154 | if (Params().RequireStandard() && !AreInputsStandard(tx, view)) | |
1155 | return error("AcceptToMemoryPool: nonstandard transaction input"); | |
1156 | ||
1157 | // Check that the transaction doesn't have an excessive number of | |
1158 | // sigops, making it impossible to mine. Since the coinbase transaction | |
1159 | // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than | |
1160 | // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than | |
1161 | // merely non-standard transaction. | |
1162 | unsigned int nSigOps = GetLegacySigOpCount(tx); | |
1163 | nSigOps += GetP2SHSigOpCount(tx, view); | |
1164 | if (nSigOps > MAX_STANDARD_TX_SIGOPS) | |
1165 | return state.DoS(0, | |
1166 | error("AcceptToMemoryPool: too many sigops %s, %d > %d", | |
1167 | hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS), | |
1168 | REJECT_NONSTANDARD, "bad-txns-too-many-sigops"); | |
1169 | ||
1170 | CAmount nValueOut = tx.GetValueOut(); | |
1171 | CAmount nFees = nValueIn-nValueOut; | |
1172 | double dPriority = view.GetPriority(tx, chainActive.Height()); | |
1173 | ||
1174 | CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx)); | |
1175 | unsigned int nSize = entry.GetTxSize(); | |
1176 | ||
1177 | // Accept a tx if it contains joinsplits and has at least the default fee specified by z_sendmany. | |
1178 | if (tx.vjoinsplit.size() > 0 && nFees >= ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE) { | |
1179 | // In future we will we have more accurate and dynamic computation of fees for tx with joinsplits. | |
1180 | } else { | |
1181 | // Don't accept it if it can't get into a block | |
1182 | CAmount txMinFee = GetMinRelayFee(tx, nSize, true); | |
1183 | if (fLimitFree && nFees < txMinFee) | |
1184 | return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d", | |
1185 | hash.ToString(), nFees, txMinFee), | |
1186 | REJECT_INSUFFICIENTFEE, "insufficient fee"); | |
1187 | } | |
1188 | ||
1189 | // Require that free transactions have sufficient priority to be mined in the next block. | |
1190 | if (GetBoolArg("-relaypriority", false) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) { | |
1191 | return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); | |
1192 | } | |
1193 | ||
1194 | // Continuously rate-limit free (really, very-low-fee) transactions | |
1195 | // This mitigates 'penny-flooding' -- sending thousands of free transactions just to | |
1196 | // be annoying or make others' transactions take longer to confirm. | |
1197 | if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize)) | |
1198 | { | |
1199 | static CCriticalSection csFreeLimiter; | |
1200 | static double dFreeCount; | |
1201 | static int64_t nLastTime; | |
1202 | int64_t nNow = GetTime(); | |
1203 | ||
1204 | LOCK(csFreeLimiter); | |
1205 | ||
1206 | // Use an exponentially decaying ~10-minute window: | |
1207 | dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime)); | |
1208 | nLastTime = nNow; | |
1209 | // -limitfreerelay unit is thousand-bytes-per-minute | |
1210 | // At default rate it would take over a month to fill 1GB | |
1211 | if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000) | |
1212 | return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"), | |
1213 | REJECT_INSUFFICIENTFEE, "rate limited free transaction"); | |
1214 | LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize); | |
1215 | dFreeCount += nSize; | |
1216 | } | |
1217 | ||
1218 | if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000) | |
1219 | return error("AcceptToMemoryPool: absurdly high fees %s, %d > %d", | |
1220 | hash.ToString(), | |
1221 | nFees, ::minRelayTxFee.GetFee(nSize) * 10000); | |
1222 | ||
1223 | // Check against previous transactions | |
1224 | // This is done last to help prevent CPU exhaustion denial-of-service attacks. | |
1225 | if (!ContextualCheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true, Params().GetConsensus())) | |
1226 | { | |
1227 | return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString()); | |
1228 | } | |
1229 | ||
1230 | // Check again against just the consensus-critical mandatory script | |
1231 | // verification flags, in case of bugs in the standard flags that cause | |
1232 | // transactions to pass as valid when they're actually invalid. For | |
1233 | // instance the STRICTENC flag was incorrectly allowing certain | |
1234 | // CHECKSIG NOT scripts to pass, even though they were invalid. | |
1235 | // | |
1236 | // There is a similar check in CreateNewBlock() to prevent creating | |
1237 | // invalid blocks, however allowing such transactions into the mempool | |
1238 | // can be exploited as a DoS attack. | |
1239 | if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, Params().GetConsensus())) | |
1240 | { | |
1241 | return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString()); | |
1242 | } | |
1243 | ||
1244 | // Store transaction in memory | |
1245 | pool.addUnchecked(hash, entry, !IsInitialBlockDownload()); | |
1246 | } | |
1247 | ||
1248 | SyncWithWallets(tx, NULL); | |
1249 | ||
1250 | return true; | |
1251 | } | |
1252 | ||
1253 | /** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */ | |
bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;

    LOCK(cs_main);

    // Cheapest lookup first: the mempool.
    if (mempool.lookup(hash, txOut))
    {
        return true;
    }

    // With -txindex enabled, the transaction can be read directly from disk.
    if (fTxIndex) {
        CDiskTxPos postx;
        if (pblocktree->ReadTxIndex(hash, postx)) {
            CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
            if (file.IsNull())
                return error("%s: OpenBlockFile failed", __func__);
            CBlockHeader header;
            try {
                // The stored offset (nTxOffset) is relative to the end of
                // the block header, hence read the header first, then seek.
                file >> header;
                fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                file >> txOut;
            } catch (const std::exception& e) {
                return error("%s: Deserialize or I/O error - %s", __func__, e.what());
            }
            hashBlock = header.GetHash();
            // Guard against index corruption: the data read back must
            // actually hash to the requested txid.
            if (txOut.GetHash() != hash)
                return error("%s: txid mismatch", __func__);
            return true;
        }
    }

    if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
        int nHeight = -1;
        {
            CCoinsViewCache &view = *pcoinsTip;
            const CCoins* coins = view.AccessCoins(hash);
            if (coins)
                nHeight = coins->nHeight;
        }
        if (nHeight > 0)
            pindexSlow = chainActive[nHeight];
    }

    // Slow path: scan every transaction in the candidate block.
    if (pindexSlow) {
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow)) {
            BOOST_FOREACH(const CTransaction &tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
1313 | ||
1314 | ||
1315 | ||
1316 | ||
1317 | ||
1318 | ||
1319 | ////////////////////////////////////////////////////////////////////////////// | |
1320 | // | |
1321 | // CBlock and CBlockIndex | |
1322 | // | |
1323 | ||
1324 | bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart) | |
1325 | { | |
1326 | // Open history file to append | |
1327 | CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); | |
1328 | if (fileout.IsNull()) | |
1329 | return error("WriteBlockToDisk: OpenBlockFile failed"); | |
1330 | ||
1331 | // Write index header | |
1332 | unsigned int nSize = fileout.GetSerializeSize(block); | |
1333 | fileout << FLATDATA(messageStart) << nSize; | |
1334 | ||
1335 | // Write block | |
1336 | long fileOutPos = ftell(fileout.Get()); | |
1337 | if (fileOutPos < 0) | |
1338 | return error("WriteBlockToDisk: ftell failed"); | |
1339 | pos.nPos = (unsigned int)fileOutPos; | |
1340 | fileout << block; | |
1341 | ||
1342 | return true; | |
1343 | } | |
1344 | ||
1345 | bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos) | |
1346 | { | |
1347 | block.SetNull(); | |
1348 | ||
1349 | // Open history file to read | |
1350 | CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); | |
1351 | if (filein.IsNull()) | |
1352 | return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); | |
1353 | ||
1354 | // Read block | |
1355 | try { | |
1356 | filein >> block; | |
1357 | } | |
1358 | catch (const std::exception& e) { | |
1359 | return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); | |
1360 | } | |
1361 | ||
1362 | // Check the header | |
1363 | if (!(CheckEquihashSolution(&block, Params()) && | |
1364 | CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))) | |
1365 | return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); | |
1366 | ||
1367 | return true; | |
1368 | } | |
1369 | ||
1370 | bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex) | |
1371 | { | |
1372 | if (!ReadBlockFromDisk(block, pindex->GetBlockPos())) | |
1373 | return false; | |
1374 | if (block.GetHash() != pindex->GetBlockHash()) | |
1375 | return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", | |
1376 | pindex->ToString(), pindex->GetBlockPos().ToString()); | |
1377 | return true; | |
1378 | } | |
1379 | ||
1380 | CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) | |
1381 | { | |
1382 | CAmount nSubsidy = 12.5 * COIN; | |
1383 | ||
1384 | // Mining slow start | |
1385 | // The subsidy is ramped up linearly, skipping the middle payout of | |
1386 | // MAX_SUBSIDY/2 to keep the monetary curve consistent with no slow start. | |
1387 | if (nHeight < consensusParams.nSubsidySlowStartInterval / 2) { | |
1388 | nSubsidy /= consensusParams.nSubsidySlowStartInterval; | |
1389 | nSubsidy *= nHeight; | |
1390 | return nSubsidy; | |
1391 | } else if (nHeight < consensusParams.nSubsidySlowStartInterval) { | |
1392 | nSubsidy /= consensusParams.nSubsidySlowStartInterval; | |
1393 | nSubsidy *= (nHeight+1); | |
1394 | return nSubsidy; | |
1395 | } | |
1396 | ||
1397 | assert(nHeight > consensusParams.SubsidySlowStartShift()); | |
1398 | int halvings = (nHeight - consensusParams.SubsidySlowStartShift()) / consensusParams.nSubsidyHalvingInterval; | |
1399 | // Force block reward to zero when right shift is undefined. | |
1400 | if (halvings >= 64) | |
1401 | return 0; | |
1402 | ||
1403 | // Subsidy is cut in half every 840,000 blocks which will occur approximately every 4 years. | |
1404 | nSubsidy >>= halvings; | |
1405 | return nSubsidy; | |
1406 | } | |
1407 | ||
/**
 * Return true while the node is still catching up with the network
 * (importing, reindexing, below the last checkpoint, far behind the
 * best known header, or with a stale tip).  Once we have been near the
 * tip, the result latches to false (lockIBDState) so a temporary stall
 * does not flip the node back into IBD behavior.
 */
bool IsInitialBlockDownload()
{
    const CChainParams& chainParams = Params();
    LOCK(cs_main);
    if (fImporting || fReindex)
        return true;
    if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
        return true;
    static bool lockIBDState = false;
    if (lockIBDState)
        return false;
    // NOTE(review): pindexBestHeader is dereferenced unconditionally here;
    // presumably callers never invoke this before a best header exists --
    // confirm against initialization order.
    bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
            pindexBestHeader->GetBlockTime() < GetTime() - chainParams.MaxTipAge());
    if (!state)
        lockIBDState = true;
    return state;
}
1425 | ||
// Fork-warning state, set and cleared by CheckForkWarningConditions()
// (requires cs_main).
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
// Tip of the best (highest) fork seen so far, and the fork point
// against our active chain; maintained by CheckForkWarningConditionsOnNewFork().
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
1429 | ||
/**
 * Update the large-work-fork / invalid-chain warning flags and emit
 * alerts based on the currently tracked best fork (pindexBestForkTip /
 * pindexBestForkBase) and best invalid chain (pindexBestInvalid).
 * Requires cs_main.
 */
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before the last checkpoint)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 288)
        pindexBestForkTip = NULL;

    // Warn if we are tracking a fork tip, or if a known invalid chain has
    // at least ~6 blocks' more work than our current tip.
    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
    {
        // Alert only once per fork episode (fLargeWorkForkFound gates it).
        if (!fLargeWorkForkFound && pindexBestForkBase)
        {
            std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                pindexBestForkBase->phashBlock->ToString() + std::string("'");
            CAlert::Notify(warning, true);
        }
        if (pindexBestForkTip && pindexBestForkBase)
        {
            LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
                   pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                   pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
            fLargeWorkForkFound = true;
        }
        else
        {
            // NOTE(review): this LogPrintf passes the warning text first and
            // __func__ second, the reverse of every other log line in this
            // function -- looks like swapped arguments; confirm intent.
            std::string warning = std::string("Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.");
            LogPrintf("%s: %s\n", warning.c_str(), __func__);
            CAlert::Notify(warning, true);
            fLargeWorkInvalidChainFound = true;
        }
    }
    else
    {
        // Condition cleared: reset both warning flags.
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}
1472 | ||
/**
 * Called when a block extends a chain that is not our active chain.
 * Finds the fork point against the active chain and, if the fork is
 * large enough to be worrying, records it as the best fork before
 * re-running CheckForkWarningConditions().  Requires cs_main.
 */
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
    CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    // Walk both chains backwards until they meet; pfork ends up at the
    // last common ancestor (the fork point), or NULL if none is found.
    while (pfork && pfork != plonger)
    {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a condition where we should warn the user about as a fork of at least 7 blocks
    // with a tip within 72 blocks (+/- 3 hours if no one mines it) of ours
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
            pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
            chainActive.Height() - pindexNewForkTip->nHeight < 72)
    {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}
1505 | ||
1506 | // Requires cs_main. | |
1507 | void Misbehaving(NodeId pnode, int howmuch) | |
1508 | { | |
1509 | if (howmuch == 0) | |
1510 | return; | |
1511 | ||
1512 | CNodeState *state = State(pnode); | |
1513 | if (state == NULL) | |
1514 | return; | |
1515 | ||
1516 | state->nMisbehavior += howmuch; | |
1517 | int banscore = GetArg("-banscore", 100); | |
1518 | if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) | |
1519 | { | |
1520 | LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior); | |
1521 | state->fShouldBan = true; | |
1522 | } else | |
1523 | LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior); | |
1524 | } | |
1525 | ||
// Record that an invalid chain tip was found: remember the most-work invalid
// tip seen so far, log it alongside the current best chain, and re-check the
// fork-warning conditions.
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    // Track the invalid tip with the most cumulative work; consulted by the
    // fork-warning logic elsewhere in this file.
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
        pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
        log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
        pindexNew->GetBlockTime()));
    CBlockIndex *tip = chainActive.Tip();
    assert (tip);
    LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
        tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
    CheckForkWarningConditions();
}
1542 | ||
// Handle a block that failed validation: send a reject/misbehavior response
// to the peer that sent it (if known), and — unless the failure might be due
// to local corruption — mark the block index entry failed.
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
    int nDoS = 0;
    if (state.IsInvalid(nDoS)) {
        // Look up which peer gave us this block so we can push a reject
        // message and optionally penalize it.
        std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
        if (it != mapBlockSource.end() && State(it->second)) {
            CBlockReject reject = {state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
            State(it->second)->rejects.push_back(reject);
            if (nDoS > 0)
                Misbehaving(it->second, nDoS);
        }
    }
    // If corruption is possible the failure may be our own disk's fault, so
    // don't permanently mark the block invalid in that case.
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}
1561 | ||
// Apply a transaction's effects to the coins view: spend its transparent
// inputs (recording per-input undo data in txundo), mark its JoinSplit
// nullifiers as spent, and add its outputs as new coins at nHeight.
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight)
{
    // mark inputs spent
    if (!tx.IsCoinBase()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH(const CTxIn &txin, tx.vin) {
            CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
            unsigned nPos = txin.prevout.n;

            // Callers must have verified inputs exist (HaveInputs); a missing
            // or already-spent output here is a programming error.
            if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
                assert(false);
            // mark an outpoint spent, and construct undo information
            txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
            coins->Spend(nPos);
            if (coins->vout.size() == 0) {
                // Last output of the prevout tx spent: stash the tx-level
                // metadata in the undo entry so DisconnectBlock can restore it.
                CTxInUndo& undo = txundo.vprevout.back();
                undo.nHeight = coins->nHeight;
                undo.fCoinBase = coins->fCoinBase;
                undo.nVersion = coins->nVersion;
            }
        }
    }

    // spend nullifiers
    BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
        BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
            inputs.SetNullifier(nf, true);
        }
    }

    // add outputs
    inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight);
}
1595 | ||
1596 | void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight) | |
1597 | { | |
1598 | CTxUndo txundo; | |
1599 | UpdateCoins(tx, state, inputs, txundo, nHeight); | |
1600 | } | |
1601 | ||
1602 | bool CScriptCheck::operator()() { | |
1603 | const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; | |
1604 | if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) { | |
1605 | return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error)); | |
1606 | } | |
1607 | return true; | |
1608 | } | |
1609 | ||
1610 | int GetSpendHeight(const CCoinsViewCache& inputs) | |
1611 | { | |
1612 | LOCK(cs_main); | |
1613 | CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; | |
1614 | return pindexPrev->nHeight + 1; | |
1615 | } | |
1616 | ||
namespace Consensus {
// Consensus-level input checks for a transaction: inputs present, JoinSplit
// anchors/nullifiers satisfied, coinbase maturity and protection rules,
// value-range checks, and input-value >= output-value (non-negative fee).
// Does not execute scripts; see ContextualCheckInputs for that.
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, const Consensus::Params& consensusParams)
{
    // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
    // for an attacker to attempt to split the network.
    if (!inputs.HaveInputs(tx))
        return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString()));

    // are the JoinSplit's requirements met?
    if (!inputs.HaveJoinSplitRequirements(tx))
        return state.Invalid(error("CheckInputs(): %s JoinSplit requirements not met", tx.GetHash().ToString()));

    CAmount nValueIn = 0;
    CAmount nFees = 0;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        const COutPoint &prevout = tx.vin[i].prevout;
        const CCoins *coins = inputs.AccessCoins(prevout.hash);
        // HaveInputs above guarantees the coins exist.
        assert(coins);

        if (coins->IsCoinBase()) {
            // Ensure that coinbases are matured
            if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
                return state.Invalid(
                    error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),
                    REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
            }

            // Ensure that coinbases cannot be spent to transparent outputs
            // Disabled on regtest
            if (fCoinbaseEnforcedProtectionEnabled &&
                consensusParams.fCoinbaseMustBeProtected &&
                !tx.vout.empty()) {
                return state.Invalid(
                    error("CheckInputs(): tried to spend coinbase with transparent outputs"),
                    REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
            }
        }

        // Check for negative or overflow input values
        nValueIn += coins->vout[prevout.n].nValue;
        if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
            return state.DoS(100, error("CheckInputs(): txin values out of range"),
                             REJECT_INVALID, "bad-txns-inputvalues-outofrange");

    }

    // JoinSplits can also contribute value to the transparent value pool
    // (vpub_old); include that before comparing against the outputs.
    nValueIn += tx.GetJoinSplitValueIn();
    if (!MoneyRange(nValueIn))
        return state.DoS(100, error("CheckInputs(): vpub_old values out of range"),
                         REJECT_INVALID, "bad-txns-inputvalues-outofrange");

    if (nValueIn < tx.GetValueOut())
        return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s)",
                                    tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())),
                         REJECT_INVALID, "bad-txns-in-belowout");

    // Tally transaction fees
    CAmount nTxFee = nValueIn - tx.GetValueOut();
    if (nTxFee < 0)
        return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()),
                         REJECT_INVALID, "bad-txns-fee-negative");
    nFees += nTxFee;
    if (!MoneyRange(nFees))
        return state.DoS(100, error("CheckInputs(): nFees out of range"),
                         REJECT_INVALID, "bad-txns-fee-outofrange");
    return true;
}
}// namespace Consensus
1686 | ||
// Full input validation for a transaction in context: runs the consensus
// CheckTxInputs, then (optionally) verifies each input's script. If pvChecks
// is non-NULL the script checks are handed back to the caller for parallel
// execution instead of being run inline.
bool ContextualCheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, const Consensus::Params& consensusParams, std::vector<CScriptCheck> *pvChecks)
{
    if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs), consensusParams))
        return false;

    if (!tx.IsCoinBase())
    {
        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks
        // before the last block chain checkpoint. This is safe because block merkle hashes are
        // still computed and checked, and any change will be caught at the next checkpoint.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore);
                if (pvChecks) {
                    // Defer: move this check into the caller's batch.
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check(*coins, tx, i,
                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore);
                        if (check())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after a soft-fork
                    // super-majority vote has passed.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}
1743 | ||
1744 | namespace { | |
1745 | ||
// Serialize block undo data to the undo file at `pos`, preceded by the
// network magic and payload size, and followed by a checksum that commits
// to both the parent block hash and the undo payload. On success `pos.nPos`
// is updated to the start of the undo payload itself.
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
    // Open history file to append
    CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("%s: OpenUndoFile failed", __func__);

    // Write index header
    unsigned int nSize = fileout.GetSerializeSize(blockundo);
    fileout << FLATDATA(messageStart) << nSize;

    // Write undo data
    // Record the post-header file offset so the block index can point
    // directly at the payload.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("%s: ftell failed", __func__);
    pos.nPos = (unsigned int)fileOutPos;
    fileout << blockundo;

    // calculate & write checksum
    // Hashing hashBlock ties the checksum to the specific block, catching
    // undo data attached to the wrong block as well as bit-rot.
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    fileout << hasher.GetHash();

    return true;
}
1772 | ||
// Read block undo data from disk at `pos` and verify its checksum against
// the expected parent block hash. Returns false on I/O, deserialization,
// or checksum failure.
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("%s: OpenBlockFile failed", __func__);

    // Read block
    uint256 hashChecksum;
    try {
        filein >> blockundo;
        filein >> hashChecksum;
    }
    catch (const std::exception& e) {
        return error("%s: Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    // Recompute over (hashBlock, blockundo) exactly as UndoWriteToDisk did.
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << blockundo;
    if (hashChecksum != hasher.GetHash())
        return error("%s: Checksum mismatch", __func__);

    return true;
}
1799 | ||
/** Abort with a message */
// Fatal-error path: record the warning, log it, surface a message box to the
// UI (generic text if no userMessage is given), and request shutdown.
// Always returns false so callers can `return AbortNode(...)`.
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}
1811 | ||
// Overload that additionally marks the validation state as a (non-DoS)
// error, so the failure propagates through validation return paths.
bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
1817 | ||
1818 | } // anon namespace | |
1819 | ||
/**
 * Apply the undo operation of a CTxInUndo to the given chain state,
 * restoring the spent output described by `undo` at outpoint `out`.
 * @param undo The undo object.
 * @param view The coins view to which to apply the changes.
 * @param out The out point that corresponds to the tx input.
 * @return True on success; false ("unclean") if the existing view state was
 *         inconsistent with the undo data (the restore is still performed).
 */
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
    bool fClean = true;

    CCoinsModifier coins = view.ModifyCoins(out.hash);
    if (undo.nHeight != 0) {
        // undo data contains height: this is the last output of the prevout tx being spent
        // (see UpdateCoins, which only stores metadata in that case), so
        // recreate the tx-level record from the undo metadata.
        if (!coins->IsPruned())
            fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
        coins->Clear();
        coins->fCoinBase = undo.fCoinBase;
        coins->nHeight = undo.nHeight;
        coins->nVersion = undo.nVersion;
    } else {
        // Partial spend being undone: the tx record must still exist.
        if (coins->IsPruned())
            fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
    }
    if (coins->IsAvailable(out.n))
        fClean = fClean && error("%s: undo data overwriting existing output", __func__);
    if (coins->vout.size() < out.n+1)
        coins->vout.resize(out.n+1);
    coins->vout[out.n] = undo.txout;

    return fClean;
}
1852 | ||
// Undo the effects of connecting `block` on the coins view: remove its
// outputs, un-spend nullifiers, restore spent inputs from undo data, roll
// back the note commitment tree anchor, and point the view at the previous
// block. If pfClean is non-NULL, inconsistencies are reported through it
// instead of failing the call.
bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock(): no undo data available");
    if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock(): failure reading undo data");

    // One undo record per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock(): block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = block.vtx[i];
        uint256 hash = tx.GetHash();

        // Check that all outputs are available and match the outputs in the block itself
        // exactly.
        {
            CCoinsModifier outs = view.ModifyCoins(hash);
            outs->ClearUnspendable();

            CCoins outsBlock(tx, pindex->nHeight);
            // The CCoins serialization does not serialize negative numbers.
            // No network rules currently depend on the version here, so an inconsistency is harmless
            // but it must be corrected before txout nversion ever influences a network rule.
            if (outsBlock.nVersion < 0)
                outs->nVersion = outsBlock.nVersion;
            if (*outs != outsBlock)
                fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");

            // remove outputs
            outs->Clear();
        }

        // unspend nullifiers
        BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
            BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
                view.SetNullifier(nf, false);
            }
        }

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock(): transaction and undo data inconsistent");
            // Restore inputs in reverse so nested spends unwind correctly.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                if (!ApplyTxInUndo(undo, view, out))
                    fClean = false;
            }
        }
    }

    // set the old best anchor back
    view.PopAnchor(blockUndo.old_tree_root);

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    if (pfClean) {
        *pfClean = fClean;
        return true;
    }

    return fClean;
}
1930 | ||
1931 | void static FlushBlockFile(bool fFinalize = false) | |
1932 | { | |
1933 | LOCK(cs_LastBlockFile); | |
1934 | ||
1935 | CDiskBlockPos posOld(nLastBlockFile, 0); | |
1936 | ||
1937 | FILE *fileOld = OpenBlockFile(posOld); | |
1938 | if (fileOld) { | |
1939 | if (fFinalize) | |
1940 | TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); | |
1941 | FileCommit(fileOld); | |
1942 | fclose(fileOld); | |
1943 | } | |
1944 | ||
1945 | fileOld = OpenUndoFile(posOld); | |
1946 | if (fileOld) { | |
1947 | if (fFinalize) | |
1948 | TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); | |
1949 | FileCommit(fileOld); | |
1950 | fclose(fileOld); | |
1951 | } | |
1952 | } | |
1953 | ||
1954 | bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); | |
1955 | ||
1956 | static CCheckQueue<CScriptCheck> scriptcheckqueue(128); | |
1957 | ||
// Entry point for a script-verification worker thread: tag the thread with a
// recognizable name, then serve checks from the shared scriptcheckqueue.
void ThreadScriptCheck() {
    RenameThread("zcash-scriptch");
    scriptcheckqueue.Thread();
}
1962 | ||
//
// Called periodically asynchronously; alerts if it smells like
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
//
// Compares the observed block count over the last SPAN_HOURS against a
// Poisson model of the expected count and alerts on extreme tails.
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
                    int64_t nPowTargetSpacing)
{
    // Skip during initial block download: header times are not meaningful yet.
    if (bestHeader == NULL || initialDownloadCheck()) return;

    static int64_t lastAlertTime = 0;
    int64_t now = GetAdjustedTime();
    if (lastAlertTime > now-60*60*24) return; // Alert at most once per day

    const int SPAN_HOURS=4;
    const int SPAN_SECONDS=SPAN_HOURS*60*60;
    int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;

    boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);

    std::string strWarning;
    int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;

    LOCK(cs);
    // Count headers whose timestamps fall inside the window, walking back
    // from the best header.
    const CBlockIndex* i = bestHeader;
    int nBlocks = 0;
    while (i->GetBlockTime() >= startTime) {
        ++nBlocks;
        i = i->pprev;
        if (i == NULL) return; // Ran out of chain, we must not be fully sync'ed
    }

    // How likely is it to find that many by chance?
    double p = boost::math::pdf(poisson, nBlocks);

    LogPrint("partitioncheck", "%s : Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
    LogPrint("partitioncheck", "%s : likelihood: %g\n", __func__, p);

    // Aim for one false-positive about every fifty years of normal running:
    const int FIFTY_YEARS = 50*365*24*60*60;
    double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);

    if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
    {
        // Many fewer blocks than expected: alert!
        strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    }
    else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
    {
        // Many more blocks than expected: alert!
        strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
                               nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
    }
    if (!strWarning.empty())
    {
        strMiscWarning = strWarning;
        CAlert::Notify(strWarning, true);
        lastAlertTime = now;
    }
}
2024 | ||
2025 | static int64_t nTimeVerify = 0; | |
2026 | static int64_t nTimeConnect = 0; | |
2027 | static int64_t nTimeIndex = 0; | |
2028 | static int64_t nTimeCallbacks = 0; | |
2029 | static int64_t nTimeTotal = 0; | |
2030 | ||
// Apply `block` (at position pindex) to the coins view: re-validate it,
// enforce BIP30/sigop/input/fee/subsidy rules, dispatch script checks,
// update the note commitment tree, and — unless fJustCheck — persist undo
// data and the tx index. On failure the view may be left in an intermediate
// state; callers are expected to discard the cache.
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
{
    const CChainParams& chainparams = Params();
    AssertLockHeld(cs_main);

    // Expensive checks (scripts, JoinSplit proofs) can be skipped for blocks
    // that are ancestors of a checkpoint; their content is pinned.
    bool fExpensiveChecks = true;
    if (fCheckpointsEnabled) {
        CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
        if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
            // This block is an ancestor of a checkpoint: disable script checks
            fExpensiveChecks = false;
        }
    }

    auto verifier = libzcash::ProofVerifier::Strict();
    auto disabledVerifier = libzcash::ProofVerifier::Disabled();

    // Check it again to verify JoinSplit proofs, and in case a previous version let a bad block in
    if (!CheckBlock(block, state, fExpensiveChecks ? verifier : disabledVerifier, !fJustCheck, !fJustCheck))
        return false;

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
        if (!fJustCheck) {
            view.SetBestBlock(pindex->GetBlockHash());
            // Before the genesis block, there was an empty tree
            ZCIncrementalMerkleTree tree;
            pindex->hashAnchor = tree.root();
        }
        return true;
    }

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    BOOST_FOREACH(const CTransaction& tx, block.vtx) {
        const CCoins* coins = view.AccessCoins(tx.GetHash());
        if (coins && !coins->IsPruned())
            return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
                             REJECT_INVALID, "bad-txns-BIP30");
    }

    unsigned int flags = SCRIPT_VERIFY_P2SH;

    // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
    // when 75% of the network has upgraded:
    if (block.nVersion >= 3) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    // Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4
    // blocks, when 75% of the network has upgraded:
    if (block.nVersion >= 4) {
        flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
    }

    CBlockUndo blockundo;

    // Queue script checks to worker threads only when expensive checks are
    // enabled and workers are configured; otherwise run them inline.
    CCheckQueueControl<CScriptCheck> control(fExpensiveChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    int64_t nTimeStart = GetTimeMicros();
    CAmount nFees = 0;
    int nInputs = 0;
    unsigned int nSigOps = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);

    // Construct the incremental merkle tree at the current
    // block position,
    auto old_tree_root = view.GetBestAnchor();
    // saving the top anchor in the block index as we go.
    if (!fJustCheck) {
        pindex->hashAnchor = old_tree_root;
    }
    ZCIncrementalMerkleTree tree;
    // This should never fail: we should always be able to get the root
    // that is on the tip of our chain
    assert(view.GetAnchorAt(old_tree_root, tree));

    {
        // Consistency check: the root of the tree we're given should
        // match what we asked for.
        assert(tree.root() == old_tree_root);
    }

    for (unsigned int i = 0; i < block.vtx.size(); i++)
    {
        const CTransaction &tx = block.vtx[i];

        nInputs += tx.vin.size();
        nSigOps += GetLegacySigOpCount(tx);
        if (nSigOps > MAX_BLOCK_SIGOPS)
            return state.DoS(100, error("ConnectBlock(): too many sigops"),
                             REJECT_INVALID, "bad-blk-sigops");

        if (!tx.IsCoinBase())
        {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
                                 REJECT_INVALID, "bad-txns-inputs-missingorspent");

            // are the JoinSplit's requirements met?
            if (!view.HaveJoinSplitRequirements(tx))
                return state.DoS(100, error("ConnectBlock(): JoinSplit requirements not met"),
                                 REJECT_INVALID, "bad-txns-joinsplit-requirements-not-met");

            // Add in sigops done by pay-to-script-hash inputs;
            // this is to prevent a "rogue miner" from creating
            // an incredibly-expensive-to-validate block.
            nSigOps += GetP2SHSigOpCount(tx, view);
            if (nSigOps > MAX_BLOCK_SIGOPS)
                return state.DoS(100, error("ConnectBlock(): too many sigops"),
                                 REJECT_INVALID, "bad-blk-sigops");

            nFees += view.GetValueIn(tx)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false, chainparams.GetConsensus(), nScriptCheckThreads ? &vChecks : NULL))
                return false;
            control.Add(vChecks);
        }

        // The coinbase (i == 0) gets no stored undo entry; its (empty) input
        // undo data is written into a throwaway object.
        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, state, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
            BOOST_FOREACH(const uint256 &note_commitment, joinsplit.commitments) {
                // Insert the note commitments into our temporary tree.

                tree.append(note_commitment);
            }
        }

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }

    // Commit the updated note commitment tree as the new best anchor, and
    // remember the old root so DisconnectBlock can roll it back.
    view.PushAnchor(tree);
    blockundo.old_tree_root = old_tree_root;

    int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
    LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);

    // The coinbase may claim at most fees + subsidy.
    CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
    if (block.vtx[0].GetValueOut() > blockReward)
        return state.DoS(100,
                         error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
                               block.vtx[0].GetValueOut(), blockReward),
                         REJECT_INVALID, "bad-cb-amount");

    // Wait for any parallel script checks dispatched above to finish.
    if (!control.Wait())
        return state.DoS(100, false);
    int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart;
    LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001);

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos pos;
            if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock(): FindUndoPos failed");
            if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
                return AbortNode(state, "Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return AbortNode(state, "Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2;
    LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
    static uint256 hashPrevBestCoinBase;
    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
    hashPrevBestCoinBase = block.vtx[0].GetHash();

    int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3;
    LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001);

    return true;
}
2237 | ||
/** Flushing aggressiveness levels accepted by FlushStateToDisk, weakest first. */
enum FlushStateMode {
    FLUSH_STATE_NONE,       //!< Do nothing unless a prune pass forces a write.
    FLUSH_STATE_IF_NEEDED,  //!< Flush only if the coins cache exceeds its limit.
    FLUSH_STATE_PERIODIC,   //!< Flush if enough time elapsed since the last write/flush.
    FLUSH_STATE_ALWAYS      //!< Unconditionally write the block index and flush the coins cache.
};
2244 | ||
2245 | /** | |
2246 | * Update the on-disk chain state. | |
2247 | * The caches and indexes are flushed depending on the mode we're called with | |
2248 | * if they're too large, if it's been a while since the last write, | |
2249 | * or always and in all cases if we're in prune mode and are deleting files. | |
2250 | */ | |
2251 | bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { | |
2252 | LOCK2(cs_main, cs_LastBlockFile); | |
2253 | static int64_t nLastWrite = 0; | |
2254 | static int64_t nLastFlush = 0; | |
2255 | static int64_t nLastSetChain = 0; | |
2256 | std::set<int> setFilesToPrune; | |
2257 | bool fFlushForPrune = false; | |
2258 | try { | |
2259 | if (fPruneMode && fCheckForPruning && !fReindex) { | |
2260 | FindFilesToPrune(setFilesToPrune); | |
2261 | fCheckForPruning = false; | |
2262 | if (!setFilesToPrune.empty()) { | |
2263 | fFlushForPrune = true; | |
2264 | if (!fHavePruned) { | |
2265 | pblocktree->WriteFlag("prunedblockfiles", true); | |
2266 | fHavePruned = true; | |
2267 | } | |
2268 | } | |
2269 | } | |
2270 | int64_t nNow = GetTimeMicros(); | |
2271 | // Avoid writing/flushing immediately after startup. | |
2272 | if (nLastWrite == 0) { | |
2273 | nLastWrite = nNow; | |
2274 | } | |
2275 | if (nLastFlush == 0) { | |
2276 | nLastFlush = nNow; | |
2277 | } | |
2278 | if (nLastSetChain == 0) { | |
2279 | nLastSetChain = nNow; | |
2280 | } | |
2281 | size_t cacheSize = pcoinsTip->DynamicMemoryUsage(); | |
2282 | // The cache is large and close to the limit, but we have time now (not in the middle of a block processing). | |
2283 | bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage; | |
2284 | // The cache is over the limit, we have to write now. | |
2285 | bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage; | |
2286 | // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. | |
2287 | bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; | |
2288 | // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. | |
2289 | bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; | |
2290 | // Combine all conditions that result in a full cache flush. | |
2291 | bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; | |
2292 | // Write blocks and block index to disk. | |
2293 | if (fDoFullFlush || fPeriodicWrite) { | |
2294 | // Depend on nMinDiskSpace to ensure we can write block index | |
2295 | if (!CheckDiskSpace(0)) | |
2296 | return state.Error("out of disk space"); | |
2297 | // First make sure all block and undo data is flushed to disk. | |
2298 | FlushBlockFile(); | |
2299 | // Then update all block file information (which may refer to block and undo files). | |
2300 | { | |
2301 | std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; | |
2302 | vFiles.reserve(setDirtyFileInfo.size()); | |
2303 | for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { | |
2304 | vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it])); | |
2305 | setDirtyFileInfo.erase(it++); | |
2306 | } | |
2307 | std::vector<const CBlockIndex*> vBlocks; | |
2308 | vBlocks.reserve(setDirtyBlockIndex.size()); | |
2309 | for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { | |
2310 | vBlocks.push_back(*it); | |
2311 | setDirtyBlockIndex.erase(it++); | |
2312 | } | |
2313 | if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { | |
2314 | return AbortNode(state, "Files to write to block index database"); | |
2315 | } | |
2316 | } | |
2317 | // Finally remove any pruned files | |
2318 | if (fFlushForPrune) | |
2319 | UnlinkPrunedFiles(setFilesToPrune); | |
2320 | nLastWrite = nNow; | |
2321 | } | |
2322 | // Flush best chain related state. This can only be done if the blocks / block index write was also done. | |
2323 | if (fDoFullFlush) { | |
2324 | // Typical CCoins structures on disk are around 128 bytes in size. | |
2325 | // Pushing a new one to the database can cause it to be written | |
2326 | // twice (once in the log, and once in the tables). This is already | |
2327 | // an overestimation, as most will delete an existing entry or | |
2328 | // overwrite one. Still, use a conservative safety factor of 2. | |
2329 | if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize())) | |
2330 | return state.Error("out of disk space"); | |
2331 | // Flush the chainstate (which may refer to block index entries). | |
2332 | if (!pcoinsTip->Flush()) | |
2333 | return AbortNode(state, "Failed to write to coin database"); | |
2334 | nLastFlush = nNow; | |
2335 | } | |
2336 | if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) { | |
2337 | // Update best block in wallet (so we can detect restored wallets). | |
2338 | GetMainSignals().SetBestChain(chainActive.GetLocator()); | |
2339 | nLastSetChain = nNow; | |
2340 | } | |
2341 | } catch (const std::runtime_error& e) { | |
2342 | return AbortNode(state, std::string("System error while flushing: ") + e.what()); | |
2343 | } | |
2344 | return true; | |
2345 | } | |
2346 | ||
2347 | void FlushStateToDisk() { | |
2348 | CValidationState state; | |
2349 | FlushStateToDisk(state, FLUSH_STATE_ALWAYS); | |
2350 | } | |
2351 | ||
2352 | void PruneAndFlush() { | |
2353 | CValidationState state; | |
2354 | fCheckForPruning = true; | |
2355 | FlushStateToDisk(state, FLUSH_STATE_NONE); | |
2356 | } | |
2357 | ||
/**
 * Update chainActive and related internal data structures.
 * Sets the new tip, refreshes mempool bookkeeping, logs the new best block,
 * wakes waiters on cvBlockChange, and emits a one-time obsolescence warning
 * if a majority of recent blocks carry an unknown (higher) version.
 * Requires cs_main (reads/writes chainActive and pcoinsTip).
 */
void static UpdateTip(CBlockIndex *pindexNew) {
    const CChainParams& chainParams = Params();
    chainActive.SetTip(pindexNew);

    // New best block
    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);

    LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
      chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
      Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());

    // Wake any thread blocked on csBestBlock/cvBlockChange waiting for a new tip.
    cvBlockChange.notify_all();

    // Check the version of the last 100 blocks to see if we need to upgrade:
    static bool fWarned = false;  // warn at most once per process lifetime
    if (!IsInitialBlockDownload() && !fWarned)
    {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        // Walk back up to 100 blocks counting versions newer than we produce.
        for (int i = 0; i < 100 && pindex != NULL; i++)
        {
            if (pindex->nVersion > CBlock::CURRENT_VERSION)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
        if (nUpgraded > 100/2)
        {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: This version is obsolete; upgrade required!");
            CAlert::Notify(strMiscWarning, true);
            fWarned = true;
        }
    }
}
2397 | ||
/**
 * Disconnect chainActive's tip.
 * Reads the tip block from disk, rolls its effects out of the coins view,
 * optionally flushes, resurrects its transactions into the mempool, evicts
 * mempool entries whose commitment-tree anchor no longer exists, and finally
 * moves the tip to the parent and notifies wallets/listeners.
 * Returns false on a system error (disk read/flush); requires cs_main.
 */
bool static DisconnectTip(CValidationState &state) {
    CBlockIndex *pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    mempool.check(pcoinsTip);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete))
        return AbortNode(state, "Failed to read block");
    // Apply the block atomically to the chain state.
    // Remember the anchor before/after so we can tell whether the disconnect
    // actually rolled the note commitment tree back.
    uint256 anchorBeforeDisconnect = pcoinsTip->GetBestAnchor();
    int64_t nStart = GetTimeMicros();
    {
        // Scoped cache: either the whole disconnect lands via Flush(), or none of it does.
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    uint256 anchorAfterDisconnect = pcoinsTip->GetBestAnchor();
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
        return false;
    // Resurrect mempool transactions from the disconnected block.
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        // ignore validation errors in resurrected transactions
        list<CTransaction> removed;
        CValidationState stateDummy;
        if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
            mempool.remove(tx, removed, true);
    }
    if (anchorBeforeDisconnect != anchorAfterDisconnect) {
        // The anchor may not change between block disconnects,
        // in which case we don't want to evict from the mempool yet!
        mempool.removeWithAnchor(anchorBeforeDisconnect);
    }
    // Drop mempool txs spending coinbases that are no longer mature at the new height.
    mempool.removeCoinbaseSpends(pcoinsTip, pindexDelete->nHeight);
    mempool.check(pcoinsTip);
    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev);
    // Get the current commitment tree
    ZCIncrementalMerkleTree newTree;
    assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), newTree));
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH(const CTransaction &tx, block.vtx) {
        SyncWithWallets(tx, NULL);
    }
    // Update cached incremental witnesses
    GetMainSignals().ChainTip(pindexDelete, &block, newTree, false);
    return true;
}
2450 | ||
// Cumulative per-stage timing counters (microseconds) reported by ConnectTip's
// "bench" log lines; process lifetime totals, updated only under cs_main.
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
2456 | ||
2457 | /** | |
2458 | * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock | |
2459 | * corresponding to pindexNew, to bypass loading it again from disk. | |
2460 | */ | |
2461 | bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *pblock) { | |
2462 | assert(pindexNew->pprev == chainActive.Tip()); | |
2463 | mempool.check(pcoinsTip); | |
2464 | // Read block from disk. | |
2465 | int64_t nTime1 = GetTimeMicros(); | |
2466 | CBlock block; | |
2467 | if (!pblock) { | |
2468 | if (!ReadBlockFromDisk(block, pindexNew)) | |
2469 | return AbortNode(state, "Failed to read block"); | |
2470 | pblock = █ | |
2471 | } | |
2472 | // Get the current commitment tree | |
2473 | ZCIncrementalMerkleTree oldTree; | |
2474 | assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), oldTree)); | |
2475 | // Apply the block atomically to the chain state. | |
2476 | int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; | |
2477 | int64_t nTime3; | |
2478 | LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); | |
2479 | { | |
2480 | CCoinsViewCache view(pcoinsTip); | |
2481 | bool rv = ConnectBlock(*pblock, state, pindexNew, view); | |
2482 | GetMainSignals().BlockChecked(*pblock, state); | |
2483 | if (!rv) { | |
2484 | if (state.IsInvalid()) | |
2485 | InvalidBlockFound(pindexNew, state); | |
2486 | return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); | |
2487 | } | |
2488 | mapBlockSource.erase(pindexNew->GetBlockHash()); | |
2489 | nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; | |
2490 | LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); | |
2491 | assert(view.Flush()); | |
2492 | } | |
2493 | int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; | |
2494 | LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); | |
2495 | // Write the chain state to disk, if necessary. | |
2496 | if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED)) | |
2497 | return false; | |
2498 | int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; | |
2499 | LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); | |
2500 | // Remove conflicting transactions from the mempool. | |
2501 | list<CTransaction> txConflicted; | |
2502 | mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload()); | |
2503 | mempool.check(pcoinsTip); | |
2504 | // Update chainActive & related variables. | |
2505 | UpdateTip(pindexNew); | |
2506 | // Tell wallet about transactions that went from mempool | |
2507 | // to conflicted: | |
2508 | BOOST_FOREACH(const CTransaction &tx, txConflicted) { | |
2509 | SyncWithWallets(tx, NULL); | |
2510 | } | |
2511 | // ... and about transactions that got confirmed: | |
2512 | BOOST_FOREACH(const CTransaction &tx, pblock->vtx) { | |
2513 | SyncWithWallets(tx, pblock); | |
2514 | } | |
2515 | // Update cached incremental witnesses | |
2516 | GetMainSignals().ChainTip(pindexNew, pblock, oldTree, true); | |
2517 | ||
2518 | int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; | |
2519 | LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); | |
2520 | LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); | |
2521 | return true; | |
2522 | } | |
2523 | ||
/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 *
 * Repeatedly takes the highest-work entry from setBlockIndexCandidates and
 * verifies the path back to the active chain is usable; unusable candidates
 * (failed or data-missing ancestors) are evicted and the search restarts.
 * Returns NULL when no candidate exists. Requires cs_main.
 */
static CBlockIndex* FindMostWorkChain() {
    do {
        CBlockIndex *pindexNew = NULL;

        // Find the best candidate header.
        {
            // rbegin(): the comparator sorts ascending, so the best is last.
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex *pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex *pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                // Walk down from the candidate to the bad ancestor, evicting
                // each block from the candidate set on the way.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
        // Otherwise: the set shrank, loop and try the next-best candidate.
    } while(true);
}
2581 | ||
2582 | /** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ | |
2583 | static void PruneBlockIndexCandidates() { | |
2584 | // Note that we can't delete the current block itself, as we may need to return to it later in case a | |
2585 | // reorganization to a better block fails. | |
2586 | std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin(); | |
2587 | while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) { | |
2588 | setBlockIndexCandidates.erase(it++); | |
2589 | } | |
2590 | // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. | |
2591 | assert(!setBlockIndexCandidates.empty()); | |
2592 | } | |
2593 | ||
/**
 * Try to make some progress towards making pindexMostWork the active block.
 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 *
 * Disconnects back to the fork point, then connects up to 32 blocks toward
 * pindexMostWork, stopping early once chain work improves so the caller can
 * release cs_main between batches. Returns false only on a system error;
 * consensus-invalid blocks are handled internally. Requires cs_main.
 */
static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMostWork, CBlock *pblock) {
    AssertLockHeld(cs_main);
    bool fInvalidFound = false;
    const CBlockIndex *pindexOldTip = chainActive.Tip();
    const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state))
            return false;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        // Walk back from the target ancestor, collecting blocks in
        // descending height order (connected in reverse below).
        CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks.
        BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
            // Only the final block may use the caller-provided pblock.
            if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}
2661 | ||
/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 *
 * Loops ActivateBestChainStep under cs_main, releasing the lock between steps
 * to relay inventory and fire tip notifications, until the active tip equals
 * the most-work candidate. Finishes with a periodic state flush.
 */
bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
    CBlockIndex *pindexNewTip = NULL;
    CBlockIndex *pindexMostWork = NULL;
    const CChainParams& chainParams = Params();
    do {
        boost::this_thread::interruption_point();

        bool fInitialDownload;
        {
            LOCK(cs_main);
            pindexMostWork = FindMostWorkChain();

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
                return true;

            // Only hand pblock through if it is actually the most-work block.
            if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
                return false;

            pindexNewTip = chainActive.Tip();
            fInitialDownload = IsInitialBlockDownload();
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main
        if (!fInitialDownload) {
            uint256 hashNewTip = pindexNewTip->GetBlockHash();
            // Relay inventory, but don't relay old inventory during initial block download.
            int nBlockEstimate = 0;
            if (fCheckpointsEnabled)
                nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints());
            // Don't relay blocks if pruning -- could cause a peer to try to download, resulting
            // in a stalled download if the block file is pruned before the request.
            if (nLocalServices & NODE_NETWORK) {
                LOCK(cs_vNodes);
                BOOST_FOREACH(CNode* pnode, vNodes)
                    // Skip peers that are still far behind this height.
                    if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
                        pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip));
            }
            // Notify external listeners about the new tip.
            GetMainSignals().UpdatedBlockTip(pindexNewTip);
            uiInterface.NotifyBlockTip(hashNewTip);
        }
    } while(pindexMostWork != chainActive.Tip());
    CheckBlockIndex();

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}
2720 | ||
/**
 * Mark pindex as invalid (BLOCK_FAILED_VALID) and disconnect the active chain
 * back past it, flagging the disconnected descendants BLOCK_FAILED_CHILD.
 * Afterwards the candidate set is rebuilt so a new best tip can be selected.
 * Returns false if a disconnect fails (system error). Requires cs_main.
 */
bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    // Disconnect tips one at a time until pindex is no longer in the active chain.
    while (chainActive.Contains(pindex)) {
        CBlockIndex *pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state)) {
            return false;
        }
    }

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add it again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        // Re-add any fully-validated-data block at least as good as the new tip.
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    return true;
}
2754 | ||
2755 | bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) { | |
2756 | AssertLockHeld(cs_main); | |
2757 | ||
2758 | int nHeight = pindex->nHeight; | |
2759 | ||
2760 | // Remove the invalidity flag from this block and all its descendants. | |
2761 | BlockMap::iterator it = mapBlockIndex.begin(); | |
2762 | while (it != mapBlockIndex.end()) { | |
2763 | if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { | |
2764 | it->second->nStatus &= ~BLOCK_FAILED_MASK; | |
2765 | setDirtyBlockIndex.insert(it->second); | |
2766 | if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) { | |
2767 | setBlockIndexCandidates.insert(it->second); | |
2768 | } | |
2769 | if (it->second == pindexBestInvalid) { | |
2770 | // Reset invalid block marker if it was pointing to one of those. | |
2771 | pindexBestInvalid = NULL; | |
2772 | } | |
2773 | } | |
2774 | it++; | |
2775 | } | |
2776 | ||
2777 | // Remove the invalidity flag from all ancestors too. | |
2778 | while (pindex != NULL) { | |
2779 | if (pindex->nStatus & BLOCK_FAILED_MASK) { | |
2780 | pindex->nStatus &= ~BLOCK_FAILED_MASK; | |
2781 | setDirtyBlockIndex.insert(pindex); | |
2782 | } | |
2783 | pindex = pindex->pprev; | |
2784 | } | |
2785 | return true; | |
2786 | } | |
2787 | ||
/**
 * Create (or return the existing) CBlockIndex entry for a header.
 * Links it to its parent if known, accumulates chain work, raises validity to
 * BLOCK_VALID_TREE, and may promote it to pindexBestHeader. The new entry is
 * marked dirty for the next flush. Requires cs_main (writes mapBlockIndex).
 */
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    assert(pindexNew);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    // phashBlock points at the map key itself, which stays stable for the
    // lifetime of the map entry — no separate hash copy is stored.
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // Chain work accumulates from the parent (zero for a detached/genesis header).
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
2821 | ||
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS).
 *
 * Records the block's on-disk position and tx count on its index entry. If all
 * ancestors already have data (nChainTx set), propagates nChainTx breadth-first
 * through any descendants waiting in mapBlocksUnlinked, adding each eligible
 * block to setBlockIndexCandidates; otherwise parks this block in
 * mapBlocksUnlinked until its parent's data arrives. Requires cs_main.
 */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
    pindexNew->nTx = block.vtx.size();
    pindexNew->nChainTx = 0;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;
    pindexNew->nStatus |= BLOCK_HAVE_DATA;
    pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
    setDirtyBlockIndex.insert(pindexNew);

    if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
        // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
        deque<CBlockIndex*> queue;
        queue.push_back(pindexNew);

        // Recursively process any descendant blocks that now may be eligible to be connected.
        while (!queue.empty()) {
            CBlockIndex *pindex = queue.front();
            queue.pop_front();
            pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
            {
                // Sequence ids are only handed out once full data exists (see AddToBlockIndex).
                LOCK(cs_nBlockSequenceId);
                pindex->nSequenceId = nBlockSequenceId++;
            }
            if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
                setBlockIndexCandidates.insert(pindex);
            }
            // Move any children that were waiting on this block into the queue.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
            while (range.first != range.second) {
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                queue.push_back(it->second);
                range.first++;
                mapBlocksUnlinked.erase(it);
            }
        }
    } else {
        // Parent data not yet available: remember the link so the block can be
        // revisited when the parent's transactions arrive.
        if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
        }
    }

    return true;
}
2867 | ||
/**
 * Find (and pre-allocate) a position in the block files to store nAddSize bytes.
 *
 * With fKnown=false a fresh position is chosen at the end of the current file,
 * rolling over to a new file when MAX_BLOCKFILE_SIZE would be exceeded, and
 * file space is pre-allocated in BLOCKFILE_CHUNK_SIZE chunks. With fKnown=true
 * (reindex) pos is taken as given and only the bookkeeping is updated.
 * Returns false via state on out-of-disk-space. Takes cs_LastBlockFile.
 */
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
    LOCK(cs_LastBlockFile);

    unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
    if (vinfoBlockFile.size() <= nFile) {
        vinfoBlockFile.resize(nFile + 1);
    }

    if (!fKnown) {
        // Advance to the first file with room for nAddSize more bytes.
        while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            nFile++;
            if (vinfoBlockFile.size() <= nFile) {
                vinfoBlockFile.resize(nFile + 1);
            }
        }
        pos.nFile = nFile;
        pos.nPos = vinfoBlockFile[nFile].nSize;
    }

    if (nFile != nLastBlockFile) {
        if (!fKnown) {
            LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
        }
        // Flush (and finalize, unless reindexing) the file we are leaving.
        FlushBlockFile(!fKnown);
        nLastBlockFile = nFile;
    }

    vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
    if (fKnown)
        vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
    else
        vinfoBlockFile[nFile].nSize += nAddSize;

    if (!fKnown) {
        // Pre-allocate in whole chunks so the file grows in large steps
        // rather than on every block write.
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (fPruneMode)
                fCheckForPruning = true;
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error("out of disk space");
        }
    }

    setDirtyFileInfo.insert(nFile);
    return true;
}
2924 | ||
/**
 * Find a position in undo file nFile at which to append nAddSize bytes of
 * undo data. Unlike FindBlockPos, undo data always goes into the same file
 * number as its block, so there is no rollover to a new file. Disk space is
 * pre-allocated in UNDOFILE_CHUNK_SIZE chunks.
 *
 * Returns false (via state.Error) only when there is not enough disk space.
 */
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;

    LOCK(cs_LastBlockFile);

    unsigned int nNewSize;
    // Append at the current end of the undo data; the chained assignment
    // grows the recorded undo size and captures the new total in one step.
    pos.nPos = vinfoBlockFile[nFile].nUndoSize;
    nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
    setDirtyFileInfo.insert(nFile);

    // Pre-allocate another chunk when this append crosses a chunk boundary.
    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (fPruneMode)
            fCheckForPruning = true;
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error("out of disk space");
    }

    return true;
}
2955 | ||
/**
 * Context-free validity checks on a block header: version floor, Equihash
 * solution, proof of work against the claimed nBits, and a timestamp no
 * more than 2 hours ahead of adjusted time. The Equihash and PoW checks
 * are skipped when fCheckPOW is false.
 *
 * Note: the order of the checks determines which rejection reason a peer
 * sees, so it should not be rearranged.
 */
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW)
{
    // Check block version
    if (block.nVersion < MIN_BLOCK_VERSION)
        return state.DoS(100, error("CheckBlockHeader(): block version too low"),
                         REJECT_INVALID, "version-too-low");

    // Check Equihash solution is valid
    if (fCheckPOW && !CheckEquihashSolution(&block, Params()))
        return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),
                         REJECT_INVALID, "invalid-solution");

    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))
        return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),
                         REJECT_INVALID, "high-hash");

    // Check timestamp
    if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
        return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),
                             REJECT_INVALID, "time-too-new");

    return true;
}
2980 | ||
/**
 * Context-free validity checks for a full block: header validity, merkle
 * root consistency (including the CVE-2012-2459 malleability check), size
 * limits, coinbase placement, per-transaction checks, and the aggregate
 * legacy sigop limit.
 *
 * @param verifier JoinSplit proof verifier, forwarded to CheckTransaction.
 */
bool CheckBlock(const CBlock& block, CValidationState& state,
                libzcash::ProofVerifier& verifier,
                bool fCheckPOW, bool fCheckMerkleRoot)
{
    // These are checks that are independent of context.

    // Check that the header is valid (particularly PoW). This is mostly
    // redundant with the call in AcceptBlockHeader.
    if (!CheckBlockHeader(block, state, fCheckPOW))
        return false;

    // Check the merkle root.
    if (fCheckMerkleRoot) {
        bool mutated;
        uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
        if (block.hashMerkleRoot != hashMerkleRoot2)
            return state.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"),
                             REJECT_INVALID, "bad-txnmrklroot", true);

        // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
        // of transactions in a block without affecting the merkle root of a block,
        // while still invalidating it.
        if (mutated)
            return state.DoS(100, error("CheckBlock(): duplicate transaction"),
                             REJECT_INVALID, "bad-txns-duplicate", true);
    }

    // All potential-corruption validation must be done before we do any
    // transaction validation, as otherwise we may mark the header as invalid
    // because we receive the wrong transactions for it.

    // Size limits
    if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
        return state.DoS(100, error("CheckBlock(): size limits failed"),
                         REJECT_INVALID, "bad-blk-length");

    // First transaction must be coinbase, the rest must not be
    if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
        return state.DoS(100, error("CheckBlock(): first tx is not coinbase"),
                         REJECT_INVALID, "bad-cb-missing");
    for (unsigned int i = 1; i < block.vtx.size(); i++)
        if (block.vtx[i].IsCoinBase())
            return state.DoS(100, error("CheckBlock(): more than one coinbase"),
                             REJECT_INVALID, "bad-cb-multiple");

    // Check transactions
    BOOST_FOREACH(const CTransaction& tx, block.vtx)
        if (!CheckTransaction(tx, state, verifier))
            return error("CheckBlock(): CheckTransaction failed");

    // Enforce the block-wide cap on legacy signature operations.
    unsigned int nSigOps = 0;
    BOOST_FOREACH(const CTransaction& tx, block.vtx)
    {
        nSigOps += GetLegacySigOpCount(tx);
    }
    if (nSigOps > MAX_BLOCK_SIGOPS)
        return state.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"),
                         REJECT_INVALID, "bad-blk-sigops", true);

    return true;
}
3042 | ||
/**
 * Header checks that depend on the block's position in the chain
 * (pindexPrev): difficulty target, median-time-past, checkpoint forks,
 * and the nVersion >= 4 requirement. The genesis block always passes.
 */
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
    const CChainParams& chainParams = Params();
    const Consensus::Params& consensusParams = chainParams.GetConsensus();
    uint256 hash = block.GetHash();
    // The genesis block has no predecessor, so it is exempt from all
    // contextual checks.
    if (hash == consensusParams.hashGenesisBlock)
        return true;

    assert(pindexPrev);

    int nHeight = pindexPrev->nHeight+1;

    // Check proof of work
    if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
        return state.DoS(100, error("%s: incorrect proof of work", __func__),
                         REJECT_INVALID, "bad-diffbits");

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
        return state.Invalid(error("%s: block's timestamp is too early", __func__),
                             REJECT_INVALID, "time-too-old");

    if (fCheckpointsEnabled)
    {
        // Don't accept any forks from the main chain prior to last checkpoint
        CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
        if (pcheckpoint && nHeight < pcheckpoint->nHeight)
            return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
    }

    // Reject block.nVersion < 4 blocks
    if (block.nVersion < 4)
        return state.Invalid(error("%s : rejected nVersion<4 block", __func__),
                             REJECT_OBSOLETE, "bad-version");

    return true;
}
3080 | ||
/**
 * Full-block checks that require chain context (pindexPrev): every
 * transaction must be final at this height, the coinbase scriptSig must
 * begin with the serialized block height, and -- until the last founders'
 * reward block -- the coinbase must include an output paying 20% of the
 * block subsidy to the expected founders' reward script.
 */
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
    const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
    const Consensus::Params& consensusParams = Params().GetConsensus();

    // Check that all transactions are finalized
    BOOST_FOREACH(const CTransaction& tx, block.vtx) {
        // nLockTimeFlags is always 0 here, so the cutoff below is always the
        // block's own timestamp (never median-time-past).
        int nLockTimeFlags = 0;
        int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                                ? pindexPrev->GetMedianTimePast()
                                : block.GetBlockTime();
        if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
            return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal");
        }
    }

    // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
    // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
    // Since MIN_BLOCK_VERSION = 4 all blocks with nHeight > 0 should satisfy this.
    // This rule is not applied to the genesis block, which didn't include the height
    // in the coinbase.
    if (nHeight > 0)
    {
        CScript expect = CScript() << nHeight;
        if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
            !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
            return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height");
        }
    }

    // Coinbase transaction must include an output sending 20% of
    // the block reward to a founders reward script, until the last founders
    // reward block is reached, with exception of the genesis block.
    // The last founders reward block is defined as the block just before the
    // first subsidy halving block, which occurs at halving_interval + slow_start_shift
    if ((nHeight > 0) && (nHeight <= consensusParams.GetLastFoundersRewardBlockHeight())) {
        bool found = false;

        BOOST_FOREACH(const CTxOut& output, block.vtx[0].vout) {
            if (output.scriptPubKey == Params().GetFoundersRewardScriptAtHeight(nHeight)) {
                if (output.nValue == (GetBlockSubsidy(nHeight, consensusParams) / 5)) {
                    found = true;
                    break;
                }
            }
        }

        if (!found) {
            return state.DoS(100, error("%s: founders reward missing", __func__), REJECT_INVALID, "cb-no-founders-reward");
        }
    }

    return true;
}
3135 | ||
/**
 * Process a block header: reject duplicates that are already marked
 * invalid, run context-free and contextual header checks, and add the
 * header to mapBlockIndex. On success, *ppindex (if provided) points at
 * the header's index entry. Requires cs_main.
 */
bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
{
    const CChainParams& chainparams = Params();
    AssertLockHeld(cs_main);
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex *pindex = NULL;
    if (miSelf != mapBlockIndex.end()) {
        // Block header is already known.
        pindex = miSelf->second;
        if (ppindex)
            *ppindex = pindex;
        if (pindex->nStatus & BLOCK_FAILED_MASK)
            return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
        return true;
    }

    if (!CheckBlockHeader(block, state))
        return false;

    // Get prev block index
    CBlockIndex* pindexPrev = NULL;
    if (hash != chainparams.GetConsensus().hashGenesisBlock) {
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
    }

    if (!ContextualCheckBlockHeader(block, state, pindexPrev))
        return false;

    // All checks passed; create the index entry (pindex is NULL here unless
    // the header was already known, in which case we returned above).
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}
3179 | ||
3180 | bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp) | |
3181 | { | |
3182 | const CChainParams& chainparams = Params(); | |
3183 | AssertLockHeld(cs_main); | |
3184 | ||
3185 | CBlockIndex *&pindex = *ppindex; | |
3186 | ||
3187 | if (!AcceptBlockHeader(block, state, &pindex)) | |
3188 | return false; | |
3189 | ||
3190 | // Try to process all requested blocks that we don't have, but only | |
3191 | // process an unrequested block if it's new and has enough work to | |
3192 | // advance our tip, and isn't too many blocks ahead. | |
3193 | bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; | |
3194 | bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); | |
3195 | // Blocks that are too out-of-order needlessly limit the effectiveness of | |
3196 | // pruning, because pruning will not delete block files that contain any | |
3197 | // blocks which are too close in height to the tip. Apply this test | |
3198 | // regardless of whether pruning is enabled; it should generally be safe to | |
3199 | // not process unrequested blocks. | |
3200 | bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP)); | |
3201 | ||
3202 | // TODO: deal better with return value and error conditions for duplicate | |
3203 | // and unrequested blocks. | |
3204 | if (fAlreadyHave) return true; | |
3205 | if (!fRequested) { // If we didn't ask for it: | |
3206 | if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned | |
3207 | if (!fHasMoreWork) return true; // Don't process less-work chains | |
3208 | if (fTooFarAhead) return true; // Block height is too high | |
3209 | } | |
3210 | ||
3211 | // See method docstring for why this is always disabled | |
3212 | auto verifier = libzcash::ProofVerifier::Disabled(); | |
3213 | if ((!CheckBlock(block, state, verifier)) || !ContextualCheckBlock(block, state, pindex->pprev)) { | |
3214 | if (state.IsInvalid() && !state.CorruptionPossible()) { | |
3215 | pindex->nStatus |= BLOCK_FAILED_VALID; | |
3216 | setDirtyBlockIndex.insert(pindex); | |
3217 | } | |
3218 | return false; | |
3219 | } | |
3220 | ||
3221 | int nHeight = pindex->nHeight; | |
3222 | ||
3223 | // Write block to history file | |
3224 | try { | |
3225 | unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); | |
3226 | CDiskBlockPos blockPos; | |
3227 | if (dbp != NULL) | |
3228 | blockPos = *dbp; | |
3229 | if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL)) | |
3230 | return error("AcceptBlock(): FindBlockPos failed"); | |
3231 | if (dbp == NULL) | |
3232 | if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) | |
3233 | AbortNode(state, "Failed to write block"); | |
3234 | if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) | |
3235 | return error("AcceptBlock(): ReceivedBlockTransactions failed"); | |
3236 | } catch (const std::runtime_error& e) { | |
3237 | return AbortNode(state, std::string("System error: ") + e.what()); | |
3238 | } | |
3239 | ||
3240 | if (fCheckForPruning) | |
3241 | FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files | |
3242 | ||
3243 | return true; | |
3244 | } | |
3245 | ||
3246 | static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams) | |
3247 | { | |
3248 | unsigned int nFound = 0; | |
3249 | for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++) | |
3250 | { | |
3251 | if (pstart->nVersion >= minVersion) | |
3252 | ++nFound; | |
3253 | pstart = pstart->pprev; | |
3254 | } | |
3255 | return (nFound >= nRequired); | |
3256 | } | |
3257 | ||
3258 | ||
/**
 * Entry point for newly received or locally mined blocks: runs preliminary
 * context-free checks, stores the block to disk via AcceptBlock, and then
 * attempts to activate the best chain.
 *
 * @param pfrom            peer the block came from, or NULL if generated locally
 * @param fForceProcessing process the block even if it was not requested
 * @param dbp              non-NULL when the block is already on disk (import)
 */
bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp)
{
    // Preliminary checks
    auto verifier = libzcash::ProofVerifier::Disabled();
    bool checked = CheckBlock(*pblock, state, verifier);

    {
        LOCK(cs_main);
        // Note: MarkBlockAsReceived is called even when CheckBlock failed --
        // presumably so the block stops being tracked as in-flight; verify
        // against its definition if changing this ordering.
        bool fRequested = MarkBlockAsReceived(pblock->GetHash());
        fRequested |= fForceProcessing;
        if (!checked) {
            return error("%s: CheckBlock FAILED", __func__);
        }

        // Store to disk
        CBlockIndex *pindex = NULL;
        bool ret = AcceptBlock(*pblock, state, &pindex, fRequested, dbp);
        if (pindex && pfrom) {
            // Remember who sent the block so misbehavior can be attributed.
            mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
        }
        CheckBlockIndex();
        if (!ret)
            return error("%s: AcceptBlock FAILED", __func__);
    }

    // Done outside cs_main scope above; ActivateBestChain manages its own locking.
    if (!ActivateBestChain(state, pblock))
        return error("%s: ActivateBestChain failed", __func__);

    return true;
}
3289 | ||
/**
 * Check whether a block (typically a freshly assembled block template)
 * would be valid if connected on top of the current tip, without modifying
 * the chain state: all validation runs against a throwaway coins view and
 * a dummy index entry. Requires cs_main; pindexPrev must be the current tip.
 */
bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
    AssertLockHeld(cs_main);
    assert(pindexPrev == chainActive.Tip());

    // Scratch view layered over the tip's coins; discarded on return.
    CCoinsViewCache viewNew(pcoinsTip);
    CBlockIndex indexDummy(block);
    indexDummy.pprev = pindexPrev;
    indexDummy.nHeight = pindexPrev->nHeight + 1;
    // JoinSplit proofs are verified in ConnectBlock
    auto verifier = libzcash::ProofVerifier::Disabled();

    // NOTE: CheckBlockHeader is called by CheckBlock
    if (!ContextualCheckBlockHeader(block, state, pindexPrev))
        return false;
    if (!CheckBlock(block, state, verifier, fCheckPOW, fCheckMerkleRoot))
        return false;
    if (!ContextualCheckBlock(block, state, pindexPrev))
        return false;
    // fJustCheck=true: validate the connection without writing anything.
    if (!ConnectBlock(block, state, &indexDummy, viewNew, true))
        return false;
    assert(state.IsValid());

    return true;
}
3315 | ||
3316 | /** | |
3317 | * BLOCK PRUNING CODE | |
3318 | */ | |
3319 | ||
3320 | /* Calculate the amount of disk space the block & undo files currently use */ | |
3321 | uint64_t CalculateCurrentUsage() | |
3322 | { | |
3323 | uint64_t retval = 0; | |
3324 | BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) { | |
3325 | retval += file.nSize + file.nUndoSize; | |
3326 | } | |
3327 | return retval; | |
3328 | } | |
3329 | ||
/* Prune a block file (modify associated database entries)*/
// Marks every block index entry whose data lives in the given file as no
// longer having block/undo data on disk, drops related mapBlocksUnlinked
// entries, and clears the file's info record. The file itself is deleted
// separately (see UnlinkPrunedFiles).
void PruneOneBlockFile(const int fileNumber)
{
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
        CBlockIndex* pindex = it->second;
        if (pindex->nFile == fileNumber) {
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from mapBlocksUnlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // mapBlocksUnlinked or setBlockIndexCandidates.
            // NOTE: the inner `it` shadows the outer iterator; the range
            // iterator is advanced before erasing so the erase is safe.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
                range.first++;
                if (it->second == pindex) {
                    mapBlocksUnlinked.erase(it);
                }
            }
        }
    }

    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
3361 | ||
3362 | ||
3363 | void UnlinkPrunedFiles(std::set<int>& setFilesToPrune) | |
3364 | { | |
3365 | for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { | |
3366 | CDiskBlockPos pos(*it, 0); | |
3367 | boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); | |
3368 | boost::filesystem::remove(GetBlockPosFilename(pos, "rev")); | |
3369 | LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it); | |
3370 | } | |
3371 | } | |
3372 | ||
/* Calculate the block/rev files that should be deleted to remain under target*/
// Scans block files oldest-first, pruning each until total usage (plus a
// one-chunk allocation buffer) drops below nPruneTarget. Files that may
// contain a block within MIN_BLOCKS_TO_KEEP of the tip are never pruned.
// Selected file numbers are added to setFilesToPrune; the caller deletes
// them (UnlinkPrunedFiles).
void FindFilesToPrune(std::set<int>& setFilesToPrune)
{
    LOCK2(cs_main, cs_LastBlockFile);
    if (chainActive.Tip() == NULL || nPruneTarget == 0) {
        return;
    }
    // Never prune anything until the chain is past the configured minimum.
    if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
        return;
    }

    unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count=0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            // Skip files that hold no block data (already pruned or empty).
            if (vinfoBlockFile[fileNumber].nSize == 0)
                continue;

            if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
                break;

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
                continue;

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
3420 | ||
3421 | bool CheckDiskSpace(uint64_t nAdditionalBytes) | |
3422 | { | |
3423 | uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available; | |
3424 | ||
3425 | // Check for nMinDiskSpace bytes (currently 50MB) | |
3426 | if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) | |
3427 | return AbortNode("Disk space is low!", _("Error: Disk space is low!")); | |
3428 | ||
3429 | return true; | |
3430 | } | |
3431 | ||
3432 | FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) | |
3433 | { | |
3434 | if (pos.IsNull()) | |
3435 | return NULL; | |
3436 | boost::filesystem::path path = GetBlockPosFilename(pos, prefix); | |
3437 | boost::filesystem::create_directories(path.parent_path()); | |
3438 | FILE* file = fopen(path.string().c_str(), "rb+"); | |
3439 | if (!file && !fReadOnly) | |
3440 | file = fopen(path.string().c_str(), "wb+"); | |
3441 | if (!file) { | |
3442 | LogPrintf("Unable to open file %s\n", path.string()); | |
3443 | return NULL; | |
3444 | } | |
3445 | if (pos.nPos) { | |
3446 | if (fseek(file, pos.nPos, SEEK_SET)) { | |
3447 | LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); | |
3448 | fclose(file); | |
3449 | return NULL; | |
3450 | } | |
3451 | } | |
3452 | return file; | |
3453 | } | |
3454 | ||
/** Open a block data file (blkNNNNN.dat) at the given position. */
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}
3458 | ||
/** Open an undo data file (revNNNNN.dat) at the given position. */
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}
3462 | ||
/**
 * Translate a CDiskBlockPos into a filesystem path of the form
 * <datadir>/blocks/<prefix>NNNNN.dat (e.g. blk00012.dat, rev00012.dat).
 */
boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
3467 | ||
3468 | CBlockIndex * InsertBlockIndex(uint256 hash) | |
3469 | { | |
3470 | if (hash.IsNull()) | |
3471 | return NULL; | |
3472 | ||
3473 | // Return existing | |
3474 | BlockMap::iterator mi = mapBlockIndex.find(hash); | |
3475 | if (mi != mapBlockIndex.end()) | |
3476 | return (*mi).second; | |
3477 | ||
3478 | // Create new | |
3479 | CBlockIndex* pindexNew = new CBlockIndex(); | |
3480 | if (!pindexNew) | |
3481 | throw runtime_error("LoadBlockIndex(): new CBlockIndex failed"); | |
3482 | mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; | |
3483 | pindexNew->phashBlock = &((*mi).first); | |
3484 | ||
3485 | return pindexNew; | |
3486 | } | |
3487 | ||
/**
 * Load the block index from the on-disk database (pblocktree) and rebuild
 * the derived in-memory state: per-block chain work and chain-tx counts,
 * setBlockIndexCandidates, pindexBestInvalid, pindexBestHeader, the block
 * file info records, prune/reindex/txindex flags, and finally the active
 * chain tip taken from the coins database.
 *
 * Returns false if the raw index fails to load or a required blk file is
 * missing; returns true (with no tip set) when the coins DB has no best
 * block yet.
 */
bool static LoadBlockIndexDB()
{
    const CChainParams& chainparams = Params();
    if (!pblocktree->LoadBlockIndexGuts())
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    // Sort by height first so every block's parent is processed before it.
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        // We can link the chain of blocks for which we've received transactions at some point.
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's chain is not fully linked yet; remember this
                    // block so it can be reconsidered once the parent is.
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Pick up any file-info records beyond the recorded last file.
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);

    PruneBlockIndexCandidates();

    LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));

    return true;
}
3597 | ||
// Show the "Verifying blocks..." progress indicator for the lifetime of
// this object (cleared in the destructor).
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}
3602 | ||
// Clear the verification progress display.
CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}
3607 | ||
/**
 * Verify the consistency of the last nCheckDepth blocks of the active chain
 * against the coin database, at increasing thoroughness levels:
 *   0 - blocks can be read back from disk
 *   1 - blocks pass CheckBlock()
 *   2 - undo data can be read back from disk
 *   3 - tip blocks can be disconnected in memory without coin inconsistencies
 *   4 - the disconnected blocks can be reconnected (full replay)
 * Each level implies all lower ones. All work happens on a throwaway
 * CCoinsViewCache layered on @a coinsview; the on-disk chainstate is untouched.
 *
 * @param coinsview    coins view to layer the scratch cache on (pcoinsTip's backend in practice)
 * @param nCheckLevel  requested level, clamped to [0, 4]
 * @param nCheckDepth  number of blocks to check; <= 0 means "effectively all"
 * @return false (via error()) on hard failures; true otherwise, including when
 *         interrupted by shutdown or when the chain is too short to check.
 */
bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    // Nothing to verify with an empty chain or only the genesis block.
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    // pindexState tracks how far back level-3 disconnection has progressed;
    // level 4 later reconnects forward from here.
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    // No need to verify JoinSplits twice
    auto verifier = libzcash::ProofVerifier::Disabled();
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        // Levels 0-3 get the first 50% of the progress bar when a level-4
        // reconnect pass will follow; otherwise they get the full 100%.
        uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
        if (pindex->nHeight < chainActive.Height()-nCheckDepth)
            break;
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex))
            return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state, verifier))
            return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        // (only while the scratch cache still fits in the configured coin-cache budget)
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                // Non-fatal inconsistency: remember the deepest failing block
                // and restart the good-transaction count from here.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            // Second half of the progress bar: counts back up toward the tip.
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex))
                return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins))
                return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}
3687 | ||
/**
 * Reset all in-memory chain and network-sync state: the active chain, block
 * index candidates, mempool, orphan pools, per-node/sync bookkeeping, block
 * file info and dirty-state sets. Also frees every heap-allocated CBlockIndex
 * owned by mapBlockIndex. After this call the node behaves as if no block
 * index had ever been loaded.
 */
void UnloadBlockIndex()
{
    LOCK(cs_main);
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
    pindexBestHeader = NULL;
    mempool.clear();
    mapOrphanTransactions.clear();
    mapOrphanTransactionsByPrev.clear();
    nSyncStarted = 0;
    mapBlocksUnlinked.clear();
    vinfoBlockFile.clear();
    nLastBlockFile = 0;
    nBlockSequenceId = 1;
    mapBlockSource.clear();
    mapBlocksInFlight.clear();
    nQueuedValidatedHeaders = 0;
    nPreferredDownload = 0;
    setDirtyBlockIndex.clear();
    setDirtyFileInfo.clear();
    mapNodeState.clear();
    recentRejects.reset(NULL);

    // mapBlockIndex owns its CBlockIndex entries: delete them before clearing
    // so the pointers aren't leaked.
    BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
        delete entry.second;
    }
    mapBlockIndex.clear();
    fHavePruned = false;
}
3718 | ||
3719 | bool LoadBlockIndex() | |
3720 | { | |
3721 | // Load block index from databases | |
3722 | if (!fReindex && !LoadBlockIndexDB()) | |
3723 | return false; | |
3724 | return true; | |
3725 | } | |
3726 | ||
3727 | ||
3728 | bool InitBlockIndex() { | |
3729 | const CChainParams& chainparams = Params(); | |
3730 | LOCK(cs_main); | |
3731 | ||
3732 | // Initialize global variables that cannot be constructed at startup. | |
3733 | recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); | |
3734 | ||
3735 | // Check whether we're already initialized | |
3736 | if (chainActive.Genesis() != NULL) | |
3737 | return true; | |
3738 | ||
3739 | // Use the provided setting for -txindex in the new database | |
3740 | fTxIndex = GetBoolArg("-txindex", false); | |
3741 | pblocktree->WriteFlag("txindex", fTxIndex); | |
3742 | LogPrintf("Initializing databases...\n"); | |
3743 | ||
3744 | // Only add the genesis block if not reindexing (in which case we reuse the one already on disk) | |
3745 | if (!fReindex) { | |
3746 | try { | |
3747 | CBlock &block = const_cast<CBlock&>(Params().GenesisBlock()); | |
3748 | // Start new block file | |
3749 | unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); | |
3750 | CDiskBlockPos blockPos; | |
3751 | CValidationState state; | |
3752 | if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) | |
3753 | return error("LoadBlockIndex(): FindBlockPos failed"); | |
3754 | if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) | |
3755 | return error("LoadBlockIndex(): writing genesis block to disk failed"); | |
3756 | CBlockIndex *pindex = AddToBlockIndex(block); | |
3757 | if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) | |
3758 | return error("LoadBlockIndex(): genesis block not accepted"); | |
3759 | if (!ActivateBestChain(state, &block)) | |
3760 | return error("LoadBlockIndex(): genesis block cannot be activated"); | |
3761 | // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data | |
3762 | return FlushStateToDisk(state, FLUSH_STATE_ALWAYS); | |
3763 | } catch (const std::runtime_error& e) { | |
3764 | return error("LoadBlockIndex(): failed to initialize block database: %s", e.what()); | |
3765 | } | |
3766 | } | |
3767 | ||
3768 | return true; | |
3769 | } | |
3770 | ||
3771 | ||
3772 | ||
/**
 * Import blocks from an external blkNNNNN.dat-style file (used by -reindex and
 * -loadblock). Scans the buffered stream for the network magic, deserializes
 * each block, and feeds it to ProcessNewBlock. Blocks whose parent is not yet
 * known are remembered by disk position (when @a dbp is provided, i.e. during
 * reindex) and processed once their parent arrives.
 *
 * @param fileIn  open file; ownership is taken and it is fclose()d by the
 *                CBufferedFile destructor.
 * @param dbp     if non-NULL, the file's disk position; updated to each block's
 *                offset and used to record out-of-order blocks.
 * @return true if at least one block was successfully processed.
 */
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
{
    const CChainParams& chainparams = Params();
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header: scan to the first magic byte, then check the
                // full message-start sequence and the following size field
                unsigned char buf[MESSAGE_START_SIZE];
                blkdat.FindByte(Params().MessageStart()[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                            block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    CValidationState state;
                    if (ProcessNewBlock(state, NULL, &block, true, dbp))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Recursively process earlier encountered successors of this block
                // (breadth-first via an explicit queue rather than recursion)
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        if (ReadBlockFromDisk(block, it->second))
                        {
                            LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                                    head.ToString());
                            CValidationState dummy;
                            if (ProcessNewBlock(dummy, NULL, &block, true, &it->second))
                            {
                                nLoaded++;
                                queue.push_back(block.GetHash());
                            }
                        }
                        // advance before erasing: erase(it) would invalidate it
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                    }
                }
            } catch (const std::exception& e) {
                LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
            }
        }
    } catch (const std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
3875 | ||
/**
 * Exhaustive self-check of mapBlockIndex invariants (only runs when
 * -checkblockindex is enabled). Builds a forward (parent -> children) map of
 * the whole block tree and walks it depth-first, asserting per-node status,
 * height, chainwork, candidate-set and mapBlocksUnlinked consistency. Any
 * violated invariant aborts via assert; on success the function returns
 * silently.
 */
void static CheckBlockIndex()
{
    const Consensus::Params& consensusParams = Params().GetConsensus();
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
    // iterating the block tree require that chainActive has been initialized.)
    if (chainActive.Height() < 0) {
        assert(mapBlockIndex.size() <= 1);
        return;
    }

    // Build forward-pointing map of the entire block tree.
    std::multimap<CBlockIndex*,CBlockIndex*> forward;
    for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
        forward.insert(std::make_pair(it->second->pprev, it->second));
    }

    assert(forward.size() == mapBlockIndex.size());

    // The genesis block is the unique entry keyed under a NULL parent.
    std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
    CBlockIndex *pindex = rangeGenesis.first->second;
    rangeGenesis.first++;
    assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.

    // Iterate over the entire block tree, using depth-first search.
    // Along the way, remember whether there are blocks on the path from genesis
    // block being explored which are the first to have certain properties.
    size_t nNodes = 0;
    int nHeight = 0;
    CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
    CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
    CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
    CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
    CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
    CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
    CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
    while (pindex != NULL) {
        nNodes++;
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
        }
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0);  // nSequenceId can't be set for blocks that aren't linked
        // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
        // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
        if (!fHavePruned) {
            // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
            assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
            assert(pindexFirstMissing == pindexFirstNeverProcessed);
        } else {
            // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
            if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
        }
        if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
        // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
        assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and
                // is valid and we have all data for its parents, it must be in
                // setBlockIndexCandidates. chainActive.Tip() must also be there
                // even if some data has been pruned.
                if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
                    assert(setBlockIndexCandidates.count(pindex));
                }
                // If some parent is missing, then it could be that this block was in
                // setBlockIndexCandidates but had to be removed because of the missing data.
                // In this case it must be in mapBlocksUnlinked -- see test below.
            }
        } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
            assert(foundInUnlinked);
        }
        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
            assert(fHavePruned); // We must have pruned.
            // This block may have entered mapBlocksUnlinked if:
            //  - it has a descendant that at some point had more work than the
            //    tip, and
            //  - we tried switching to that descendant but were missing
            //    data for some intermediate block between chainActive and the
            //    tip.
            // So if this block is itself better than chainActive.Tip() and it wasn't in
            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                if (pindexFirstInvalid == NULL) {
                    assert(foundInUnlinked);
                }
            }
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }

    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}
4060 | ||
4061 | ////////////////////////////////////////////////////////////////////////////// | |
4062 | // | |
4063 | // CAlert | |
4064 | // | |
4065 | ||
4066 | std::string GetWarnings(const std::string& strFor) | |
4067 | { | |
4068 | int nPriority = 0; | |
4069 | string strStatusBar; | |
4070 | string strRPC; | |
4071 | ||
4072 | if (!CLIENT_VERSION_IS_RELEASE) | |
4073 | strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications"); | |
4074 | ||
4075 | if (GetBoolArg("-testsafemode", false)) | |
4076 | strStatusBar = strRPC = "testsafemode enabled"; | |
4077 | ||
4078 | // Misc warnings like out of disk space and clock is wrong | |
4079 | if (strMiscWarning != "") | |
4080 | { | |
4081 | nPriority = 1000; | |
4082 | strStatusBar = strMiscWarning; | |
4083 | } | |
4084 | ||
4085 | if (fLargeWorkForkFound) | |
4086 | { | |
4087 | nPriority = 2000; | |
4088 | strStatusBar = strRPC = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues."); | |
4089 | } | |
4090 | else if (fLargeWorkInvalidChainFound) | |
4091 | { | |
4092 | nPriority = 2000; | |
4093 | strStatusBar = strRPC = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade."); | |
4094 | } | |
4095 | ||
4096 | // Alerts | |
4097 | { | |
4098 | LOCK(cs_mapAlerts); | |
4099 | BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts) | |
4100 | { | |
4101 | const CAlert& alert = item.second; | |
4102 | if (alert.AppliesToMe() && alert.nPriority > nPriority) | |
4103 | { | |
4104 | nPriority = alert.nPriority; | |
4105 | strStatusBar = alert.strStatusBar; | |
4106 | if (alert.nPriority >= ALERT_PRIORITY_SAFE_MODE) { | |
4107 | strRPC = alert.strRPCError; | |
4108 | } | |
4109 | } | |
4110 | } | |
4111 | } | |
4112 | ||
4113 | if (strFor == "statusbar") | |
4114 | return strStatusBar; | |
4115 | else if (strFor == "rpc") | |
4116 | return strRPC; | |
4117 | assert(!"GetWarnings(): invalid parameter"); | |
4118 | return "error"; | |
4119 | } | |
4120 | ||
4121 | ||
4122 | ||
4123 | ||
4124 | ||
4125 | ||
4126 | ||
4127 | ||
4128 | ////////////////////////////////////////////////////////////////////////////// | |
4129 | // | |
4130 | // Messages | |
4131 | // | |
4132 | ||
4133 | ||
4134 | bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) | |
4135 | { | |
4136 | switch (inv.type) | |
4137 | { | |
4138 | case MSG_TX: | |
4139 | { | |
4140 | assert(recentRejects); | |
4141 | if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) | |
4142 | { | |
4143 | // If the chain tip has changed previously rejected transactions | |
4144 | // might be now valid, e.g. due to a nLockTime'd tx becoming valid, | |
4145 | // or a double-spend. Reset the rejects filter and give those | |
4146 | // txs a second chance. | |
4147 | hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash(); | |
4148 | recentRejects->reset(); | |
4149 | } | |
4150 | ||
4151 | return recentRejects->contains(inv.hash) || | |
4152 | mempool.exists(inv.hash) || | |
4153 | mapOrphanTransactions.count(inv.hash) || | |
4154 | pcoinsTip->HaveCoins(inv.hash); | |
4155 | } | |
4156 | case MSG_BLOCK: | |
4157 | return mapBlockIndex.count(inv.hash); | |
4158 | } | |
4159 | // Don't know what it is, just say we already got one | |
4160 | return true; | |
4161 | } | |
4162 | ||
/**
 * Serve queued "getdata" requests from a peer: send blocks (full or
 * bloom-filtered) and relayed/mempool transactions. Processes entries from
 * pfrom->vRecvGetData until the queue is drained, the send buffer fills up,
 * or a block has been sent (at most one block per call to bound the work done
 * per message-handling pass). Requests we cannot satisfy are answered with a
 * "notfound" message.
 */
void static ProcessGetData(CNode* pfrom)
{
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();

    vector<CInv> vNotFound;

    LOCK(cs_main);

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        const CInv &inv = *it;
        {
            boost::this_thread::interruption_point();
            it++;

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
            {
                bool send = false;
                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                if (mi != mapBlockIndex.end())
                {
                    if (chainActive.Contains(mi->second)) {
                        send = true;
                    } else {
                        static const int nOneMonth = 30 * 24 * 60 * 60;
                        // To prevent fingerprinting attacks, only send blocks outside of the active
                        // chain if they are valid, and no more than a month older (both in time, and in
                        // best equivalent proof of work) than the best header chain we know about.
                        send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
                            (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
                            (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
                        if (!send) {
                            LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
                        }
                    }
                }
                // Pruned nodes may have deleted the block, so check whether
                // it's available before trying to send.
                if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
                {
                    // Send block from disk
                    CBlock block;
                    if (!ReadBlockFromDisk(block, (*mi).second))
                        assert(!"cannot load block from disk");
                    if (inv.type == MSG_BLOCK)
                        pfrom->PushMessage("block", block);
                    else // MSG_FILTERED_BLOCK)
                    {
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter)
                        {
                            CMerkleBlock merkleBlock(block, *pfrom->pfilter);
                            pfrom->PushMessage("merkleblock", merkleBlock);
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didn't send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec allows us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                                if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
                                    pfrom->PushMessage("tx", block.vtx[pair.first]);
                        }
                        // else
                            // no response
                    }

                    // Trigger the peer node to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue)
                    {
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vector<CInv> vInv;
                        vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                        pfrom->PushMessage("inv", vInv);
                        pfrom->hashContinue.SetNull();
                    }
                }
            }
            else if (inv.IsKnownType())
            {
                // Send stream from relay memory
                bool pushed = false;
                {
                    LOCK(cs_mapRelay);
                    map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
                    if (mi != mapRelay.end()) {
                        pfrom->PushMessage(inv.GetCommand(), (*mi).second);
                        pushed = true;
                    }
                }
                // Fall back to the mempool for transactions not in relay memory.
                if (!pushed && inv.type == MSG_TX) {
                    CTransaction tx;
                    if (mempool.lookup(inv.hash, tx)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << tx;
                        pfrom->PushMessage("tx", ss);
                        pushed = true;
                    }
                }
                if (!pushed) {
                    vNotFound.push_back(inv);
                }
            }

            // Track requests for our stuff.
            GetMainSignals().Inventory(inv.hash);

            // Stop after serving one (possibly filtered) block per pass.
            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
                break;
        }
    }

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage("notfound", vNotFound);
    }
}
4295 | ||
4296 | bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived) | |
4297 | { | |
4298 | const CChainParams& chainparams = Params(); | |
4299 | LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id); | |
4300 | if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0) | |
4301 | { | |
4302 | LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); | |
4303 | return true; | |
4304 | } | |
4305 | ||
4306 | ||
4307 | ||
4308 | ||
4309 | if (strCommand == "version") | |
4310 | { | |
4311 | // Each connection can only send one version message | |
4312 | if (pfrom->nVersion != 0) | |
4313 | { | |
4314 | pfrom->PushMessage("reject", strCommand, REJECT_DUPLICATE, string("Duplicate version message")); | |
4315 | Misbehaving(pfrom->GetId(), 1); | |
4316 | return false; | |
4317 | } | |
4318 | ||
4319 | int64_t nTime; | |
4320 | CAddress addrMe; | |
4321 | CAddress addrFrom; | |
4322 | uint64_t nNonce = 1; | |
4323 | vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe; | |
4324 | if (pfrom->nVersion < MIN_PEER_PROTO_VERSION) | |
4325 | { | |
4326 | // disconnect from peers older than this proto version | |
4327 | LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion); | |
4328 | pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE, | |
4329 | strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)); | |
4330 | pfrom->fDisconnect = true; | |
4331 | return false; | |
4332 | } | |
4333 | ||
4334 | if (pfrom->nVersion == 10300) | |
4335 | pfrom->nVersion = 300; | |
4336 | if (!vRecv.empty()) | |
4337 | vRecv >> addrFrom >> nNonce; | |
4338 | if (!vRecv.empty()) { | |
4339 | vRecv >> LIMITED_STRING(pfrom->strSubVer, 256); | |
4340 | pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer); | |
4341 | } | |
4342 | if (!vRecv.empty()) | |
4343 | vRecv >> pfrom->nStartingHeight; | |
4344 | if (!vRecv.empty()) | |
4345 | vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message | |
4346 | else | |
4347 | pfrom->fRelayTxes = true; | |
4348 | ||
4349 | // Disconnect if we connected to ourself | |
4350 | if (nNonce == nLocalHostNonce && nNonce > 1) | |
4351 | { | |
4352 | LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); | |
4353 | pfrom->fDisconnect = true; | |
4354 | return true; | |
4355 | } | |
4356 | ||
4357 | pfrom->addrLocal = addrMe; | |
4358 | if (pfrom->fInbound && addrMe.IsRoutable()) | |
4359 | { | |
4360 | SeenLocal(addrMe); | |
4361 | } | |
4362 | ||
4363 | // Be shy and don't send version until we hear | |
4364 | if (pfrom->fInbound) | |
4365 | pfrom->PushVersion(); | |
4366 | ||
4367 | pfrom->fClient = !(pfrom->nServices & NODE_NETWORK); | |
4368 | ||
4369 | // Potentially mark this peer as a preferred download peer. | |
4370 | UpdatePreferredDownload(pfrom, State(pfrom->GetId())); | |
4371 | ||
4372 | // Change version | |
4373 | pfrom->PushMessage("verack"); | |
4374 | pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); | |
4375 | ||
4376 | if (!pfrom->fInbound) | |
4377 | { | |
4378 | // Advertise our address | |
4379 | if (fListen && !IsInitialBlockDownload()) | |
4380 | { | |
4381 | CAddress addr = GetLocalAddress(&pfrom->addr); | |
4382 | if (addr.IsRoutable()) | |
4383 | { | |
4384 | pfrom->PushAddress(addr); | |
4385 | } else if (IsPeerAddrLocalGood(pfrom)) { | |
4386 | addr.SetIP(pfrom->addrLocal); | |
4387 | pfrom->PushAddress(addr); | |
4388 | } | |
4389 | } | |
4390 | ||
4391 | // Get recent addresses | |
4392 | if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000) | |
4393 | { | |
4394 | pfrom->PushMessage("getaddr"); | |
4395 | pfrom->fGetAddr = true; | |
4396 | } | |
4397 | addrman.Good(pfrom->addr); | |
4398 | } else { | |
4399 | if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom) | |
4400 | { | |
4401 | addrman.Add(addrFrom, addrFrom); | |
4402 | addrman.Good(addrFrom); | |
4403 | } | |
4404 | } | |
4405 | ||
4406 | // Relay alerts | |
4407 | { | |
4408 | LOCK(cs_mapAlerts); | |
4409 | BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts) | |
4410 | item.second.RelayTo(pfrom); | |
4411 | } | |
4412 | ||
4413 | pfrom->fSuccessfullyConnected = true; | |
4414 | ||
4415 | string remoteAddr; | |
4416 | if (fLogIPs) | |
4417 | remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); | |
4418 | ||
4419 | LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n", | |
4420 | pfrom->cleanSubVer, pfrom->nVersion, | |
4421 | pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, | |
4422 | remoteAddr); | |
4423 | ||
4424 | int64_t nTimeOffset = nTime - GetTime(); | |
4425 | pfrom->nTimeOffset = nTimeOffset; | |
4426 | AddTimeData(pfrom->addr, nTimeOffset); | |
4427 | } | |
4428 | ||
4429 | ||
4430 | else if (pfrom->nVersion == 0) | |
4431 | { | |
4432 | // Must have a version message before anything else | |
4433 | Misbehaving(pfrom->GetId(), 1); | |
4434 | return false; | |
4435 | } | |
4436 | ||
4437 | ||
4438 | else if (strCommand == "verack") | |
4439 | { | |
4440 | pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); | |
4441 | ||
4442 | // Mark this node as currently connected, so we update its timestamp later. | |
4443 | if (pfrom->fNetworkNode) { | |
4444 | LOCK(cs_main); | |
4445 | State(pfrom->GetId())->fCurrentlyConnected = true; | |
4446 | } | |
4447 | } | |
4448 | ||
4449 | ||
4450 | else if (strCommand == "addr") | |
4451 | { | |
4452 | vector<CAddress> vAddr; | |
4453 | vRecv >> vAddr; | |
4454 | ||
4455 | // Don't want addr from older versions unless seeding | |
4456 | if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000) | |
4457 | return true; | |
4458 | if (vAddr.size() > 1000) | |
4459 | { | |
4460 | Misbehaving(pfrom->GetId(), 20); | |
4461 | return error("message addr size() = %u", vAddr.size()); | |
4462 | } | |
4463 | ||
4464 | // Store the new addresses | |
4465 | vector<CAddress> vAddrOk; | |
4466 | int64_t nNow = GetAdjustedTime(); | |
4467 | int64_t nSince = nNow - 10 * 60; | |
4468 | BOOST_FOREACH(CAddress& addr, vAddr) | |
4469 | { | |
4470 | boost::this_thread::interruption_point(); | |
4471 | ||
4472 | if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) | |
4473 | addr.nTime = nNow - 5 * 24 * 60 * 60; | |
4474 | pfrom->AddAddressKnown(addr); | |
4475 | bool fReachable = IsReachable(addr); | |
4476 | if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) | |
4477 | { | |
4478 | // Relay to a limited number of other nodes | |
4479 | { | |
4480 | LOCK(cs_vNodes); | |
4481 | // Use deterministic randomness to send to the same nodes for 24 hours | |
4482 | // at a time so the addrKnowns of the chosen nodes prevent repeats | |
4483 | static uint256 hashSalt; | |
4484 | if (hashSalt.IsNull()) | |
4485 | hashSalt = GetRandHash(); | |
4486 | uint64_t hashAddr = addr.GetHash(); | |
4487 | uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60))); | |
4488 | hashRand = Hash(BEGIN(hashRand), END(hashRand)); | |
4489 | multimap<uint256, CNode*> mapMix; | |
4490 | BOOST_FOREACH(CNode* pnode, vNodes) | |
4491 | { | |
4492 | if (pnode->nVersion < CADDR_TIME_VERSION) | |
4493 | continue; | |
4494 | unsigned int nPointer; | |
4495 | memcpy(&nPointer, &pnode, sizeof(nPointer)); | |
4496 | uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer); | |
4497 | hashKey = Hash(BEGIN(hashKey), END(hashKey)); | |
4498 | mapMix.insert(make_pair(hashKey, pnode)); | |
4499 | } | |
4500 | int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s) | |
4501 | for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi) | |
4502 | ((*mi).second)->PushAddress(addr); | |
4503 | } | |
4504 | } | |
4505 | // Do not store addresses outside our network | |
4506 | if (fReachable) | |
4507 | vAddrOk.push_back(addr); | |
4508 | } | |
4509 | addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60); | |
4510 | if (vAddr.size() < 1000) | |
4511 | pfrom->fGetAddr = false; | |
4512 | if (pfrom->fOneShot) | |
4513 | pfrom->fDisconnect = true; | |
4514 | } | |
4515 | ||
4516 | ||
4517 | else if (strCommand == "inv") | |
4518 | { | |
4519 | vector<CInv> vInv; | |
4520 | vRecv >> vInv; | |
4521 | if (vInv.size() > MAX_INV_SZ) | |
4522 | { | |
4523 | Misbehaving(pfrom->GetId(), 20); | |
4524 | return error("message inv size() = %u", vInv.size()); | |
4525 | } | |
4526 | ||
4527 | LOCK(cs_main); | |
4528 | ||
4529 | std::vector<CInv> vToFetch; | |
4530 | ||
4531 | for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) | |
4532 | { | |
4533 | const CInv &inv = vInv[nInv]; | |
4534 | ||
4535 | boost::this_thread::interruption_point(); | |
4536 | pfrom->AddInventoryKnown(inv); | |
4537 | ||
4538 | bool fAlreadyHave = AlreadyHave(inv); | |
4539 | LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id); | |
4540 | ||
4541 | if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK) | |
4542 | pfrom->AskFor(inv); | |
4543 | ||
4544 | if (inv.type == MSG_BLOCK) { | |
4545 | UpdateBlockAvailability(pfrom->GetId(), inv.hash); | |
4546 | if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { | |
4547 | // First request the headers preceding the announced block. In the normal fully-synced | |
4548 | // case where a new block is announced that succeeds the current tip (no reorganization), | |
4549 | // there are no such headers. | |
4550 | // Secondly, and only when we are close to being synced, we request the announced block directly, | |
4551 | // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the | |
4552 | // time the block arrives, the header chain leading up to it is already validated. Not | |
4553 | // doing this will result in the received block being rejected as an orphan in case it is | |
4554 | // not a direct successor. | |
4555 | pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash); | |
4556 | CNodeState *nodestate = State(pfrom->GetId()); | |
4557 | if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 && | |
4558 | nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { | |
4559 | vToFetch.push_back(inv); | |
4560 | // Mark block as in flight already, even though the actual "getdata" message only goes out | |
4561 | // later (within the same cs_main lock, though). | |
4562 | MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus()); | |
4563 | } | |
4564 | LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); | |
4565 | } | |
4566 | } | |
4567 | ||
4568 | // Track requests for our stuff | |
4569 | GetMainSignals().Inventory(inv.hash); | |
4570 | ||
4571 | if (pfrom->nSendSize > (SendBufferSize() * 2)) { | |
4572 | Misbehaving(pfrom->GetId(), 50); | |
4573 | return error("send buffer size() = %u", pfrom->nSendSize); | |
4574 | } | |
4575 | } | |
4576 | ||
4577 | if (!vToFetch.empty()) | |
4578 | pfrom->PushMessage("getdata", vToFetch); | |
4579 | } | |
4580 | ||
4581 | ||
4582 | else if (strCommand == "getdata") | |
4583 | { | |
4584 | vector<CInv> vInv; | |
4585 | vRecv >> vInv; | |
4586 | if (vInv.size() > MAX_INV_SZ) | |
4587 | { | |
4588 | Misbehaving(pfrom->GetId(), 20); | |
4589 | return error("message getdata size() = %u", vInv.size()); | |
4590 | } | |
4591 | ||
4592 | if (fDebug || (vInv.size() != 1)) | |
4593 | LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id); | |
4594 | ||
4595 | if ((fDebug && vInv.size() > 0) || (vInv.size() == 1)) | |
4596 | LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); | |
4597 | ||
4598 | pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); | |
4599 | ProcessGetData(pfrom); | |
4600 | } | |
4601 | ||
4602 | ||
4603 | else if (strCommand == "getblocks") | |
4604 | { | |
4605 | CBlockLocator locator; | |
4606 | uint256 hashStop; | |
4607 | vRecv >> locator >> hashStop; | |
4608 | ||
4609 | LOCK(cs_main); | |
4610 | ||
4611 | // Find the last block the caller has in the main chain | |
4612 | CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator); | |
4613 | ||
4614 | // Send the rest of the chain | |
4615 | if (pindex) | |
4616 | pindex = chainActive.Next(pindex); | |
4617 | int nLimit = 500; | |
4618 | LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id); | |
4619 | for (; pindex; pindex = chainActive.Next(pindex)) | |
4620 | { | |
4621 | if (pindex->GetBlockHash() == hashStop) | |
4622 | { | |
4623 | LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | |
4624 | break; | |
4625 | } | |
4626 | pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); | |
4627 | if (--nLimit <= 0) | |
4628 | { | |
4629 | // When this block is requested, we'll send an inv that'll | |
4630 | // trigger the peer to getblocks the next batch of inventory. | |
4631 | LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | |
4632 | pfrom->hashContinue = pindex->GetBlockHash(); | |
4633 | break; | |
4634 | } | |
4635 | } | |
4636 | } | |
4637 | ||
4638 | ||
4639 | else if (strCommand == "getheaders") | |
4640 | { | |
4641 | CBlockLocator locator; | |
4642 | uint256 hashStop; | |
4643 | vRecv >> locator >> hashStop; | |
4644 | ||
4645 | LOCK(cs_main); | |
4646 | ||
4647 | if (IsInitialBlockDownload()) | |
4648 | return true; | |
4649 | ||
4650 | CBlockIndex* pindex = NULL; | |
4651 | if (locator.IsNull()) | |
4652 | { | |
4653 | // If locator is null, return the hashStop block | |
4654 | BlockMap::iterator mi = mapBlockIndex.find(hashStop); | |
4655 | if (mi == mapBlockIndex.end()) | |
4656 | return true; | |
4657 | pindex = (*mi).second; | |
4658 | } | |
4659 | else | |
4660 | { | |
4661 | // Find the last block the caller has in the main chain | |
4662 | pindex = FindForkInGlobalIndex(chainActive, locator); | |
4663 | if (pindex) | |
4664 | pindex = chainActive.Next(pindex); | |
4665 | } | |
4666 | ||
4667 | // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end | |
4668 | vector<CBlock> vHeaders; | |
4669 | int nLimit = MAX_HEADERS_RESULTS; | |
4670 | LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id); | |
4671 | for (; pindex; pindex = chainActive.Next(pindex)) | |
4672 | { | |
4673 | vHeaders.push_back(pindex->GetBlockHeader()); | |
4674 | if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) | |
4675 | break; | |
4676 | } | |
4677 | pfrom->PushMessage("headers", vHeaders); | |
4678 | } | |
4679 | ||
4680 | ||
4681 | else if (strCommand == "tx") | |
4682 | { | |
4683 | vector<uint256> vWorkQueue; | |
4684 | vector<uint256> vEraseQueue; | |
4685 | CTransaction tx; | |
4686 | vRecv >> tx; | |
4687 | ||
4688 | CInv inv(MSG_TX, tx.GetHash()); | |
4689 | pfrom->AddInventoryKnown(inv); | |
4690 | ||
4691 | LOCK(cs_main); | |
4692 | ||
4693 | bool fMissingInputs = false; | |
4694 | CValidationState state; | |
4695 | ||
4696 | pfrom->setAskFor.erase(inv.hash); | |
4697 | mapAlreadyAskedFor.erase(inv); | |
4698 | ||
4699 | if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) | |
4700 | { | |
4701 | mempool.check(pcoinsTip); | |
4702 | RelayTransaction(tx); | |
4703 | vWorkQueue.push_back(inv.hash); | |
4704 | ||
4705 | LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s: accepted %s (poolsz %u)\n", | |
4706 | pfrom->id, pfrom->cleanSubVer, | |
4707 | tx.GetHash().ToString(), | |
4708 | mempool.mapTx.size()); | |
4709 | ||
4710 | // Recursively process any orphan transactions that depended on this one | |
4711 | set<NodeId> setMisbehaving; | |
4712 | for (unsigned int i = 0; i < vWorkQueue.size(); i++) | |
4713 | { | |
4714 | map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]); | |
4715 | if (itByPrev == mapOrphanTransactionsByPrev.end()) | |
4716 | continue; | |
4717 | for (set<uint256>::iterator mi = itByPrev->second.begin(); | |
4718 | mi != itByPrev->second.end(); | |
4719 | ++mi) | |
4720 | { | |
4721 | const uint256& orphanHash = *mi; | |
4722 | const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx; | |
4723 | NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer; | |
4724 | bool fMissingInputs2 = false; | |
4725 | // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan | |
4726 | // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get | |
4727 | // anyone relaying LegitTxX banned) | |
4728 | CValidationState stateDummy; | |
4729 | ||
4730 | ||
4731 | if (setMisbehaving.count(fromPeer)) | |
4732 | continue; | |
4733 | if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) | |
4734 | { | |
4735 | LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString()); | |
4736 | RelayTransaction(orphanTx); | |
4737 | vWorkQueue.push_back(orphanHash); | |
4738 | vEraseQueue.push_back(orphanHash); | |
4739 | } | |
4740 | else if (!fMissingInputs2) | |
4741 | { | |
4742 | int nDos = 0; | |
4743 | if (stateDummy.IsInvalid(nDos) && nDos > 0) | |
4744 | { | |
4745 | // Punish peer that gave us an invalid orphan tx | |
4746 | Misbehaving(fromPeer, nDos); | |
4747 | setMisbehaving.insert(fromPeer); | |
4748 | LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString()); | |
4749 | } | |
4750 | // Has inputs but not accepted to mempool | |
4751 | // Probably non-standard or insufficient fee/priority | |
4752 | LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString()); | |
4753 | vEraseQueue.push_back(orphanHash); | |
4754 | assert(recentRejects); | |
4755 | recentRejects->insert(orphanHash); | |
4756 | } | |
4757 | mempool.check(pcoinsTip); | |
4758 | } | |
4759 | } | |
4760 | ||
4761 | BOOST_FOREACH(uint256 hash, vEraseQueue) | |
4762 | EraseOrphanTx(hash); | |
4763 | } | |
4764 | // TODO: currently, prohibit joinsplits from entering mapOrphans | |
4765 | else if (fMissingInputs && tx.vjoinsplit.size() == 0) | |
4766 | { | |
4767 | AddOrphanTx(tx, pfrom->GetId()); | |
4768 | ||
4769 | // DoS prevention: do not allow mapOrphanTransactions to grow unbounded | |
4770 | unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); | |
4771 | unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); | |
4772 | if (nEvicted > 0) | |
4773 | LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted); | |
4774 | } else { | |
4775 | assert(recentRejects); | |
4776 | recentRejects->insert(tx.GetHash()); | |
4777 | ||
4778 | if (pfrom->fWhitelisted) { | |
4779 | // Always relay transactions received from whitelisted peers, even | |
4780 | // if they were already in the mempool or rejected from it due | |
4781 | // to policy, allowing the node to function as a gateway for | |
4782 | // nodes hidden behind it. | |
4783 | // | |
4784 | // Never relay transactions that we would assign a non-zero DoS | |
4785 | // score for, as we expect peers to do the same with us in that | |
4786 | // case. | |
4787 | int nDoS = 0; | |
4788 | if (!state.IsInvalid(nDoS) || nDoS == 0) { | |
4789 | LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id); | |
4790 | RelayTransaction(tx); | |
4791 | } else { | |
4792 | LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s (code %d))\n", | |
4793 | tx.GetHash().ToString(), pfrom->id, state.GetRejectReason(), state.GetRejectCode()); | |
4794 | } | |
4795 | } | |
4796 | } | |
4797 | int nDoS = 0; | |
4798 | if (state.IsInvalid(nDoS)) | |
4799 | { | |
4800 | LogPrint("mempool", "%s from peer=%d %s was not accepted into the memory pool: %s\n", tx.GetHash().ToString(), | |
4801 | pfrom->id, pfrom->cleanSubVer, | |
4802 | state.GetRejectReason()); | |
4803 | pfrom->PushMessage("reject", strCommand, state.GetRejectCode(), | |
4804 | state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash); | |
4805 | if (nDoS > 0) | |
4806 | Misbehaving(pfrom->GetId(), nDoS); | |
4807 | } | |
4808 | } | |
4809 | ||
4810 | ||
4811 | else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing | |
4812 | { | |
4813 | std::vector<CBlockHeader> headers; | |
4814 | ||
4815 | // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. | |
4816 | unsigned int nCount = ReadCompactSize(vRecv); | |
4817 | if (nCount > MAX_HEADERS_RESULTS) { | |
4818 | Misbehaving(pfrom->GetId(), 20); | |
4819 | return error("headers message size = %u", nCount); | |
4820 | } | |
4821 | headers.resize(nCount); | |
4822 | for (unsigned int n = 0; n < nCount; n++) { | |
4823 | vRecv >> headers[n]; | |
4824 | ReadCompactSize(vRecv); // ignore tx count; assume it is 0. | |
4825 | } | |
4826 | ||
4827 | LOCK(cs_main); | |
4828 | ||
4829 | if (nCount == 0) { | |
4830 | // Nothing interesting. Stop asking this peer for more headers. | |
4831 | return true; | |
4832 | } | |
4833 | ||
4834 | CBlockIndex *pindexLast = NULL; | |
4835 | BOOST_FOREACH(const CBlockHeader& header, headers) { | |
4836 | CValidationState state; | |
4837 | if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) { | |
4838 | Misbehaving(pfrom->GetId(), 20); | |
4839 | return error("non-continuous headers sequence"); | |
4840 | } | |
4841 | if (!AcceptBlockHeader(header, state, &pindexLast)) { | |
4842 | int nDoS; | |
4843 | if (state.IsInvalid(nDoS)) { | |
4844 | if (nDoS > 0) | |
4845 | Misbehaving(pfrom->GetId(), nDoS); | |
4846 | return error("invalid header received"); | |
4847 | } | |
4848 | } | |
4849 | } | |
4850 | ||
4851 | if (pindexLast) | |
4852 | UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); | |
4853 | ||
4854 | if (nCount == MAX_HEADERS_RESULTS && pindexLast) { | |
4855 | // Headers message had its maximum size; the peer may have more headers. | |
4856 | // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue | |
4857 | // from there instead. | |
4858 | LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); | |
4859 | pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256()); | |
4860 | } | |
4861 | ||
4862 | CheckBlockIndex(); | |
4863 | } | |
4864 | ||
4865 | else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing | |
4866 | { | |
4867 | CBlock block; | |
4868 | vRecv >> block; | |
4869 | ||
4870 | CInv inv(MSG_BLOCK, block.GetHash()); | |
4871 | LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id); | |
4872 | ||
4873 | pfrom->AddInventoryKnown(inv); | |
4874 | ||
4875 | CValidationState state; | |
4876 | // Process all blocks from whitelisted peers, even if not requested, | |
4877 | // unless we're still syncing with the network. | |
4878 | // Such an unrequested block may still be processed, subject to the | |
4879 | // conditions in AcceptBlock(). | |
4880 | bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); | |
4881 | ProcessNewBlock(state, pfrom, &block, forceProcessing, NULL); | |
4882 | int nDoS; | |
4883 | if (state.IsInvalid(nDoS)) { | |
4884 | pfrom->PushMessage("reject", strCommand, state.GetRejectCode(), | |
4885 | state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash); | |
4886 | if (nDoS > 0) { | |
4887 | LOCK(cs_main); | |
4888 | Misbehaving(pfrom->GetId(), nDoS); | |
4889 | } | |
4890 | } | |
4891 | ||
4892 | } | |
4893 | ||
4894 | ||
4895 | // This asymmetric behavior for inbound and outbound connections was introduced | |
4896 | // to prevent a fingerprinting attack: an attacker can send specific fake addresses | |
4897 | // to users' AddrMan and later request them by sending getaddr messages. | |
4898 | // Making nodes which are behind NAT and can only make outgoing connections ignore | |
4899 | // the getaddr message mitigates the attack. | |
4900 | else if ((strCommand == "getaddr") && (pfrom->fInbound)) | |
4901 | { | |
4902 | // Only send one GetAddr response per connection to reduce resource waste | |
4903 | // and discourage addr stamping of INV announcements. | |
4904 | if (pfrom->fSentAddr) { | |
4905 | LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id); | |
4906 | return true; | |
4907 | } | |
4908 | pfrom->fSentAddr = true; | |
4909 | ||
4910 | pfrom->vAddrToSend.clear(); | |
4911 | vector<CAddress> vAddr = addrman.GetAddr(); | |
4912 | BOOST_FOREACH(const CAddress &addr, vAddr) | |
4913 | pfrom->PushAddress(addr); | |
4914 | } | |
4915 | ||
4916 | ||
4917 | else if (strCommand == "mempool") | |
4918 | { | |
4919 | LOCK2(cs_main, pfrom->cs_filter); | |
4920 | ||
4921 | std::vector<uint256> vtxid; | |
4922 | mempool.queryHashes(vtxid); | |
4923 | vector<CInv> vInv; | |
4924 | BOOST_FOREACH(uint256& hash, vtxid) { | |
4925 | CInv inv(MSG_TX, hash); | |
4926 | CTransaction tx; | |
4927 | bool fInMemPool = mempool.lookup(hash, tx); | |
4928 | if (!fInMemPool) continue; // another thread removed since queryHashes, maybe... | |
4929 | if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(tx)) || | |
4930 | (!pfrom->pfilter)) | |
4931 | vInv.push_back(inv); | |
4932 | if (vInv.size() == MAX_INV_SZ) { | |
4933 | pfrom->PushMessage("inv", vInv); | |
4934 | vInv.clear(); | |
4935 | } | |
4936 | } | |
4937 | if (vInv.size() > 0) | |
4938 | pfrom->PushMessage("inv", vInv); | |
4939 | } | |
4940 | ||
4941 | ||
4942 | else if (strCommand == "ping") | |
4943 | { | |
4944 | if (pfrom->nVersion > BIP0031_VERSION) | |
4945 | { | |
4946 | uint64_t nonce = 0; | |
4947 | vRecv >> nonce; | |
4948 | // Echo the message back with the nonce. This allows for two useful features: | |
4949 | // | |
4950 | // 1) A remote node can quickly check if the connection is operational | |
4951 | // 2) Remote nodes can measure the latency of the network thread. If this node | |
4952 | // is overloaded it won't respond to pings quickly and the remote node can | |
4953 | // avoid sending us more work, like chain download requests. | |
4954 | // | |
4955 | // The nonce stops the remote getting confused between different pings: without | |
4956 | // it, if the remote node sends a ping once per second and this node takes 5 | |
4957 | // seconds to respond to each, the 5th ping the remote sends would appear to | |
4958 | // return very quickly. | |
4959 | pfrom->PushMessage("pong", nonce); | |
4960 | } | |
4961 | } | |
4962 | ||
4963 | ||
4964 | else if (strCommand == "pong") | |
4965 | { | |
4966 | int64_t pingUsecEnd = nTimeReceived; | |
4967 | uint64_t nonce = 0; | |
4968 | size_t nAvail = vRecv.in_avail(); | |
4969 | bool bPingFinished = false; | |
4970 | std::string sProblem; | |
4971 | ||
4972 | if (nAvail >= sizeof(nonce)) { | |
4973 | vRecv >> nonce; | |
4974 | ||
4975 | // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) | |
4976 | if (pfrom->nPingNonceSent != 0) { | |
4977 | if (nonce == pfrom->nPingNonceSent) { | |
4978 | // Matching pong received, this ping is no longer outstanding | |
4979 | bPingFinished = true; | |
4980 | int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; | |
4981 | if (pingUsecTime > 0) { | |
4982 | // Successful ping time measurement, replace previous | |
4983 | pfrom->nPingUsecTime = pingUsecTime; | |
4984 | pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime); | |
4985 | } else { | |
4986 | // This should never happen | |
4987 | sProblem = "Timing mishap"; | |
4988 | } | |
4989 | } else { | |
4990 | // Nonce mismatches are normal when pings are overlapping | |
4991 | sProblem = "Nonce mismatch"; | |
4992 | if (nonce == 0) { | |
4993 | // This is most likely a bug in another implementation somewhere; cancel this ping | |
4994 | bPingFinished = true; | |
4995 | sProblem = "Nonce zero"; | |
4996 | } | |
4997 | } | |
4998 | } else { | |
4999 | sProblem = "Unsolicited pong without ping"; | |
5000 | } | |
5001 | } else { | |
5002 | // This is most likely a bug in another implementation somewhere; cancel this ping | |
5003 | bPingFinished = true; | |
5004 | sProblem = "Short payload"; | |
5005 | } | |
5006 | ||
5007 | if (!(sProblem.empty())) { | |
5008 | LogPrint("net", "pong peer=%d %s: %s, %x expected, %x received, %u bytes\n", | |
5009 | pfrom->id, | |
5010 | pfrom->cleanSubVer, | |
5011 | sProblem, | |
5012 | pfrom->nPingNonceSent, | |
5013 | nonce, | |
5014 | nAvail); | |
5015 | } | |
5016 | if (bPingFinished) { | |
5017 | pfrom->nPingNonceSent = 0; | |
5018 | } | |
5019 | } | |
5020 | ||
5021 | ||
5022 | else if (fAlerts && strCommand == "alert") | |
5023 | { | |
5024 | CAlert alert; | |
5025 | vRecv >> alert; | |
5026 | ||
5027 | uint256 alertHash = alert.GetHash(); | |
5028 | if (pfrom->setKnown.count(alertHash) == 0) | |
5029 | { | |
5030 | if (alert.ProcessAlert(Params().AlertKey())) | |
5031 | { | |
5032 | // Relay | |
5033 | pfrom->setKnown.insert(alertHash); | |
5034 | { | |
5035 | LOCK(cs_vNodes); | |
5036 | BOOST_FOREACH(CNode* pnode, vNodes) | |
5037 | alert.RelayTo(pnode); | |
5038 | } | |
5039 | } | |
5040 | else { | |
5041 | // Small DoS penalty so peers that send us lots of | |
5042 | // duplicate/expired/invalid-signature/whatever alerts | |
5043 | // eventually get banned. | |
5044 | // This isn't a Misbehaving(100) (immediate ban) because the | |
5045 | // peer might be an older or different implementation with | |
5046 | // a different signature key, etc. | |
5047 | Misbehaving(pfrom->GetId(), 10); | |
5048 | } | |
5049 | } | |
5050 | } | |
5051 | ||
5052 | ||
5053 | else if (strCommand == "filterload") | |
5054 | { | |
5055 | CBloomFilter filter; | |
5056 | vRecv >> filter; | |
5057 | ||
5058 | if (!filter.IsWithinSizeConstraints()) | |
5059 | // There is no excuse for sending a too-large filter | |
5060 | Misbehaving(pfrom->GetId(), 100); | |
5061 | else | |
5062 | { | |
5063 | LOCK(pfrom->cs_filter); | |
5064 | delete pfrom->pfilter; | |
5065 | pfrom->pfilter = new CBloomFilter(filter); | |
5066 | pfrom->pfilter->UpdateEmptyFull(); | |
5067 | } | |
5068 | pfrom->fRelayTxes = true; | |
5069 | } | |
5070 | ||
5071 | ||
5072 | else if (strCommand == "filteradd") | |
5073 | { | |
5074 | vector<unsigned char> vData; | |
5075 | vRecv >> vData; | |
5076 | ||
5077 | // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object, | |
5078 | // and thus, the maximum size any matched object can have) in a filteradd message | |
5079 | if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) | |
5080 | { | |
5081 | Misbehaving(pfrom->GetId(), 100); | |
5082 | } else { | |
5083 | LOCK(pfrom->cs_filter); | |
5084 | if (pfrom->pfilter) | |
5085 | pfrom->pfilter->insert(vData); | |
5086 | else | |
5087 | Misbehaving(pfrom->GetId(), 100); | |
5088 | } | |
5089 | } | |
5090 | ||
5091 | ||
5092 | else if (strCommand == "filterclear") | |
5093 | { | |
5094 | LOCK(pfrom->cs_filter); | |
5095 | delete pfrom->pfilter; | |
5096 | pfrom->pfilter = new CBloomFilter(); | |
5097 | pfrom->fRelayTxes = true; | |
5098 | } | |
5099 | ||
5100 | ||
5101 | else if (strCommand == "reject") | |
5102 | { | |
5103 | if (fDebug) { | |
5104 | try { | |
5105 | string strMsg; unsigned char ccode; string strReason; | |
5106 | vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH); | |
5107 | ||
5108 | ostringstream ss; | |
5109 | ss << strMsg << " code " << itostr(ccode) << ": " << strReason; | |
5110 | ||
5111 | if (strMsg == "block" || strMsg == "tx") | |
5112 | { | |
5113 | uint256 hash; | |
5114 | vRecv >> hash; | |
5115 | ss << ": hash " << hash.ToString(); | |
5116 | } | |
5117 | LogPrint("net", "Reject %s\n", SanitizeString(ss.str())); | |
5118 | } catch (const std::ios_base::failure&) { | |
5119 | // Avoid feedback loops by preventing reject messages from triggering a new reject message. | |
5120 | LogPrint("net", "Unparseable reject message received\n"); | |
5121 | } | |
5122 | } | |
5123 | } | |
5124 | ||
5125 | else if (strCommand == "notfound") { | |
5126 | // We do not care about the NOTFOUND message, but logging an Unknown Command | |
5127 | // message would be undesirable as we transmit it ourselves. | |
5128 | } | |
5129 | ||
5130 | else { | |
5131 | // Ignore unknown commands for extensibility | |
5132 | LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id); | |
5133 | } | |
5134 | ||
5135 | ||
5136 | ||
5137 | return true; | |
5138 | } | |
5139 | ||
// requires LOCK(cs_vRecvMsg)
/**
 * Drain the peer's receive queue and dispatch complete messages.
 *
 * Validates the framing of each queued message (message start, header,
 * checksum) before handing the payload to ProcessMessage(). Returns false
 * only when the peer sent an invalid message-start sequence (the caller
 * should disconnect); all other per-message failures are logged and the
 * offending message is simply consumed.
 */
bool ProcessMessages(CNode* pfrom)
{
    //if (fDebug)
    //    LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());

    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
    bool fOk = true;

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom);

    // this maintains the order of responses
    if (!pfrom->vRecvGetData.empty()) return fOk;

    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        // get next message
        CNetMessage& msg = *it;

        //if (fDebug)
        //    LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
        //            msg.hdr.nMessageSize, msg.vRecv.size(),
        //            msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())
            break;

        // at this point, any failure means we can delete the current message
        it++;

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, Params().MessageStart(), MESSAGE_START_SIZE) != 0) {
            LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
            fOk = false;
            break;
        }

        // Read header
        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid(Params().MessageStart()))
        {
            LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
            continue;
        }
        string strCommand = hdr.GetCommand();

        // Message size
        unsigned int nMessageSize = hdr.nMessageSize;

        // Checksum: first 4 bytes (little-endian) of the payload's double-SHA256
        // must match the value declared in the header.
        CDataStream& vRecv = msg.vRecv;
        uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
        unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
        if (nChecksum != hdr.nChecksum)
        {
            LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
               SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);
            continue;
        }

        // Process message
        bool fRet = false;
        try
        {
            fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime);
            boost::this_thread::interruption_point();
        }
        catch (const std::ios_base::failure& e)
        {
            pfrom->PushMessage("reject", strCommand, REJECT_MALFORMED, string("error parsing message"));
            if (strstr(e.what(), "end of data"))
            {
                // Allow exceptions from under-length message on vRecv
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "size too large"))
            {
                // Allow exceptions from over-long size
                LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
            }
            else
            {
                PrintExceptionContinue(&e, "ProcessMessages()");
            }
        }
        catch (const boost::thread_interrupted&) {
            throw;
        }
        catch (const std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");
        } catch (...) {
            PrintExceptionContinue(NULL, "ProcessMessages()");
        }

        if (!fRet)
            LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);

        // Process at most one message per call; remaining queued messages are
        // handled on the next invocation.
        break;
    }

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);

    return fOk;
}
5259 | ||
5260 | ||
/**
 * Flush queued outbound traffic to a single peer: ping/keepalive, address
 * rebroadcast, addr/inv trickle, reject notices, initial getheaders, stall
 * detection, and getdata requests for blocks and other inventory.
 *
 * Returns early (true) when the peer has not completed the version
 * handshake, or when cs_main cannot be acquired without blocking.
 * @param fSendTrickle  when true, flush the addr queue and the withheld
 *                      portion of tx invs (privacy trickle).
 */
bool SendMessages(CNode* pto, bool fSendTrickle)
{
    const Consensus::Params& consensusParams = Params().GetConsensus();
    {
        // Don't send anything until we get its version message
        if (pto->nVersion == 0)
            return true;

        //
        // Message: ping
        //
        bool pingSend = false;
        if (pto->fPingQueued) {
            // RPC ping request by user
            pingSend = true;
        }
        if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
            // Ping automatically sent as a latency probe & keepalive.
            pingSend = true;
        }
        if (pingSend) {
            // Draw a nonzero nonce; nPingNonceSent == 0 means "no ping outstanding".
            uint64_t nonce = 0;
            while (nonce == 0) {
                GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
            }
            pto->fPingQueued = false;
            pto->nPingUsecStart = GetTimeMicros();
            if (pto->nVersion > BIP0031_VERSION) {
                pto->nPingNonceSent = nonce;
                pto->PushMessage("ping", nonce);
            } else {
                // Peer is too old to support ping command with nonce, pong will never arrive.
                pto->nPingNonceSent = 0;
                pto->PushMessage("ping");
            }
        }

        TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
        if (!lockMain)
            return true;

        // Address refresh broadcast: at most once per 24h, shared across all peers.
        static int64_t nLastRebroadcast;
        if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
            {
                // Periodically clear addrKnown to allow refresh broadcasts
                if (nLastRebroadcast)
                    pnode->addrKnown.reset();

                // Rebroadcast our address
                AdvertizeLocal(pnode);
            }
            if (!vNodes.empty())
                nLastRebroadcast = GetTime();
        }

        //
        // Message: addr
        //
        if (fSendTrickle)
        {
            vector<CAddress> vAddr;
            vAddr.reserve(pto->vAddrToSend.size());
            BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
            {
                if (!pto->addrKnown.contains(addr.GetKey()))
                {
                    pto->addrKnown.insert(addr.GetKey());
                    vAddr.push_back(addr);
                    // receiver rejects addr messages larger than 1000
                    if (vAddr.size() >= 1000)
                    {
                        pto->PushMessage("addr", vAddr);
                        vAddr.clear();
                    }
                }
            }
            pto->vAddrToSend.clear();
            if (!vAddr.empty())
                pto->PushMessage("addr", vAddr);
        }

        CNodeState &state = *State(pto->GetId());
        if (state.fShouldBan) {
            if (pto->fWhitelisted)
                LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
            else {
                pto->fDisconnect = true;
                if (pto->addr.IsLocal())
                    LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
                else
                {
                    CNode::Ban(pto->addr);
                }
            }
            state.fShouldBan = false;
        }

        // Forward any queued block-reject notices to the peer.
        BOOST_FOREACH(const CBlockReject& reject, state.rejects)
            pto->PushMessage("reject", (string)"block", reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
        state.rejects.clear();

        // Start block sync
        if (pindexBestHeader == NULL)
            pindexBestHeader = chainActive.Tip();
        bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
        if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
            // Only actively request headers from a single peer, unless we're close to today.
            if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
                state.fSyncStarted = true;
                nSyncStarted++;
                CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
                LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
                pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256());
            }
        }

        // Resend wallet transactions that haven't gotten in a block yet
        // Except during reindex, importing and IBD, when old wallet
        // transactions become unconfirmed and spams other nodes.
        if (!fReindex && !fImporting && !IsInitialBlockDownload())
        {
            GetMainSignals().Broadcast(nTimeBestReceived);
        }

        //
        // Message: inventory
        //
        vector<CInv> vInv;
        vector<CInv> vInvWait;
        {
            LOCK(pto->cs_inventory);
            vInv.reserve(pto->vInventoryToSend.size());
            vInvWait.reserve(pto->vInventoryToSend.size());
            BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
            {
                if (pto->setInventoryKnown.count(inv))
                    continue;

                // trickle out tx inv to protect privacy
                if (inv.type == MSG_TX && !fSendTrickle)
                {
                    // 1/4 of tx invs blast to all immediately
                    static uint256 hashSalt;
                    if (hashSalt.IsNull())
                        hashSalt = GetRandHash();
                    uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
                    hashRand = Hash(BEGIN(hashRand), END(hashRand));
                    bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);

                    if (fTrickleWait)
                    {
                        vInvWait.push_back(inv);
                        continue;
                    }
                }

                // returns true if wasn't already contained in the set
                if (pto->setInventoryKnown.insert(inv).second)
                {
                    vInv.push_back(inv);
                    if (vInv.size() >= 1000)
                    {
                        pto->PushMessage("inv", vInv);
                        vInv.clear();
                    }
                }
            }
            // Withheld (trickled) invs go back on the queue for a later pass.
            pto->vInventoryToSend = vInvWait;
        }
        if (!vInv.empty())
            pto->PushMessage("inv", vInv);

        // Detect whether we're stalling
        int64_t nNow = GetTimeMicros();
        if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
            // Stalling only triggers when the block download window cannot move. During normal steady state,
            // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
            // should only happen during initial block download.
            LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
            pto->fDisconnect = true;
        }
        // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
        // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
        // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
        // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
        // to unreasonably increase our timeout.
        // We also compare the block download timeout originally calculated against the time at which we'd disconnect
        // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
        // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
        // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
        // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
        if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
            QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
            int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
            if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
                LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
                queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
            }
            if (queuedBlock.nTimeDisconnect < nNow) {
                LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
                pto->fDisconnect = true;
            }
        }

        //
        // Message: getdata (blocks)
        //
        vector<CInv> vGetData;
        if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
            vector<CBlockIndex*> vToDownload;
            NodeId staller = -1;
            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
            BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
                vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
                LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                    pindex->nHeight, pto->id);
            }
            if (state.nBlocksInFlight == 0 && staller != -1) {
                if (State(staller)->nStallingSince == 0) {
                    State(staller)->nStallingSince = nNow;
                    LogPrint("net", "Stall started peer=%d\n", staller);
                }
            }
        }

        //
        // Message: getdata (non-blocks)
        //
        while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
        {
            const CInv& inv = (*pto->mapAskFor.begin()).second;
            if (!AlreadyHave(inv))
            {
                if (fDebug)
                    LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
                vGetData.push_back(inv);
                if (vGetData.size() >= 1000)
                {
                    pto->PushMessage("getdata", vGetData);
                    vGetData.clear();
                }
            } else {
                //If we're not going to ask, don't expect a response.
                pto->setAskFor.erase(inv.hash);
            }
            pto->mapAskFor.erase(pto->mapAskFor.begin());
        }
        if (!vGetData.empty())
            pto->PushMessage("getdata", vGetData);

    }
    return true;
}
5519 | ||
5520 | std::string CBlockFileInfo::ToString() const { | |
5521 | return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast)); | |
5522 | } | |
5523 | ||
5524 | ||
5525 | ||
5526 | class CMainCleanup | |
5527 | { | |
5528 | public: | |
5529 | CMainCleanup() {} | |
5530 | ~CMainCleanup() { | |
5531 | // block headers | |
5532 | BlockMap::iterator it1 = mapBlockIndex.begin(); | |
5533 | for (; it1 != mapBlockIndex.end(); it1++) | |
5534 | delete (*it1).second; | |
5535 | mapBlockIndex.clear(); | |
5536 | ||
5537 | // orphan transactions | |
5538 | mapOrphanTransactions.clear(); | |
5539 | mapOrphanTransactionsByPrev.clear(); | |
5540 | } | |
5541 | } instance_of_cmaincleanup; |