1// Copyright (c) 2009-2010 Satoshi Nakamoto
2// Copyright (c) 2009-2014 The Bitcoin Core developers
3// Distributed under the MIT software license, see the accompanying
4// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6#include "main.h"
7
8#include "sodium.h"
9
10#include "addrman.h"
11#include "alert.h"
12#include "arith_uint256.h"
13#include "chainparams.h"
14#include "checkpoints.h"
15#include "checkqueue.h"
16#include "consensus/upgrades.h"
17#include "consensus/validation.h"
18#include "deprecation.h"
19#include "init.h"
20#include "merkleblock.h"
21#include "metrics.h"
22#include "net.h"
23#include "pow.h"
24#include "txdb.h"
25#include "txmempool.h"
26#include "ui_interface.h"
27#include "undo.h"
28#include "util.h"
29#include "utilmoneystr.h"
30#include "validationinterface.h"
31#include "wallet/asyncrpcoperation_sendmany.h"
32#include "wallet/asyncrpcoperation_shieldcoinbase.h"
33
34#include <sstream>
35
36#include <boost/algorithm/string/replace.hpp>
37#include <boost/filesystem.hpp>
38#include <boost/filesystem/fstream.hpp>
39#include <boost/math/distributions/poisson.hpp>
40#include <boost/thread.hpp>
41#include <boost/static_assert.hpp>
42
43using namespace std;
44
45#if defined(NDEBUG)
46# error "Zcash cannot be compiled without assertions."
47#endif
48
49
50/**
51 * Global state
52 */
53
54CCriticalSection cs_main;
55extern uint8_t NOTARY_PUBKEY33[33];
56
57BlockMap mapBlockIndex;
58CChain chainActive;
59CBlockIndex *pindexBestHeader = NULL;
60int64_t nTimeBestReceived = 0;
61CWaitableCriticalSection csBestBlock;
62CConditionVariable cvBlockChange;
63int nScriptCheckThreads = 0;
64bool fExperimentalMode = false;
65bool fImporting = false;
66bool fReindex = false;
67bool fTxIndex = false;
68bool fHavePruned = false;
69bool fPruneMode = false;
70bool fIsBareMultisigStd = true;
71bool fCheckBlockIndex = false;
72bool fCheckpointsEnabled = true;
73bool fCoinbaseEnforcedProtectionEnabled = true;
74size_t nCoinCacheUsage = 5000 * 300;
75uint64_t nPruneTarget = 0;
76bool fAlerts = DEFAULT_ALERTS;
77
78unsigned int expiryDelta = DEFAULT_TX_EXPIRY_DELTA;
79
80/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
81CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
82
83CTxMemPool mempool(::minRelayTxFee);
84
85struct COrphanTx {
86 CTransaction tx;
87 NodeId fromPeer;
88};
89map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
90map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
91void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
92
93/**
94 * Returns true if there are nRequired or more blocks of minVersion or above
95 * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
96 */
97static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
98static void CheckBlockIndex();
99
100/** Constant stuff for coinbase transactions we create: */
101CScript COINBASE_FLAGS;
102
103const string strMessageMagic = "Komodo Signed Message:\n";
104
105// Internal stuff
106namespace {
107
108 struct CBlockIndexWorkComparator
109 {
110 bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
111 // First sort by most total work, ...
112 if (pa->nChainWork > pb->nChainWork) return false;
113 if (pa->nChainWork < pb->nChainWork) return true;
114
115 // ... then by earliest time received, ...
116 if (pa->nSequenceId < pb->nSequenceId) return false;
117 if (pa->nSequenceId > pb->nSequenceId) return true;
118
119 // Use pointer address as tie breaker (should only happen with blocks
120 // loaded from disk, as those all have id 0).
121 if (pa < pb) return false;
122 if (pa > pb) return true;
123
124 // Identical blocks.
125 return false;
126 }
127 };
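// Ordering note (illustrative sketch, not this file's actual usage): because the
// comparator above sorts lower-work entries first, the best candidate in a set
// keyed by it is expected at the *end* of the set, e.g.:
//
//   std::set<CBlockIndex*, CBlockIndexWorkComparator> candidates;
//   // ... insert candidates ...
//   CBlockIndex *pindexBest = candidates.empty() ? NULL : *candidates.rbegin();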
128
129 CBlockIndex *pindexBestInvalid;
130
131 /**
132 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
133 * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
134 * missing the data for the block.
135 */
136 set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
137 /** Number of nodes with fSyncStarted. */
138 int nSyncStarted = 0;
139 /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
140 * Pruned nodes may have entries where B is missing data.
141 */
142 multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
143
144 CCriticalSection cs_LastBlockFile;
145 std::vector<CBlockFileInfo> vinfoBlockFile;
146 int nLastBlockFile = 0;
147 /** Global flag to indicate we should check to see if there are
148 * block/undo files that should be deleted. Set on startup
149 * or if we allocate more file space when we're in prune mode
150 */
151 bool fCheckForPruning = false;
152
153 /**
154 * Every received block is assigned a unique and increasing identifier, so we
155 * know which one to give priority in case of a fork.
156 */
157 CCriticalSection cs_nBlockSequenceId;
158 /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
159 uint32_t nBlockSequenceId = 1;
160
161 /**
162 * Sources of received blocks, saved to be able to send them reject
163 * messages or ban them when processing happens afterwards. Protected by
164 * cs_main.
165 */
166 map<uint256, NodeId> mapBlockSource;
167
168 /**
169 * Filter for transactions that were recently rejected by
170 * AcceptToMemoryPool. These are not rerequested until the chain tip
171 * changes, at which point the entire filter is reset. Protected by
172 * cs_main.
173 *
174 * Without this filter we'd be re-requesting txs from each of our peers,
175 * increasing bandwidth consumption considerably. For instance, with 100
176 * peers, half of which relay a tx we don't accept, that might be a 50x
177 * bandwidth increase. A flooding attacker attempting to roll over the
178 * filter using minimum-sized, 60-byte transactions might manage to send
179 * 1,000/sec if we have fast peers, so we pick 120,000 to give our peers a
180 * two-minute window to send invs to us.
181 *
182 * Decreasing the false positive rate is fairly cheap, so we pick one in a
183 * million to make it highly unlikely for users to have issues with this
184 * filter.
185 *
186 * Memory used: 1.7MB
187 */
188 boost::scoped_ptr<CRollingBloomFilter> recentRejects;
189 uint256 hashRecentRejectsChainTip;
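    // Sizing arithmetic (illustrative, restating the comment above):
    //   entries needed to outlast a flood: 1,000 tx/s * 120 s window = 120,000
    //   false positive rate: 1 / 1,000,000
    //   resulting memory footprint: ~1.7MB
    // A filter built with roughly those parameters would look like the following
    // (constructor arguments shown as an assumption; see bloom.h for the exact
    // signature used in this tree):
    //   recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));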
190
191 /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
192 struct QueuedBlock {
193 uint256 hash;
194 CBlockIndex *pindex; //! Optional.
195 int64_t nTime; //! Time of "getdata" request in microseconds.
196 bool fValidatedHeaders; //! Whether this block has validated headers at the time of request.
197 int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
198 };
199 map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
200
201 /** Number of blocks in flight with validated headers. */
202 int nQueuedValidatedHeaders = 0;
203
204 /** Number of preferable block download peers. */
205 int nPreferredDownload = 0;
206
207 /** Dirty block index entries. */
208 set<CBlockIndex*> setDirtyBlockIndex;
209
210 /** Dirty block file entries. */
211 set<int> setDirtyFileInfo;
212} // anon namespace
213
214//////////////////////////////////////////////////////////////////////////////
215//
216// Registration of network node signals.
217//
218
219namespace {
220
221struct CBlockReject {
222 unsigned char chRejectCode;
223 string strRejectReason;
224 uint256 hashBlock;
225};
226
227/**
228 * Maintain validation-specific state about nodes, protected by cs_main, instead
229 * of by CNode's own locks. This simplifies asynchronous operation, where
230 * processing of incoming data is done after the ProcessMessage call returns,
231 * and we're no longer holding the node's locks.
232 */
233struct CNodeState {
234 //! The peer's address
235 CService address;
236 //! Whether we have a fully established connection.
237 bool fCurrentlyConnected;
238 //! Accumulated misbehaviour score for this peer.
239 int nMisbehavior;
240 //! Whether this peer should be disconnected and banned (unless whitelisted).
241 bool fShouldBan;
242 //! String name of this peer (debugging/logging purposes).
243 std::string name;
244 //! List of asynchronously-determined block rejections to notify this peer about.
245 std::vector<CBlockReject> rejects;
246 //! The best known block we know this peer has announced.
247 CBlockIndex *pindexBestKnownBlock;
248 //! The hash of the last unknown block this peer has announced.
249 uint256 hashLastUnknownBlock;
250 //! The last full block we both have.
251 CBlockIndex *pindexLastCommonBlock;
252 //! Whether we've started headers synchronization with this peer.
253 bool fSyncStarted;
254 //! Since when we're stalling block download progress (in microseconds), or 0.
255 int64_t nStallingSince;
256 list<QueuedBlock> vBlocksInFlight;
257 int nBlocksInFlight;
258 int nBlocksInFlightValidHeaders;
259 //! Whether we consider this a preferred download peer.
260 bool fPreferredDownload;
261
262 CNodeState() {
263 fCurrentlyConnected = false;
264 nMisbehavior = 0;
265 fShouldBan = false;
266 pindexBestKnownBlock = NULL;
267 hashLastUnknownBlock.SetNull();
268 pindexLastCommonBlock = NULL;
269 fSyncStarted = false;
270 nStallingSince = 0;
271 nBlocksInFlight = 0;
272 nBlocksInFlightValidHeaders = 0;
273 fPreferredDownload = false;
274 }
275};
276
277/** Map maintaining per-node state. Requires cs_main. */
278map<NodeId, CNodeState> mapNodeState;
279
280// Requires cs_main.
281CNodeState *State(NodeId pnode) {
282 map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
283 if (it == mapNodeState.end())
284 return NULL;
285 return &it->second;
286}
287
288int GetHeight()
289{
290 LOCK(cs_main);
291 return chainActive.Height();
292}
293
294void UpdatePreferredDownload(CNode* node, CNodeState* state)
295{
296 nPreferredDownload -= state->fPreferredDownload;
297
298 // Whether this node should be marked as a preferred download node.
299 state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
300
301 nPreferredDownload += state->fPreferredDownload;
302}
303
304// Returns the time at which to time out a block request (nTime in microseconds)
305int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
306{
307 return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
308}
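// Worked example (illustrative): the formula above yields a timeout in
// microseconds equal to half a target spacing per previously queued validated
// header, on top of a two-target-spacing base. Assuming nPowTargetSpacing = 60
// seconds (an example value; the real one comes from consensusParams) and
// nValidatedQueuedBefore = 2:
//   500000 * 60 * (4 + 2) = 180,000,000 microseconds = 180 s after the getdata.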
309
310void InitializeNode(NodeId nodeid, const CNode *pnode) {
311 LOCK(cs_main);
312 CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
313 state.name = pnode->addrName;
314 state.address = pnode->addr;
315}
316
317void FinalizeNode(NodeId nodeid) {
318 LOCK(cs_main);
319 CNodeState *state = State(nodeid);
320
321 if (state->fSyncStarted)
322 nSyncStarted--;
323
324 if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
325 AddressCurrentlyConnected(state->address);
326 }
327
328 BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight)
329 mapBlocksInFlight.erase(entry.hash);
330 EraseOrphansFor(nodeid);
331 nPreferredDownload -= state->fPreferredDownload;
332
333 mapNodeState.erase(nodeid);
334}
335
336void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age)
337{
338/* int expired = pool.Expire(GetTime() - age);
339 if (expired != 0)
340 LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);
341
342 std::vector<uint256> vNoSpendsRemaining;
343 pool.TrimToSize(limit, &vNoSpendsRemaining);
344 BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
345 pcoinsTip->Uncache(removed);*/
346}
347
348// Requires cs_main.
349// Returns a bool indicating whether we requested this block.
350bool MarkBlockAsReceived(const uint256& hash) {
351 map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
352 if (itInFlight != mapBlocksInFlight.end()) {
353 CNodeState *state = State(itInFlight->second.first);
354 nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
355 state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
356 state->vBlocksInFlight.erase(itInFlight->second.second);
357 state->nBlocksInFlight--;
358 state->nStallingSince = 0;
359 mapBlocksInFlight.erase(itInFlight);
360 return true;
361 }
362 return false;
363}
364
365// Requires cs_main.
366void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
367 CNodeState *state = State(nodeid);
368 assert(state != NULL);
369
370 // Make sure it's not listed somewhere already.
371 MarkBlockAsReceived(hash);
372
373 int64_t nNow = GetTimeMicros();
374 QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
375 nQueuedValidatedHeaders += newentry.fValidatedHeaders;
376 list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
377 state->nBlocksInFlight++;
378 state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
379 mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
380}
381
382/** Check whether the last unknown block a peer advertised is not yet known. */
383void ProcessBlockAvailability(NodeId nodeid) {
384 CNodeState *state = State(nodeid);
385 assert(state != NULL);
386
387 if (!state->hashLastUnknownBlock.IsNull()) {
388 BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
389 if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0)
390 {
391 if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
392 state->pindexBestKnownBlock = itOld->second;
393 state->hashLastUnknownBlock.SetNull();
394 }
395 }
396}
397
398/** Update tracking information about which blocks a peer is assumed to have. */
399void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
400 CNodeState *state = State(nodeid);
401 assert(state != NULL);
402
403 /*ProcessBlockAvailability(nodeid);
404
405 BlockMap::iterator it = mapBlockIndex.find(hash);
406 if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
407 // An actually better block was announced.
408 if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
409 state->pindexBestKnownBlock = it->second;
410 } else*/
411 {
412 // An unknown block was announced; just assume that the latest one is the best one.
413 state->hashLastUnknownBlock = hash;
414 }
415}
416
417/** Find the last common ancestor two blocks have.
418 * Both pa and pb must be non-NULL. */
419CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
420 if (pa->nHeight > pb->nHeight) {
421 pa = pa->GetAncestor(pb->nHeight);
422 } else if (pb->nHeight > pa->nHeight) {
423 pb = pb->GetAncestor(pa->nHeight);
424 }
425
426 while (pa != pb && pa && pb) {
427 pa = pa->pprev;
428 pb = pb->pprev;
429 }
430
431 // Eventually all chain branches meet at the genesis block.
432 assert(pa == pb);
433 return pa;
434}
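// Usage sketch (illustrative; peerTip is a hypothetical pointer to a peer's best
// known block, not a variable in this file):
//   CBlockIndex* pfork = LastCommonAncestor(chainActive.Tip(), peerTip);
// Both pointers are first walked back to the same height, then stepped back in
// lockstep until they meet; in the worst case the meeting point is the genesis
// block.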
435
436/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
437 * at most count entries. */
438void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
439 if (count == 0)
440 return;
441
442 vBlocks.reserve(vBlocks.size() + count);
443 CNodeState *state = State(nodeid);
444 assert(state != NULL);
445
446 // Make sure pindexBestKnownBlock is up to date, we'll need it.
447 ProcessBlockAvailability(nodeid);
448
449 if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
450 // This peer has nothing interesting.
451 return;
452 }
453
454 if (state->pindexLastCommonBlock == NULL) {
455 // Bootstrap quickly by guessing a parent of our best tip is the forking point.
456 // Guessing wrong in either direction is not a problem.
457 state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
458 }
459
460 // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
461 // of its current tip anymore. Go back enough to fix that.
462 state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
463 if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
464 return;
465
466 std::vector<CBlockIndex*> vToFetch;
467 CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
468 // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
469 // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
470 // download that next block if the window were 1 larger.
471 int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
472 int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
473 NodeId waitingfor = -1;
474 while (pindexWalk->nHeight < nMaxHeight) {
475 // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
476 // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
477 // as iterating over ~100 CBlockIndex* entries anyway.
478 int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
479 vToFetch.resize(nToFetch);
480 pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
481 vToFetch[nToFetch - 1] = pindexWalk;
482 for (unsigned int i = nToFetch - 1; i > 0; i--) {
483 vToFetch[i - 1] = vToFetch[i]->pprev;
484 }
485
486 // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
487 // are not yet downloaded and not in flight to vBlocks. In the meantime, update
488 // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
489 // already part of our chain (and therefore don't need it even if pruned).
490 BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
491 if (!pindex->IsValid(BLOCK_VALID_TREE)) {
492 // We consider the chain that this peer is on invalid.
493 return;
494 }
495 if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
496 if (pindex->nChainTx)
497 state->pindexLastCommonBlock = pindex;
498 } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
499 // The block is not already downloaded, and not yet in flight.
500 if (pindex->nHeight > nWindowEnd) {
501 // We reached the end of the window.
502 if (vBlocks.size() == 0 && waitingfor != nodeid) {
503 // We aren't able to fetch anything, but we would be if the download window was one larger.
504 nodeStaller = waitingfor;
505 }
506 return;
507 }
508 vBlocks.push_back(pindex);
509 if (vBlocks.size() == count) {
510 return;
511 }
512 } else if (waitingfor == -1) {
513 // This is the first already-in-flight block.
514 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
515 }
516 }
517 }
518}
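// Window example (illustrative, assuming BLOCK_DOWNLOAD_WINDOW is 1024): if
// pindexLastCommonBlock is at height 1,000, then nWindowEnd is 2,024 and
// nMaxHeight is min(peer's best height, 2,025). The block at height 2,025 is
// never actually fetched; being unable to fetch anything solely because of it is
// what identifies the stalling peer reported via nodeStaller.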
519
520} // anon namespace
521
522bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
523 LOCK(cs_main);
524 CNodeState *state = State(nodeid);
525 if (state == NULL)
526 return false;
527 stats.nMisbehavior = state->nMisbehavior;
528 stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
529 stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
530 BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
531 if (queue.pindex)
532 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
533 }
534 return true;
535}
536
537void RegisterNodeSignals(CNodeSignals& nodeSignals)
538{
539 nodeSignals.GetHeight.connect(&GetHeight);
540 nodeSignals.ProcessMessages.connect(&ProcessMessages);
541 nodeSignals.SendMessages.connect(&SendMessages);
542 nodeSignals.InitializeNode.connect(&InitializeNode);
543 nodeSignals.FinalizeNode.connect(&FinalizeNode);
544}
545
546void UnregisterNodeSignals(CNodeSignals& nodeSignals)
547{
548 nodeSignals.GetHeight.disconnect(&GetHeight);
549 nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
550 nodeSignals.SendMessages.disconnect(&SendMessages);
551 nodeSignals.InitializeNode.disconnect(&InitializeNode);
552 nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
553}
554
555CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
556{
557 // Find the first block the caller has in the main chain
558 BOOST_FOREACH(const uint256& hash, locator.vHave) {
559 BlockMap::iterator mi = mapBlockIndex.find(hash);
560 if (mi != mapBlockIndex.end())
561 {
562 CBlockIndex* pindex = (*mi).second;
563 if (pindex != 0 && chain.Contains(pindex))
564 return pindex;
565 if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
566 return chain.Tip();
567 }
568 }
569 }
570 return chain.Genesis();
571}
572
573CCoinsViewCache *pcoinsTip = NULL;
574CBlockTreeDB *pblocktree = NULL;
575
576// Komodo globals
577
578#define KOMODO_ZCASH
579#include "komodo.h"
580
581//////////////////////////////////////////////////////////////////////////////
582//
583// mapOrphanTransactions
584//
585
586bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
587{
588 uint256 hash = tx.GetHash();
589 if (mapOrphanTransactions.count(hash))
590 return false;
591
592 // Ignore big transactions, to avoid a
593 // send-big-orphans memory exhaustion attack. If a peer has a legitimate
594 // large transaction with a missing parent then we assume
595 // it will rebroadcast it later, after the parent transaction(s)
596 // have been mined or received.
597 // 10,000 orphans, each of which is at most 5,000 bytes big, is
598 // at most 50 megabytes of orphans:
599 unsigned int sz = tx.GetSerializeSize(SER_NETWORK, tx.nVersion);
600 if (sz > 5000)
601 {
602 LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
603 return false;
604 }
605
606 mapOrphanTransactions[hash].tx = tx;
607 mapOrphanTransactions[hash].fromPeer = peer;
608 BOOST_FOREACH(const CTxIn& txin, tx.vin)
609 mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
610
611 LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
612 mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
613 return true;
614}
615
616void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
617{
618 map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
619 if (it == mapOrphanTransactions.end())
620 return;
621 BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
622 {
623 map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
624 if (itPrev == mapOrphanTransactionsByPrev.end())
625 continue;
626 itPrev->second.erase(hash);
627 if (itPrev->second.empty())
628 mapOrphanTransactionsByPrev.erase(itPrev);
629 }
630 mapOrphanTransactions.erase(it);
631}
632
633void EraseOrphansFor(NodeId peer)
634{
635 int nErased = 0;
636 map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
637 while (iter != mapOrphanTransactions.end())
638 {
639 map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
640 if (maybeErase->second.fromPeer == peer)
641 {
642 EraseOrphanTx(maybeErase->second.tx.GetHash());
643 ++nErased;
644 }
645 }
646 if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
647}
648
649
650unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
651{
652 unsigned int nEvicted = 0;
653 while (mapOrphanTransactions.size() > nMaxOrphans)
654 {
655 // Evict a random orphan:
656 uint256 randomhash = GetRandHash();
657 map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
658 if (it == mapOrphanTransactions.end())
659 it = mapOrphanTransactions.begin();
660 EraseOrphanTx(it->first);
661 ++nEvicted;
662 }
663 return nEvicted;
664}
665
666
667bool IsStandardTx(const CTransaction& tx, string& reason, const int nHeight)
668{
669 bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
670
671 if (isOverwinter) {
672 // Overwinter standard rules apply
673 if (tx.nVersion > CTransaction::OVERWINTER_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::OVERWINTER_MIN_CURRENT_VERSION) {
674 reason = "overwinter-version";
675 return false;
676 }
677 } else {
678 // Sprout standard rules apply
679 if (tx.nVersion > CTransaction::SPROUT_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SPROUT_MIN_CURRENT_VERSION) {
680 reason = "version";
681 return false;
682 }
683 }
684
685 BOOST_FOREACH(const CTxIn& txin, tx.vin)
686 {
687 // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
688 // keys. (remember the 520 byte limit on redeemScript size) That works
689 // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
690 // bytes of scriptSig, which we round off to 1650 bytes for some minor
691 // future-proofing. That's also enough to spend a 20-of-20
692 // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not
693 // considered standard.
694 if (txin.scriptSig.size() > 1650) {
695 reason = "scriptsig-size";
696 return false;
697 }
698 if (!txin.scriptSig.IsPushOnly()) {
699 reason = "scriptsig-not-pushonly";
700 return false;
701 }
702 }
703
704 unsigned int v=0,nDataOut = 0;
705 txnouttype whichType;
706 BOOST_FOREACH(const CTxOut& txout, tx.vout)
707 {
708 if (!::IsStandard(txout.scriptPubKey, whichType))
709 {
710 reason = "scriptpubkey";
711 fprintf(stderr,">>>>>>>>>>>>>>> vout.%d nDataout.%d\n",v,nDataOut);
712 return false;
713 }
714
715 if (whichType == TX_NULL_DATA)
716 {
717 nDataOut++;
718 //fprintf(stderr,"is OP_RETURN\n");
719 }
720 else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
721 reason = "bare-multisig";
722 return false;
723 } else if (txout.IsDust(::minRelayTxFee)) {
724 reason = "dust";
725 return false;
726 }
727 v++;
728 }
729
730 // only one OP_RETURN txout is permitted
731 if (nDataOut > 1) {
732 reason = "multi-op-return";
733 return false;
734 }
735
736 return true;
737}
738
739bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
740{
741 int32_t i;
742 if (tx.nLockTime == 0)
743 return true;
744 if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
745 return true;
746 BOOST_FOREACH(const CTxIn& txin, tx.vin)
747 {
748 if ( txin.nSequence == 0xfffffffe && (((int64_t)tx.nLockTime >= LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockTime) || ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockHeight)) )
749 {
750
751 }
752 else if (!txin.IsFinal())
753 {
754 //printf("non-final txin seq.%x locktime.%u vs nTime.%u\n",txin.nSequence,(uint32_t)tx.nLockTime,(uint32_t)nBlockTime);
755 return false;
756 }
757 }
758 return true;
759}
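// Interpretation example (illustrative): nLockTime values below
// LOCKTIME_THRESHOLD (500,000,000) are compared against block height, larger
// values against block time (a unix timestamp):
//   nLockTime = 250,000       -> final once nBlockHeight exceeds 250,000
//   nLockTime = 1,600,000,000 -> final once nBlockTime exceeds 1,600,000,000
// If the lock time has not yet been reached, the transaction can still be final
// when every input is final (nSequence == 0xffffffff), subject to the
// nSequence == 0xfffffffe carve-out above.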
760
761bool IsExpiredTx(const CTransaction &tx, int nBlockHeight)
762{
763 if (tx.nExpiryHeight == 0 || tx.IsCoinBase()) {
764 return false;
765 }
766 return static_cast<uint32_t>(nBlockHeight) > tx.nExpiryHeight;
767}
768
769bool CheckFinalTx(const CTransaction &tx, int flags)
770{
771 AssertLockHeld(cs_main);
772
773 // By convention a negative value for flags indicates that the
774 // current network-enforced consensus rules should be used. In
775 // a future soft-fork scenario that would mean checking which
776 // rules would be enforced for the next block and setting the
777 // appropriate flags. At the present time no soft-forks are
778 // scheduled, so no flags are set.
779 flags = std::max(flags, 0);
780
781 // CheckFinalTx() uses chainActive.Height()+1 to evaluate
782 // nLockTime because when IsFinalTx() is called within
783 // CBlock::AcceptBlock(), the height of the block *being*
784 // evaluated is what is used. Thus if we want to know if a
785 // transaction can be part of the *next* block, we need to call
786 // IsFinalTx() with one more than chainActive.Height().
787 const int nBlockHeight = chainActive.Height() + 1;
788
789 // Timestamps on the other hand don't get any special treatment,
790 // because we can't know what timestamp the next block will have,
791 // and there aren't timestamp applications where it matters.
792 // However this changes once median past time-locks are enforced:
793 const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
794 ? chainActive.Tip()->GetMedianTimePast()
795 : GetAdjustedTime();
796
797 return IsFinalTx(tx, nBlockHeight, nBlockTime);
798}
799
800/**
801 * Check transaction inputs to mitigate two
802 * potential denial-of-service attacks:
803 *
804 * 1. scriptSigs with extra data stuffed into them,
805 * not consumed by scriptPubKey (or P2SH script)
806 * 2. P2SH scripts with a crazy number of expensive
807 * CHECKSIG/CHECKMULTISIG operations
808 */
809bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, uint32_t consensusBranchId)
810{
811 if (tx.IsCoinBase())
812 return true; // Coinbases don't use vin normally
813
814 for (unsigned int i = 0; i < tx.vin.size(); i++)
815 {
816 const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]);
817
818 vector<vector<unsigned char> > vSolutions;
819 txnouttype whichType;
820 // get the scriptPubKey corresponding to this input:
821 const CScript& prevScript = prev.scriptPubKey;
822 if (!Solver(prevScript, whichType, vSolutions))
823 return false;
824 int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
825 if (nArgsExpected < 0)
826 return false;
827
828 // Transactions with extra stuff in their scriptSigs are
829 // non-standard. Note that this EvalScript() call will
830 // be quick, because if there are any operations
831 // besides "push data" in the scriptSig
832 // IsStandardTx() will have already returned false
833 // and this method isn't called.
834 vector<vector<unsigned char> > stack;
835 if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), consensusBranchId))
836 return false;
837
838 if (whichType == TX_SCRIPTHASH)
839 {
840 if (stack.empty())
841 return false;
842 CScript subscript(stack.back().begin(), stack.back().end());
843 vector<vector<unsigned char> > vSolutions2;
844 txnouttype whichType2;
845 if (Solver(subscript, whichType2, vSolutions2))
846 {
847 int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
848 if (tmpExpected < 0)
849 return false;
850 nArgsExpected += tmpExpected;
851 }
852 else
853 {
854 // Any other Script with less than 15 sigops OK:
855 unsigned int sigops = subscript.GetSigOpCount(true);
856 // ... extra data left on the stack after execution is OK, too:
857 return (sigops <= MAX_P2SH_SIGOPS);
858 }
859 }
860
861 if (stack.size() != (unsigned int)nArgsExpected)
862 return false;
863 }
864
865 return true;
866}
867
868unsigned int GetLegacySigOpCount(const CTransaction& tx)
869{
870 unsigned int nSigOps = 0;
871 BOOST_FOREACH(const CTxIn& txin, tx.vin)
872 {
873 nSigOps += txin.scriptSig.GetSigOpCount(false);
874 }
875 BOOST_FOREACH(const CTxOut& txout, tx.vout)
876 {
877 nSigOps += txout.scriptPubKey.GetSigOpCount(false);
878 }
879 return nSigOps;
880}
881
882unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
883{
884 if (tx.IsCoinBase())
885 return 0;
886
887 unsigned int nSigOps = 0;
888 for (unsigned int i = 0; i < tx.vin.size(); i++)
889 {
890 const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
891 if (prevout.scriptPubKey.IsPayToScriptHash())
892 nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
893 }
894 return nSigOps;
895}
896
897/**
898 * Check a transaction contextually against a set of consensus rules valid at a given block height.
899 *
900 * Notes:
901 * 1. AcceptToMemoryPool calls CheckTransaction and this function.
902 * 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction)
903 * and ContextualCheckBlock (which calls this function).
904 */
905bool ContextualCheckTransaction(const CTransaction& tx, CValidationState &state, const int nHeight, const int dosLevel)
906{
907 bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
908 bool isSprout = !isOverwinter;
909
910 // If Sprout rules apply, reject transactions which are intended for Overwinter and beyond
911 if (isSprout && tx.fOverwintered) {
912 return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter is not active yet"),
913 REJECT_INVALID, "tx-overwinter-not-active");
914 }
915
916 // If Overwinter rules apply:
917 if (isOverwinter) {
918 // Reject transactions with valid version but missing overwinter flag
919 if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) {
920 return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"),
921 REJECT_INVALID, "tx-overwinter-flag-not-set");
922 }
923
924 // Reject transactions with invalid version
925 if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) {
926 return state.DoS(100, error("CheckTransaction(): overwinter version too high"),
927 REJECT_INVALID, "bad-tx-overwinter-version-too-high");
928 }
929
930 // Reject transactions intended for Sprout
931 if (!tx.fOverwintered) {
932 return state.DoS(dosLevel, error("ContextualCheckTransaction: overwinter is active"),
933 REJECT_INVALID, "tx-overwinter-active");
934 }
935
936 // Check that all transactions are unexpired
937 if (IsExpiredTx(tx, nHeight)) {
938 return state.DoS(dosLevel, error("ContextualCheckTransaction(): transaction is expired"), REJECT_INVALID, "tx-overwinter-expired");
939 }
940 }
941
942 if (!(tx.IsCoinBase() || tx.vjoinsplit.empty())) {
943 auto consensusBranchId = CurrentEpochBranchId(nHeight, Params().GetConsensus());
944 // Empty output script.
945 CScript scriptCode;
946 uint256 dataToBeSigned;
947 try {
948 dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId);
949 } catch (const std::logic_error& ex) {
950 return state.DoS(100, error("CheckTransaction(): error computing signature hash"),
951 REJECT_INVALID, "error-computing-signature-hash");
952 }
953
954 BOOST_STATIC_ASSERT(crypto_sign_PUBLICKEYBYTES == 32);
955
956 // We rely on libsodium to check that the signature is canonical.
957 // https://github.com/jedisct1/libsodium/commit/62911edb7ff2275cccd74bf1c8aefcc4d76924e0
958 if (crypto_sign_verify_detached(&tx.joinSplitSig[0],
959 dataToBeSigned.begin(), 32,
960 tx.joinSplitPubKey.begin()
961 ) != 0) {
962 return state.DoS(100, error("CheckTransaction(): invalid joinsplit signature"),
963 REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
964 }
965 }
966
967 if (tx.IsCoinBase() && tx.vout[0].nValue >= ASSETCHAINS_TIMELOCKGTE)
968 {
969 // if time locks are on, ensure that this coin base is time locked exactly as it should be
970
971 }
972 return true;
973}
974
975bool CheckTransaction(const CTransaction& tx, CValidationState &state,
976 libzcash::ProofVerifier& verifier)
977{
978 static uint256 array[64]; static int32_t numbanned,indallvouts; int32_t j,k,n;
979 if ( *(int32_t *)&array[0] == 0 )
980 numbanned = komodo_bannedset(&indallvouts,array,(int32_t)(sizeof(array)/sizeof(*array)));
981 n = tx.vin.size();
982 for (j=0; j<n; j++)
983 {
984 for (k=0; k<numbanned; k++)
985 {
986 if ( tx.vin[j].prevout.hash == array[k] && (tx.vin[j].prevout.n == 1 || k >= indallvouts) )
987 {
988 static uint32_t counter;
989 if ( counter++ < 100 )
990 printf("MEMPOOL: banned tx.%d being used at ht.%d vout.%d\n",k,(int32_t)chainActive.Tip()->nHeight,j);
991 return(false);
992 }
993 }
994 }
995 // Don't count coinbase transactions because mining skews the count
996 if (!tx.IsCoinBase()) {
997 transactionsValidated.increment();
998 }
999
1000 if (!CheckTransactionWithoutProofVerification(tx, state)) {
1001 return false;
1002 } else {
1003 // Ensure that zk-SNARKs verify
1004 BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
1005 if (!joinsplit.Verify(*pzcashParams, verifier, tx.joinSplitPubKey)) {
1006 return state.DoS(100, error("CheckTransaction(): joinsplit does not verify"),
1007 REJECT_INVALID, "bad-txns-joinsplit-verification-failed");
1008 }
1009 }
1010 return true;
1011 }
1012}
1013
1014bool CheckTransactionWithoutProofVerification(const CTransaction& tx, CValidationState &state)
1015{
1016 // Basic checks that don't depend on any context
1017
1018 /**
1019 * Previously:
1020 * 1. The consensus rule below was:
1021 * if (tx.nVersion < SPROUT_MIN_TX_VERSION) { ... }
1022 * which checked if tx.nVersion fell within the range:
1023 * INT32_MIN <= tx.nVersion < SPROUT_MIN_TX_VERSION
1024 * 2. The parser allowed tx.nVersion to be negative
1025 *
1026 * Now:
1027 * 1. The consensus rule checks to see if tx.nVersion falls within the range:
1028 * 0 <= tx.nVersion < SPROUT_MIN_TX_VERSION
1029 * 2. The previous consensus rule checked for negative values within the range:
1030 * INT32_MIN <= tx.nVersion < 0
1031 * This is unnecessary for Overwinter transactions since the parser now
1032 * interprets the sign bit as fOverwintered, so tx.nVersion is always >=0,
1033 * and when Overwinter is not active ContextualCheckTransaction rejects
1034 * transactions with fOverwintered set. When fOverwintered is set,
1035 * this function and ContextualCheckTransaction will together check to
1036 * ensure tx.nVersion avoids the following ranges:
1037 * 0 <= tx.nVersion < OVERWINTER_MIN_TX_VERSION
1038 * OVERWINTER_MAX_TX_VERSION < tx.nVersion <= INT32_MAX
1039 */
1040 if (!tx.fOverwintered && tx.nVersion < SPROUT_MIN_TX_VERSION) {
1041 return state.DoS(100, error("CheckTransaction(): version too low"),
1042 REJECT_INVALID, "bad-txns-version-too-low");
1043 }
1044 else if (tx.fOverwintered) {
1045 if (tx.nVersion < OVERWINTER_MIN_TX_VERSION) {
1046 return state.DoS(100, error("CheckTransaction(): overwinter version too low"),
1047 REJECT_INVALID, "bad-tx-overwinter-version-too-low");
1048 }
1049 if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) {
1050 return state.DoS(100, error("CheckTransaction(): unknown tx version group id"),
1051 REJECT_INVALID, "bad-tx-version-group-id");
1052 }
1053 if (tx.nExpiryHeight >= TX_EXPIRY_HEIGHT_THRESHOLD) {
1054 return state.DoS(100, error("CheckTransaction(): expiry height is too high"),
1055 REJECT_INVALID, "bad-tx-expiry-height-too-high");
1056 }
1057 }
1058
1059 // Transactions can contain empty `vin` and `vout` so long as
1060 // `vjoinsplit` is non-empty.
1061 if (tx.vin.empty() && tx.vjoinsplit.empty())
1062 return state.DoS(10, error("CheckTransaction(): vin empty"),
1063 REJECT_INVALID, "bad-txns-vin-empty");
1064 if (tx.vout.empty() && tx.vjoinsplit.empty())
1065 return state.DoS(10, error("CheckTransaction(): vout empty"),
1066 REJECT_INVALID, "bad-txns-vout-empty");
1067
1068 // Size limits
1069 BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE); // sanity
1070 if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE)
1071 return state.DoS(100, error("CheckTransaction(): size limits failed"),
1072 REJECT_INVALID, "bad-txns-oversize");
1073
1074 // Check for negative or overflow output values
1075 CAmount nValueOut = 0;
1076 BOOST_FOREACH(const CTxOut& txout, tx.vout)
1077 {
1078 if (txout.nValue < 0)
1079 return state.DoS(100, error("CheckTransaction(): txout.nValue negative"),
1080 REJECT_INVALID, "bad-txns-vout-negative");
1081 if (txout.nValue > MAX_MONEY)
1082 {
1083 fprintf(stderr,"%.8f > max %.8f\n",(double)txout.nValue/COIN,(double)MAX_MONEY/COIN);
1084 return state.DoS(100, error("CheckTransaction(): txout.nValue too high"),REJECT_INVALID, "bad-txns-vout-toolarge");
1085 }
1086 nValueOut += txout.nValue;
1087 if (!MoneyRange(nValueOut))
1088 return state.DoS(100, error("CheckTransaction(): txout total out of range"),
1089 REJECT_INVALID, "bad-txns-txouttotal-toolarge");
1090 }
1091
1092 // Ensure that joinsplit values are well-formed
1093 BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
1094 {
1095 if (joinsplit.vpub_old < 0) {
1096 return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old negative"),
1097 REJECT_INVALID, "bad-txns-vpub_old-negative");
1098 }
1099
1100 if (joinsplit.vpub_new < 0) {
1101 return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new negative"),
1102 REJECT_INVALID, "bad-txns-vpub_new-negative");
1103 }
1104
1105 if (joinsplit.vpub_old > MAX_MONEY) {
1106 return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old too high"),
1107 REJECT_INVALID, "bad-txns-vpub_old-toolarge");
1108 }
1109
1110 if (joinsplit.vpub_new > MAX_MONEY) {
1111 return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new too high"),
1112 REJECT_INVALID, "bad-txns-vpub_new-toolarge");
1113 }
1114
1115 if (joinsplit.vpub_new != 0 && joinsplit.vpub_old != 0) {
1116 return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new and joinsplit.vpub_old both nonzero"),
1117 REJECT_INVALID, "bad-txns-vpubs-both-nonzero");
1118 }
1119
1120 nValueOut += joinsplit.vpub_old;
1121 if (!MoneyRange(nValueOut)) {
1122 return state.DoS(100, error("CheckTransaction(): txout total out of range"),
1123 REJECT_INVALID, "bad-txns-txouttotal-toolarge");
1124 }
1125 }
1126
1127 // Ensure input values do not exceed MAX_MONEY
1128 // We have not resolved the txin values at this stage,
1129 // but we do know what the joinsplits claim to add
1130 // to the value pool.
1131 {
1132 CAmount nValueIn = 0;
1133 for (std::vector<JSDescription>::const_iterator it(tx.vjoinsplit.begin()); it != tx.vjoinsplit.end(); ++it)
1134 {
1135 nValueIn += it->vpub_new;
1136
1137 if (!MoneyRange(it->vpub_new) || !MoneyRange(nValueIn)) {
1138 return state.DoS(100, error("CheckTransaction(): txin total out of range"),
1139 REJECT_INVALID, "bad-txns-txintotal-toolarge");
1140 }
1141 }
1142 }
1143
1144
1145 // Check for duplicate inputs
1146 set<COutPoint> vInOutPoints;
1147 BOOST_FOREACH(const CTxIn& txin, tx.vin)
1148 {
1149 if (vInOutPoints.count(txin.prevout))
1150 return state.DoS(100, error("CheckTransaction(): duplicate inputs"),
1151 REJECT_INVALID, "bad-txns-inputs-duplicate");
1152 vInOutPoints.insert(txin.prevout);
1153 }
1154
1155 // Check for duplicate joinsplit nullifiers in this transaction
1156 set<uint256> vJoinSplitNullifiers;
1157 BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
1158 {
1159 BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers)
1160 {
1161 if (vJoinSplitNullifiers.count(nf))
1162 return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"),
1163 REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate");
1164
1165 vJoinSplitNullifiers.insert(nf);
1166 }
1167 }
1168
1169 if (tx.IsCoinBase())
1170 {
1171 // There should be no joinsplits in a coinbase transaction
1172 if (tx.vjoinsplit.size() > 0)
1173 return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"),
1174 REJECT_INVALID, "bad-cb-has-joinsplits");
1175
1176 if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
1177 return state.DoS(100, error("CheckTransaction(): coinbase script size"),
1178 REJECT_INVALID, "bad-cb-length");
1179 }
1180 else
1181 {
1182 BOOST_FOREACH(const CTxIn& txin, tx.vin)
1183 if (txin.prevout.IsNull())
1184 return state.DoS(10, error("CheckTransaction(): prevout is null"),
1185 REJECT_INVALID, "bad-txns-prevout-null");
1186 }
1187
1188 return true;
1189}
1190
1191CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree)
1192{
1193 extern int32_t KOMODO_ON_DEMAND;
1194 {
1195 LOCK(mempool.cs);
1196 uint256 hash = tx.GetHash();
1197 double dPriorityDelta = 0;
1198 CAmount nFeeDelta = 0;
1199 mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta);
1200 if (dPriorityDelta > 0 || nFeeDelta > 0)
1201 return 0;
1202 }
1203
1204 CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes);
1205
1206 if (fAllowFree)
1207 {
1208 // There is a free transaction area in blocks created by most miners.
1209 // If we are relaying, we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000
1210 // bytes to be considered to fall into this category. We don't want to encourage sending
1211 // multiple transactions instead of one big transaction to avoid fees.
1212 if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000))
1213 nMinFee = 0;
1214 }
1215
1216 if (!MoneyRange(nMinFee))
1217 nMinFee = MAX_MONEY;
1218 return nMinFee;
1219}
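// Fee example (illustrative, not authoritative policy; the 1,000-satoshi-per-KB
// rate below is assumed, the real rate comes from DEFAULT_MIN_RELAY_TX_FEE): for
// a 400-byte transaction with no priority/fee delta registered in the mempool,
//   nMinFee = ::minRelayTxFee.GetFee(400) = 400 satoshis,
// unless fAllowFree is true and 400 < DEFAULT_BLOCK_PRIORITY_SIZE - 1000, in
// which case nMinFee is 0.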
1220
1221
1222bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,bool* pfMissingInputs, bool fRejectAbsurdFee)
1223{
1224 AssertLockHeld(cs_main);
1225 if (pfMissingInputs)
1226 *pfMissingInputs = false;
1227
1228 int nextBlockHeight = chainActive.Height() + 1;
1229 auto consensusBranchId = CurrentEpochBranchId(nextBlockHeight, Params().GetConsensus());
1230
1231 // Node operator can choose to reject tx by number of transparent inputs
1232 static_assert(std::numeric_limits<size_t>::max() >= std::numeric_limits<int64_t>::max(), "size_t too small");
1233 size_t limit = (size_t) GetArg("-mempooltxinputlimit", 0);
1234 if (limit > 0) {
1235 size_t n = tx.vin.size();
1236 if (n > limit) {
1237 LogPrint("mempool", "Dropping txid %s : too many transparent inputs %zu > limit %zu\n", tx.GetHash().ToString(), n, limit );
1238 return false;
1239 }
1240 }
1241
1242 auto verifier = libzcash::ProofVerifier::Strict();
1243 if ( komodo_validate_interest(tx,chainActive.Tip()->nHeight+1,chainActive.Tip()->GetMedianTimePast() + 777,0) < 0 )
1244 {
1245 //fprintf(stderr,"AcceptToMemoryPool komodo_validate_interest failure\n");
1246 return error("AcceptToMemoryPool: komodo_validate_interest failed");
1247 }
1248 if (!CheckTransaction(tx, state, verifier))
1249 return error("AcceptToMemoryPool: CheckTransaction failed");
1250
1251 // DoS level set to 10 to be more forgiving.
1252 // Check transaction contextually against the set of consensus rules which apply in the next block to be mined.
1253 if (!ContextualCheckTransaction(tx, state, nextBlockHeight, 10)) {
1254 return error("AcceptToMemoryPool: ContextualCheckTransaction failed");
1255 }
1256
1257 // Coinbase is only valid in a block, not as a loose transaction
1258 if (tx.IsCoinBase())
1259 {
1260 fprintf(stderr,"AcceptToMemoryPool coinbase as individual tx\n");
1261 return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"),REJECT_INVALID, "coinbase");
1262 }
1263 // Rather not work on nonstandard transactions (unless -testnet/-regtest)
1264 string reason;
1265 if (Params().RequireStandard() && !IsStandardTx(tx, reason, nextBlockHeight))
1266 {
1267 fprintf(stderr,"AcceptToMemoryPool reject nonstandard transaction: %s\n",reason.c_str());
1268 return state.DoS(0,error("AcceptToMemoryPool: nonstandard transaction: %s", reason),REJECT_NONSTANDARD, reason);
1269 }
1270 // Only accept nLockTime-using transactions that can be mined in the next
1271 // block; we don't want our mempool filled up with transactions that can't
1272 // be mined yet.
1273 if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
1274 {
1275 //fprintf(stderr,"AcceptToMemoryPool reject non-final\n");
1276 return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
1277 }
1278 // is it already in the memory pool?
1279 uint256 hash = tx.GetHash();
1280 if (pool.exists(hash))
1281 {
1282 fprintf(stderr,"already in mempool\n");
1283 return false;
1284 }
1285
1286 // Check for conflicts with in-memory transactions
1287 {
1288 LOCK(pool.cs); // protect pool.mapNextTx
1289 for (unsigned int i = 0; i < tx.vin.size(); i++)
1290 {
1291 COutPoint outpoint = tx.vin[i].prevout;
1292 if (pool.mapNextTx.count(outpoint))
1293 {
1294 static uint32_t counter;
1295 // Disable replacement feature for now
1296 //if ( counter++ < 100 )
1297 fprintf(stderr,"Disable replacement feature for now\n");
1298 return false;
1299 }
1300 }
1301 BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit)
1302 {
1303 BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers)
1304 {
1305 if (pool.mapNullifiers.count(nf))
1306 {
1307 fprintf(stderr,"pool.mapNullifiers.count\n");
1308 return false;
1309 }
1310 }
1311 }
1312 }
1313
1314 {
1315 CCoinsView dummy;
1316 CCoinsViewCache view(&dummy);
1317 int64_t interest;
1318 CAmount nValueIn = 0;
1319 {
1320 LOCK(pool.cs);
1321 CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
1322 view.SetBackend(viewMemPool);
1323
1324 // do we already have it?
1325 if (view.HaveCoins(hash))
1326 {
1327 fprintf(stderr,"view.HaveCoins(hash) error\n");
1328 return false;
1329 }
1330
1331 // do all inputs exist?
1332 // Note that this does not check for the presence of actual outputs (see the next check for that),
1333 // and only helps with filling in pfMissingInputs (to determine missing vs spent).
1334 BOOST_FOREACH(const CTxIn& txin, tx.vin)
1335 {
1336 if (!view.HaveCoins(txin.prevout.hash))
1337 {
1338 if (pfMissingInputs)
1339 *pfMissingInputs = true;
1340 //fprintf(stderr,"missing inputs\n");
1341 return false;
1342 }
1343 }
1344
1345 // are the actual inputs available?
1346 if (!view.HaveInputs(tx))
1347 {
1348 //fprintf(stderr,"accept failure.1\n");
1349 return state.Invalid(error("AcceptToMemoryPool: inputs already spent"),REJECT_DUPLICATE, "bad-txns-inputs-spent");
1350 }
1351 // are the joinsplit's requirements met?
1352 if (!view.HaveJoinSplitRequirements(tx))
1353 {
1354 //fprintf(stderr,"accept failure.2\n");
1355 return state.Invalid(error("AcceptToMemoryPool: joinsplit requirements not met"),REJECT_DUPLICATE, "bad-txns-joinsplit-requirements-not-met");
1356 }
1357
1358 // Bring the best block into scope
1359 view.GetBestBlock();
1360
1361 nValueIn = view.GetValueIn(chainActive.Tip()->nHeight,&interest,tx,chainActive.Tip()->nTime);
1362 if ( 0 && interest != 0 )
1363 fprintf(stderr,"add interest %.8f\n",(double)interest/COIN);
1364 // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
1365 view.SetBackend(dummy);
1366 }
1367
1368 // Check for non-standard pay-to-script-hash in inputs
1369 if (Params().RequireStandard() && !AreInputsStandard(tx, view, consensusBranchId))
1370 return error("AcceptToMemoryPool: reject nonstandard transaction input");
1371
1372 // Check that the transaction doesn't have an excessive number of
1373 // sigops, making it impossible to mine. Since the coinbase transaction
1374 // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
1375 // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
1376 // merely non-standard transaction.
1377 unsigned int nSigOps = GetLegacySigOpCount(tx);
1378 nSigOps += GetP2SHSigOpCount(tx, view);
1379 if (nSigOps > MAX_STANDARD_TX_SIGOPS)
1380 {
1381 fprintf(stderr,"accept failure.4\n");
1382 return state.DoS(0, error("AcceptToMemoryPool: too many sigops %s, %d > %d", hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),REJECT_NONSTANDARD, "bad-txns-too-many-sigops");
1383 }
1384
1385 CAmount nValueOut = tx.GetValueOut();
1386 CAmount nFees = nValueIn-nValueOut;
1387 double dPriority = view.GetPriority(tx, chainActive.Height());
1388
1389 // Keep track of transactions that spend a coinbase, which we re-scan
1390 // during reorgs to ensure COINBASE_MATURITY is still met.
1391 bool fSpendsCoinbase = false;
1392 BOOST_FOREACH(const CTxIn &txin, tx.vin) {
1393 const CCoins *coins = view.AccessCoins(txin.prevout.hash);
1394 if (coins->IsCoinBase()) {
1395 fSpendsCoinbase = true;
1396 break;
1397 }
1398 }
1399
1400 // Grab the branch ID we expect this transaction to commit to. We don't
1401 // yet know if it does, but if the entry gets added to the mempool, then
1402 // it has passed ContextualCheckInputs and therefore this is correct.
1403 auto consensusBranchId = CurrentEpochBranchId(chainActive.Height() + 1, Params().GetConsensus());
1404
1405 CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx), fSpendsCoinbase, consensusBranchId);
1406 unsigned int nSize = entry.GetTxSize();
1407
1408 // Accept a tx if it contains joinsplits and has at least the default fee specified by z_sendmany.
1409 if (tx.vjoinsplit.size() > 0 && nFees >= ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE) {
1410 // In future we will have more accurate and dynamic computation of fees for tx with joinsplits.
1411 } else {
1412 // Don't accept it if it can't get into a block
1413 CAmount txMinFee = GetMinRelayFee(tx, nSize, true);
1414 if (fLimitFree && nFees < txMinFee)
1415 {
1416 fprintf(stderr,"accept failure.5\n");
1417 return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d",hash.ToString(), nFees, txMinFee),REJECT_INSUFFICIENTFEE, "insufficient fee");
1418 }
1419 }
1420
1421 // Require that free transactions have sufficient priority to be mined in the next block.
1422 if (GetBoolArg("-relaypriority", false) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) {
1423 fprintf(stderr,"accept failure.6\n");
1424 return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
1425 }
1426
1427 // Continuously rate-limit free (really, very-low-fee) transactions
1428 // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
1429 // be annoying or make others' transactions take longer to confirm.
1430 if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize))
1431 {
1432 static CCriticalSection csFreeLimiter;
1433 static double dFreeCount;
1434 static int64_t nLastTime;
1435 int64_t nNow = GetTime();
1436
1437 LOCK(csFreeLimiter);
1438
1439 // Use an exponentially decaying ~10-minute window:
1440 dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
1441 nLastTime = nNow;
1442 // -limitfreerelay unit is thousand-bytes-per-minute
1443 // At default rate it would take over a month to fill 1GB
1444 if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000)
1445 {
1446 fprintf(stderr,"accept failure.7\n");
1447 return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"), REJECT_INSUFFICIENTFEE, "rate limited free transaction");
1448 }
1449 LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
1450 dFreeCount += nSize;
1451 }
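        // Decay example (illustrative): with the 1/600-per-second decay above, an
        // idle gap of 600 seconds multiplies dFreeCount by (1 - 1/600)^600 ~= 0.37,
        // i.e. roughly a third of the accumulated free-relay budget survives each
        // ~10-minute quiet period.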
1452
1453 if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000 && nFees > nValueOut/19 )
1454 {
1455 fprintf(stderr,"accept failure.8\n");
1456 return error("AcceptToMemoryPool: absurdly high fees %s, %d > %d",hash.ToString(), nFees, ::minRelayTxFee.GetFee(nSize) * 10000);
1457 }
1458
1459 // Check against previous transactions
1460 // This is done last to help prevent CPU exhaustion denial-of-service attacks.
1461 PrecomputedTransactionData txdata(tx);
1462 if (!ContextualCheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId))
1463 {
1464 //fprintf(stderr,"accept failure.9\n");
1465 return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString());
1466 }
1467
1468 // Check again against just the consensus-critical mandatory script
1469 // verification flags, in case of bugs in the standard flags that cause
1470 // transactions to pass as valid when they're actually invalid. For
1471 // instance the STRICTENC flag was incorrectly allowing certain
1472 // CHECKSIG NOT scripts to pass, even though they were invalid.
1473 //
1474 // There is a similar check in CreateNewBlock() to prevent creating
1475 // invalid blocks, however allowing such transactions into the mempool
1476 // can be exploited as a DoS attack.
1477 if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId))
1478 {
1479 fprintf(stderr,"accept failure.10\n");
1480 return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString());
1481 }
1482
1483 // Store transaction in memory
1484 if ( komodo_is_notarytx(tx) == 0 )
1485 KOMODO_ON_DEMAND++;
1486 pool.addUnchecked(hash, entry, !IsInitialBlockDownload());
1487 }
1488
1489 SyncWithWallets(tx, NULL);
1490
1491 return true;
1492}
1493
1494/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
1495bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
1496{
1497 CBlockIndex *pindexSlow = NULL;
1498
1499 LOCK(cs_main);
1500
1501 if (mempool.lookup(hash, txOut))
1502 {
1503 return true;
1504 }
1505
1506 if (fTxIndex) {
1507 CDiskTxPos postx;
1508 if (pblocktree->ReadTxIndex(hash, postx)) {
1509 CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
1510 if (file.IsNull())
1511 return error("%s: OpenBlockFile failed", __func__);
1512 CBlockHeader header;
1513 try {
1514 file >> header;
1515 fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
1516 file >> txOut;
1517 } catch (const std::exception& e) {
1518 return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1519 }
1520 hashBlock = header.GetHash();
1521 if (txOut.GetHash() != hash)
1522 return error("%s: txid mismatch", __func__);
1523 return true;
1524 }
1525 }
1526
1527 if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
1528 int nHeight = -1;
1529 {
1530 CCoinsViewCache &view = *pcoinsTip;
1531 const CCoins* coins = view.AccessCoins(hash);
1532 if (coins)
1533 nHeight = coins->nHeight;
1534 }
1535 if (nHeight > 0)
1536 pindexSlow = chainActive[nHeight];
1537 }
1538
1539 if (pindexSlow) {
1540 CBlock block;
1541 if (ReadBlockFromDisk(block, pindexSlow)) {
1542 BOOST_FOREACH(const CTransaction &tx, block.vtx) {
1543 if (tx.GetHash() == hash) {
1544 txOut = tx;
1545 hashBlock = pindexSlow->GetBlockHash();
1546 return true;
1547 }
1548 }
1549 }
1550 }
1551
1552 return false;
1553}
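// A commented-out usage sketch for GetTransaction(). The helper name is illustrative; a
// caller can distinguish a mempool hit from an on-disk hit because the mempool path above
// returns without touching hashBlock, which therefore stays null.
/*
static void ReportTransactionSketch(const uint256 &txid)
{
    CTransaction tx; uint256 hashBlock;
    if (!GetTransaction(txid, tx, hashBlock, true))
    {
        LogPrintf("tx %s not found\n", txid.ToString());
        return;
    }
    if (hashBlock.IsNull())
        LogPrintf("tx %s found in the mempool\n", txid.ToString());
    else
        LogPrintf("tx %s found in block %s\n", txid.ToString(), hashBlock.ToString());
}
*/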
1554
1555/*char *komodo_getspendscript(uint256 hash,int32_t n)
1556{
1557 CTransaction tx; uint256 hashBlock;
1558 if ( !GetTransaction(hash,tx,hashBlock,true) )
1559 {
1560 printf("null GetTransaction\n");
1561 return(0);
1562 }
1563 if ( n >= 0 && n < tx.vout.size() )
1564 return((char *)tx.vout[n].scriptPubKey.ToString().c_str());
1565 else printf("getspendscript illegal n.%d\n",n);
1566 return(0);
1567}*/
1568
1569
1570//////////////////////////////////////////////////////////////////////////////
1571//
1572// CBlock and CBlockIndex
1573//
1574
1575bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
1576{
1577 // Open history file to append
1578 CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
1579 if (fileout.IsNull())
1580 return error("WriteBlockToDisk: OpenBlockFile failed");
1581
1582 // Write index header
1583 unsigned int nSize = fileout.GetSerializeSize(block);
1584 fileout << FLATDATA(messageStart) << nSize;
1585
1586 // Write block
1587 long fileOutPos = ftell(fileout.Get());
1588 if (fileOutPos < 0)
1589 return error("WriteBlockToDisk: ftell failed");
1590 pos.nPos = (unsigned int)fileOutPos;
1591 fileout << block;
1592
1593 return true;
1594}
1595
1596bool ReadBlockFromDisk(int32_t height,CBlock& block, const CDiskBlockPos& pos)
1597{
1598 uint8_t pubkey33[33];
1599 block.SetNull();
1600
1601 // Open history file to read
1602 CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
1603 if (filein.IsNull())
1604 {
1605 //fprintf(stderr,"readblockfromdisk err A\n");
1606 return false;//error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
1607 }
1608
1609 // Read block
1610 try {
1611 filein >> block;
1612 }
1613 catch (const std::exception& e) {
1614 fprintf(stderr,"readblockfromdisk err B\n");
1615 return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
1616 }
1617 // Check the header
1618 komodo_block2pubkey33(pubkey33,block);
1619 if (!(CheckEquihashSolution(&block, Params()) && CheckProofOfWork(height,pubkey33,block.GetHash(), block.nBits, Params().GetConsensus())))
1620 {
1621 int32_t i; for (i=0; i<33; i++)
1622 printf("%02x",pubkey33[i]);
1623 fprintf(stderr," warning unexpected diff at ht.%d\n",height);
1624
1625 return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
1626 }
1627 return true;
1628}
1629
1630bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
1631{
1632 if ( pindex == 0 )
1633 return false;
1634 if (!ReadBlockFromDisk(pindex->nHeight,block, pindex->GetBlockPos()))
1635 return false;
1636 if (block.GetHash() != pindex->GetBlockHash())
1637 return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
1638 pindex->ToString(), pindex->GetBlockPos().ToString());
1639 return true;
1640}
1641
1642//uint64_t komodo_moneysupply(int32_t height);
1643extern char ASSETCHAINS_SYMBOL[KOMODO_ASSETCHAIN_MAXLEN];
1644extern uint32_t ASSETCHAINS_MAGIC;
1645extern uint64_t ASSETCHAINS_LINEAR,ASSETCHAINS_COMMISSION,ASSETCHAINS_SUPPLY;
1646
1647CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
1648{
1649 int32_t numhalvings,i; uint64_t numerator; CAmount nSubsidy = 3 * COIN;
1650 if ( ASSETCHAINS_SYMBOL[0] == 0 )
1651 {
1652 if ( nHeight == 1 )
1653 return(100000000 * COIN); // ICO allocation
1654 else if ( nHeight < KOMODO_ENDOFERA ) //komodo_moneysupply(nHeight) < MAX_MONEY )
1655 return(3 * COIN);
1656 else return(0);
1657 }
1658 else
1659 {
1660 if ( nHeight == 1 )
1661 return(ASSETCHAINS_SUPPLY * COIN + (ASSETCHAINS_MAGIC & 0xffffff));
1662 else
1663 return(komodo_ac_block_subsidy(nHeight));
1664 }
1665/*
1666 // Mining slow start
1667 // The subsidy is ramped up linearly, skipping the middle payout of
1668 // MAX_SUBSIDY/2 to keep the monetary curve consistent with no slow start.
1669 if (nHeight < consensusParams.nSubsidySlowStartInterval / 2) {
1670 nSubsidy /= consensusParams.nSubsidySlowStartInterval;
1671 nSubsidy *= nHeight;
1672 return nSubsidy;
1673 } else if (nHeight < consensusParams.nSubsidySlowStartInterval) {
1674 nSubsidy /= consensusParams.nSubsidySlowStartInterval;
1675 nSubsidy *= (nHeight+1);
1676 return nSubsidy;
1677 }
1678
1679 assert(nHeight > consensusParams.SubsidySlowStartShift());
1680 int halvings = (nHeight - consensusParams.SubsidySlowStartShift()) / consensusParams.nSubsidyHalvingInterval;*/
1681 // Force block reward to zero when right shift is undefined.
1682 //int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
1683 //if (halvings >= 64)
1684 // return 0;
1685
1686 // Subsidy is cut in half every 840,000 blocks which will occur approximately every 4 years.
1687 //nSubsidy >>= halvings;
1688 //return nSubsidy;
1689}
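// A commented-out sketch that tabulates the schedule implemented above for the KMD chain
// (ASSETCHAINS_SYMBOL[0] == 0): 100,000,000 coins at height 1, a flat 3 coins per block
// below KOMODO_ENDOFERA, and zero from that height on. The helper name and the sample
// heights are illustrative only.
/*
static void PrintSubsidyScheduleSketch()
{
    const Consensus::Params &params = Params().GetConsensus();
    int heights[] = { 1, 2, 100000, KOMODO_ENDOFERA - 1, KOMODO_ENDOFERA };
    for (size_t i = 0; i < sizeof(heights)/sizeof(heights[0]); i++)
        LogPrintf("height %d -> subsidy %s\n", heights[i], FormatMoney(GetBlockSubsidy(heights[i], params)));
}
*/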
1690
1691bool IsInitialBlockDownload()
1692{
1693 const CChainParams& chainParams = Params();
1694 LOCK(cs_main);
1695 if (fImporting || fReindex)
1696 {
1697 //fprintf(stderr,"IsInitialBlockDownload: fImporting %d || %d fReindex\n",(int32_t)fImporting,(int32_t)fReindex);
1698 return true;
1699 }
1700 if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
1701 {
1702 //fprintf(stderr,"IsInitialBlockDownload: checkpoint -> initialdownload\n");
1703 return true;
1704 }
1705 static bool lockIBDState = false;
1706 if (lockIBDState)
1707 {
1708 //fprintf(stderr,"lockIBDState true %d < %d\n",chainActive.Height(),pindexBestHeader->nHeight - 10);
1709 return false;
1710 }
1711 bool state; CBlockIndex *ptr = chainActive.Tip();
1712 if ( ptr == 0 )
1713 ptr = pindexBestHeader;
1714 else if ( pindexBestHeader != 0 && pindexBestHeader->nHeight > ptr->nHeight )
1715 ptr = pindexBestHeader;
1716 //if ( ASSETCHAINS_SYMBOL[0] == 0 )
1717 state = ((chainActive.Height() < ptr->nHeight - 24*60) ||
1718 ptr->GetBlockTime() < (GetTime() - chainParams.MaxTipAge()));
1719 //else state = (chainActive.Height() < ptr->nHeight - 24*60);
1720 //fprintf(stderr,"state.%d ht.%d vs %d, t.%u %u\n",state,(int32_t)chainActive.Height(),(uint32_t)ptr->nHeight,(int32_t)ptr->GetBlockTime(),(uint32_t)(GetTime() - chainParams.MaxTipAge()));
1721 if (!state)
1722 {
1723 lockIBDState = true;
1724 }
1725 return state;
1726}
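// The heuristic above reduces to two tests; this commented-out helper restates it with
// illustrative names (it is not used elsewhere): we are still in initial download if the
// tip lags the best known header by more than a day of one-minute blocks, or if the tip
// itself is older than the configured maximum tip age.
/*
static bool LooksLikeInitialDownloadSketch(int tipHeight, int bestHeaderHeight,
                                           int64_t tipBlockTime, int64_t nowSeconds,
                                           int64_t maxTipAgeSeconds)
{
    return (tipHeight < bestHeaderHeight - 24*60) ||
           (tipBlockTime < nowSeconds - maxTipAgeSeconds);
}
*/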
1727
1728bool fLargeWorkForkFound = false;
1729bool fLargeWorkInvalidChainFound = false;
1730CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
1731
1732void CheckForkWarningConditions()
1733{
1734 AssertLockHeld(cs_main);
1735 // Before we get past initial download, we cannot reliably alert about forks
1736 // (we assume we don't get stuck on a fork before the last checkpoint)
1737 if (IsInitialBlockDownload())
1738 return;
1739
1740 // If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it)
1741 // of our head, drop it
1742 if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 288)
1743 pindexBestForkTip = NULL;
1744
1745 if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
1746 {
1747 if (!fLargeWorkForkFound && pindexBestForkBase)
1748 {
1749 std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
1750 pindexBestForkBase->phashBlock->ToString() + std::string("'");
1751 CAlert::Notify(warning, true);
1752 }
1753 if (pindexBestForkTip && pindexBestForkBase)
1754 {
1755 LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
1756 pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
1757 pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
1758 fLargeWorkForkFound = true;
1759 }
1760 else
1761 {
1762 std::string warning = std::string("Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.");
1763 LogPrintf("%s: %s\n", warning.c_str(), __func__);
1764 CAlert::Notify(warning, true);
1765 fLargeWorkInvalidChainFound = true;
1766 }
1767 }
1768 else
1769 {
1770 fLargeWorkForkFound = false;
1771 fLargeWorkInvalidChainFound = false;
1772 }
1773}
1774
1775void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
1776{
1777 AssertLockHeld(cs_main);
1778 // If we are on a fork that is sufficiently large, set a warning flag
1779 CBlockIndex* pfork = pindexNewForkTip;
1780 CBlockIndex* plonger = chainActive.Tip();
1781 while (pfork && pfork != plonger)
1782 {
1783 while (plonger && plonger->nHeight > pfork->nHeight)
1784 plonger = plonger->pprev;
1785 if (pfork == plonger)
1786 break;
1787 pfork = pfork->pprev;
1788 }
1789
1790 // We define a condition that we should warn the user about: a fork of at least 7 blocks
1791 // with a tip within 72 blocks (+/- 3 hours if no one mines it) of ours,
1792 // or a chain that is entirely longer than ours and invalid (the latter should be caught by both checks).
1793 // We use 7 blocks rather arbitrarily, as it represents just under 10% of sustained network
1794 // hash rate operating on the fork.
1795 // We define it this way because it lets us store only the highest fork tip (+ base) that meets
1796 // the 7-block condition, and from that always have the most-likely-to-cause-warning fork.
1797 if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
1798 pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
1799 chainActive.Height() - pindexNewForkTip->nHeight < 72)
1800 {
1801 pindexBestForkTip = pindexNewForkTip;
1802 pindexBestForkBase = pfork;
1803 }
1804
1805 CheckForkWarningConditions();
1806}
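// A commented-out restatement of the warning predicate above, with illustrative names:
// the fork must carry at least seven blocks' worth of work past the split point and its
// tip must be within 72 blocks of our current height.
/*
static bool ForkDeservesWarningSketch(const arith_uint256 &forkTipWork,
                                      const arith_uint256 &splitPointWork,
                                      const arith_uint256 &oneBlockProofAtSplit,
                                      int ourHeight, int forkTipHeight)
{
    return (forkTipWork - splitPointWork > oneBlockProofAtSplit * 7) &&
           (ourHeight - forkTipHeight < 72);
}
*/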
1807
1808// Requires cs_main.
1809void Misbehaving(NodeId pnode, int howmuch)
1810{
1811 if (howmuch == 0)
1812 return;
1813
1814 CNodeState *state = State(pnode);
1815 if (state == NULL)
1816 return;
1817
1818 state->nMisbehavior += howmuch;
1819 int banscore = GetArg("-banscore", 100);
1820 if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
1821 {
1822 LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
1823 state->fShouldBan = true;
1824 } else
1825 LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
1826}
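// Commented-out sketch of the threshold test above: the ban flag is set exactly once, on
// the call that pushes the score across -banscore. The helper name and the sample scores
// (with the default -banscore of 100) are illustrative.
/*
static bool CrossesBanThresholdSketch(int scoreBefore, int howmuch, int banscore)
{
    int scoreAfter = scoreBefore + howmuch;
    return scoreAfter >= banscore && scoreBefore < banscore;
}
// CrossesBanThresholdSketch(90, 20, 100) == true   (90 -> 110 crosses the line)
// CrossesBanThresholdSketch(110, 10, 100) == false (already past it, no repeat trigger)
*/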
1827
1828void static InvalidChainFound(CBlockIndex* pindexNew)
1829{
1830 if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
1831 pindexBestInvalid = pindexNew;
1832
1833 LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
1834 pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
1835 log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
1836 pindexNew->GetBlockTime()));
1837 CBlockIndex *tip = chainActive.Tip();
1838 assert (tip);
1839 LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
1840 tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
1841 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
1842 CheckForkWarningConditions();
1843}
1844
1845void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
1846 int nDoS = 0;
1847 if (state.IsInvalid(nDoS)) {
1848 std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
1849 if (it != mapBlockSource.end() && State(it->second)) {
1850 CBlockReject reject = {state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
1851 State(it->second)->rejects.push_back(reject);
1852 if (nDoS > 0)
1853 Misbehaving(it->second, nDoS);
1854 }
1855 }
1856 if (!state.CorruptionPossible()) {
1857 pindex->nStatus |= BLOCK_FAILED_VALID;
1858 setDirtyBlockIndex.insert(pindex);
1859 setBlockIndexCandidates.erase(pindex);
1860 InvalidChainFound(pindex);
1861 }
1862}
1863
1864void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
1865{
1866 if (!tx.IsCoinBase()) // mark inputs spent
1867 {
1868 txundo.vprevout.reserve(tx.vin.size());
1869 BOOST_FOREACH(const CTxIn &txin, tx.vin) {
1870 CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
1871 unsigned nPos = txin.prevout.n;
1872
1873 if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
1874 assert(false);
1875 // mark an outpoint spent, and construct undo information
1876 txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
1877 coins->Spend(nPos);
1878 if (coins->vout.size() == 0) {
1879 CTxInUndo& undo = txundo.vprevout.back();
1880 undo.nHeight = coins->nHeight;
1881 undo.fCoinBase = coins->fCoinBase;
1882 undo.nVersion = coins->nVersion;
1883 }
1884 }
1885 }
1886 BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) { // spend nullifiers
1887 BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
1888 inputs.SetNullifier(nf, true);
1889 }
1890 }
1891 inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight); // add outputs
1892}
1893
1894void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
1895{
1896 CTxUndo txundo;
1897 UpdateCoins(tx, inputs, txundo, nHeight);
1898}
1899
1900bool CScriptCheck::operator()() {
1901 const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1902 if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), consensusBranchId, &error)) {
1903 return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error));
1904 }
1905 return true;
1906}
1907
1908int GetSpendHeight(const CCoinsViewCache& inputs)
1909{
1910 LOCK(cs_main);
1911 CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
1912 return pindexPrev->nHeight + 1;
1913}
1914
1915namespace Consensus {
1916bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, const Consensus::Params& consensusParams)
1917{
1918 // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
1919 // for an attacker to attempt to split the network.
1920 if (!inputs.HaveInputs(tx))
1921 return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString()));
1922
1923 // are the JoinSplit's requirements met?
1924 if (!inputs.HaveJoinSplitRequirements(tx))
1925 return state.Invalid(error("CheckInputs(): %s JoinSplit requirements not met", tx.GetHash().ToString()));
1926
1927 CAmount nValueIn = 0;
1928 CAmount nFees = 0;
1929 for (unsigned int i = 0; i < tx.vin.size(); i++)
1930 {
1931 const COutPoint &prevout = tx.vin[i].prevout;
1932 const CCoins *coins = inputs.AccessCoins(prevout.hash);
1933 assert(coins);
1934
1935 if (coins->IsCoinBase()) {
1936 // Ensure that coinbases are matured
1937 if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
1938 return state.Invalid(
1939 error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),
1940 REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
1941 }
1942
1943 // Ensure that coinbases cannot be spent to transparent outputs
1944 // Disabled on regtest
1945 if (fCoinbaseEnforcedProtectionEnabled &&
1946 consensusParams.fCoinbaseMustBeProtected &&
1947 !tx.vout.empty()) {
1948 return state.Invalid(
1949 error("CheckInputs(): tried to spend coinbase with transparent outputs"),
1950 REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
1951 }
1952 }
1953
1954 // Check for negative or overflow input values
1955 nValueIn += coins->vout[prevout.n].nValue;
1956#ifdef KOMODO_ENABLE_INTEREST
1957 if ( ASSETCHAINS_SYMBOL[0] == 0 && nSpendHeight > 60000 )//chainActive.Tip() != 0 && chainActive.Tip()->nHeight >= 60000 )
1958 {
1959 if ( coins->vout[prevout.n].nValue >= 10*COIN )
1960 {
1961 int64_t interest; int32_t txheight; uint32_t locktime;
1962 if ( (interest= komodo_accrued_interest(&txheight,&locktime,prevout.hash,prevout.n,0,coins->vout[prevout.n].nValue,(int32_t)nSpendHeight-1)) != 0 )
1963 {
1964//fprintf(stderr,"checkResult %.8f += val %.8f interest %.8f ht.%d lock.%u tip.%u\n",(double)nValueIn/COIN,(double)coins->vout[prevout.n].nValue/COIN,(double)interest/COIN,txheight,locktime,chainActive.Tip()->nTime);
1965 nValueIn += interest;
1966 }
1967 }
1968 }
1969#endif
1970 if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
1971 return state.DoS(100, error("CheckInputs(): txin values out of range"),
1972 REJECT_INVALID, "bad-txns-inputvalues-outofrange");
1973
1974 }
1975
1976 nValueIn += tx.GetJoinSplitValueIn();
1977 if (!MoneyRange(nValueIn))
1978 return state.DoS(100, error("CheckInputs(): vpub_old values out of range"),
1979 REJECT_INVALID, "bad-txns-inputvalues-outofrange");
1980
1981 if (nValueIn < tx.GetValueOut())
1982 {
1983 fprintf(stderr,"spentheight.%d valuein %s vs %s error\n",nSpendHeight,FormatMoney(nValueIn).c_str(), FormatMoney(tx.GetValueOut()).c_str());
1984 return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s) diff %.8f",
1985 tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut()),((double)nValueIn - tx.GetValueOut())/COIN),REJECT_INVALID, "bad-txns-in-belowout");
1986 }
1987 // Tally transaction fees
1988 CAmount nTxFee = nValueIn - tx.GetValueOut();
1989 if (nTxFee < 0)
1990 return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()),
1991 REJECT_INVALID, "bad-txns-fee-negative");
1992 nFees += nTxFee;
1993 if (!MoneyRange(nFees))
1994 return state.DoS(100, error("CheckInputs(): nFees out of range"),
1995 REJECT_INVALID, "bad-txns-fee-outofrange");
1996 return true;
1997}
1998}// namespace Consensus
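// Commented-out arithmetic sketch of the maturity rule enforced above. The value 100 is
// the usual Bitcoin-derived default and is only illustrative here: a coinbase mined at
// height 1000 satisfies nSpendHeight - 1000 >= 100 from block 1100 onwards.
/*
static bool CoinbaseIsMatureSketch(int coinbaseHeight, int spendHeight, int maturity)
{
    return spendHeight - coinbaseHeight >= maturity;
}
// CoinbaseIsMatureSketch(1000, 1099, 100) == false
// CoinbaseIsMatureSketch(1000, 1100, 100) == true
*/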
1999
2000bool ContextualCheckInputs(
2001 const CTransaction& tx,
2002 CValidationState &state,
2003 const CCoinsViewCache &inputs,
2004 bool fScriptChecks,
2005 unsigned int flags,
2006 bool cacheStore,
2007 PrecomputedTransactionData& txdata,
2008 const Consensus::Params& consensusParams,
2009 uint32_t consensusBranchId,
2010 std::vector<CScriptCheck> *pvChecks)
2011{
2012 if (!tx.IsCoinBase())
2013 {
2014 if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs), consensusParams)) {
2015 return false;
2016 }
2017
2018 if (pvChecks)
2019 pvChecks->reserve(tx.vin.size());
2020
2021 // The first loop above does all the inexpensive checks.
2022 // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
2023 // Helps prevent CPU exhaustion attacks.
2024
2025 // Skip ECDSA signature verification when connecting blocks
2026 // before the last block chain checkpoint. This is safe because block merkle hashes are
2027 // still computed and checked, and any change will be caught at the next checkpoint.
2028 if (fScriptChecks) {
2029 for (unsigned int i = 0; i < tx.vin.size(); i++) {
2030 const COutPoint &prevout = tx.vin[i].prevout;
2031 const CCoins* coins = inputs.AccessCoins(prevout.hash);
2032 assert(coins);
2033
2034 // Verify signature
2035 CScriptCheck check(*coins, tx, i, flags, cacheStore, consensusBranchId, &txdata);
2036 if (pvChecks) {
2037 pvChecks->push_back(CScriptCheck());
2038 check.swap(pvChecks->back());
2039 } else if (!check()) {
2040 if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
2041 // Check whether the failure was caused by a
2042 // non-mandatory script verification check, such as
2043 // non-standard DER encodings or non-null dummy
2044 // arguments; if so, don't trigger DoS protection to
2045 // avoid splitting the network between upgraded and
2046 // non-upgraded nodes.
2047 CScriptCheck check2(*coins, tx, i,
2048 flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, consensusBranchId, &txdata);
2049 if (check2())
2050 return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
2051 }
2052 // Failures of other flags indicate a transaction that is
2053 // invalid in new blocks, e.g. an invalid P2SH. We DoS-ban
2054 // such nodes, as they are not following the protocol. That
2055 // said, during an upgrade careful thought should be given
2056 // to the correct behavior - we may want to continue
2057 // peering with non-upgraded nodes even after a soft-fork
2058 // super-majority vote has passed.
2059 return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
2060 }
2061 }
2062 }
2063 }
2064
2065 return true;
2066}
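// Commented-out bitmask sketch of the two-pass idea above, using made-up flag values:
// the retry strips every policy-only bit and keeps only the consensus-critical ones, so a
// failure that survives the retry is a genuine consensus failure rather than a policy one.
/*
static const unsigned int SKETCH_MANDATORY_FLAGS   = 0x0001; // hypothetical consensus bit
static const unsigned int SKETCH_POLICY_ONLY_FLAGS = 0x0006; // hypothetical policy-only bits

static unsigned int MandatoryOnlySketch(unsigned int flags)
{
    return flags & ~SKETCH_POLICY_ONLY_FLAGS;
}
// MandatoryOnlySketch(SKETCH_MANDATORY_FLAGS | SKETCH_POLICY_ONLY_FLAGS) == SKETCH_MANDATORY_FLAGS
*/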
2067
2068
2069/*bool ContextualCheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, const Consensus::Params& consensusParams, std::vector<CScriptCheck> *pvChecks)
2070{
2071 if (!NonContextualCheckInputs(tx, state, inputs, fScriptChecks, flags, cacheStore, consensusParams, pvChecks)) {
2072 fprintf(stderr,"ContextualCheckInputs failure.0\n");
2073 return false;
2074 }
2075
2076 if (!tx.IsCoinBase())
2077 {
2078 // While checking, GetBestBlock() refers to the parent block.
2079 // This is also true for mempool checks.
2080 CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
2081 int nSpendHeight = pindexPrev->nHeight + 1;
2082 for (unsigned int i = 0; i < tx.vin.size(); i++)
2083 {
2084 const COutPoint &prevout = tx.vin[i].prevout;
2085 const CCoins *coins = inputs.AccessCoins(prevout.hash);
2086 // Assertion is okay because NonContextualCheckInputs ensures the inputs
2087 // are available.
2088 assert(coins);
2089
2090 // If prev is coinbase, check that it's matured
2091 if (coins->IsCoinBase()) {
2092 if ( ASSETCHAINS_SYMBOL[0] == 0 )
2093 COINBASE_MATURITY = _COINBASE_MATURITY;
2094 if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
2095 fprintf(stderr,"ContextualCheckInputs failure.1 i.%d of %d\n",i,(int32_t)tx.vin.size());
2096
2097 return state.Invalid(
2098 error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
2099 }
2100 }
2101 }
2102 }
2103
2104 return true;
2105}*/
2106
2107namespace {
2108
2109bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
2110{
2111 // Open history file to append
2112 CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
2113 if (fileout.IsNull())
2114 return error("%s: OpenUndoFile failed", __func__);
2115
2116 // Write index header
2117 unsigned int nSize = fileout.GetSerializeSize(blockundo);
2118 fileout << FLATDATA(messageStart) << nSize;
2119
2120 // Write undo data
2121 long fileOutPos = ftell(fileout.Get());
2122 if (fileOutPos < 0)
2123 return error("%s: ftell failed", __func__);
2124 pos.nPos = (unsigned int)fileOutPos;
2125 fileout << blockundo;
2126
2127 // calculate & write checksum
2128 CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
2129 hasher << hashBlock;
2130 hasher << blockundo;
2131 fileout << hasher.GetHash();
2132
2133 return true;
2134}
2135
2136bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
2137{
2138 // Open history file to read
2139 CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
2140 if (filein.IsNull())
2141        return error("%s: OpenUndoFile failed", __func__);
2142
2143 // Read block
2144 uint256 hashChecksum;
2145 try {
2146 filein >> blockundo;
2147 filein >> hashChecksum;
2148 }
2149 catch (const std::exception& e) {
2150 return error("%s: Deserialize or I/O error - %s", __func__, e.what());
2151 }
2152
2153 // Verify checksum
2154 CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
2155 hasher << hashBlock;
2156 hasher << blockundo;
2157 if (hashChecksum != hasher.GetHash())
2158 return error("%s: Checksum mismatch", __func__);
2159
2160 return true;
2161}
2162
2163/** Abort with a message */
2164bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
2165{
2166 strMiscWarning = strMessage;
2167 LogPrintf("*** %s\n", strMessage);
2168 uiInterface.ThreadSafeMessageBox(
2169 userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
2170 "", CClientUIInterface::MSG_ERROR);
2171 StartShutdown();
2172 return false;
2173}
2174
2175bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
2176{
2177 AbortNode(strMessage, userMessage);
2178 return state.Error(strMessage);
2179}
2180
2181} // anon namespace
2182
2183/**
2184 * Apply the undo operation of a CTxInUndo to the given chain state.
2185 * @param undo The undo object.
2186 * @param view The coins view to which to apply the changes.
2187 * @param out The out point that corresponds to the tx input.
2188 * @return True on success.
2189 */
2190static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
2191{
2192 bool fClean = true;
2193
2194 CCoinsModifier coins = view.ModifyCoins(out.hash);
2195 if (undo.nHeight != 0) {
2196 // undo data contains height: this is the last output of the prevout tx being spent
2197 if (!coins->IsPruned())
2198 fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
2199 coins->Clear();
2200 coins->fCoinBase = undo.fCoinBase;
2201 coins->nHeight = undo.nHeight;
2202 coins->nVersion = undo.nVersion;
2203 } else {
2204 if (coins->IsPruned())
2205 fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
2206 }
2207 if (coins->IsAvailable(out.n))
2208 fClean = fClean && error("%s: undo data overwriting existing output", __func__);
2209 if (coins->vout.size() < out.n+1)
2210 coins->vout.resize(out.n+1);
2211 coins->vout[out.n] = undo.txout;
2212
2213 return fClean;
2214}
2215
2216bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
2217{
2218 assert(pindex->GetBlockHash() == view.GetBestBlock());
2219
2220 if (pfClean)
2221 *pfClean = false;
2222
2223 bool fClean = true;
2224 komodo_disconnect(pindex,block);
2225 CBlockUndo blockUndo;
2226 CDiskBlockPos pos = pindex->GetUndoPos();
2227 if (pos.IsNull())
2228 return error("DisconnectBlock(): no undo data available");
2229 if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
2230 return error("DisconnectBlock(): failure reading undo data");
2231
2232 if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
2233 return error("DisconnectBlock(): block and undo data inconsistent");
2234
2235 // undo transactions in reverse order
2236 for (int i = block.vtx.size() - 1; i >= 0; i--) {
2237 const CTransaction &tx = block.vtx[i];
2238 uint256 hash = tx.GetHash();
2239
2240 // Check that all outputs are available and match the outputs in the block itself
2241 // exactly.
2242 {
2243 CCoinsModifier outs = view.ModifyCoins(hash);
2244 outs->ClearUnspendable();
2245
2246 CCoins outsBlock(tx, pindex->nHeight);
2247 // The CCoins serialization does not serialize negative numbers.
2248 // No network rules currently depend on the version here, so an inconsistency is harmless,
2249 // but it must be corrected before txout nVersion ever influences a network rule.
2250 if (outsBlock.nVersion < 0)
2251 outs->nVersion = outsBlock.nVersion;
2252 if (*outs != outsBlock)
2253 fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");
2254
2255 // remove outputs
2256 outs->Clear();
2257 }
2258
2259 // unspend nullifiers
2260 BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
2261 BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
2262 view.SetNullifier(nf, false);
2263 }
2264 }
2265
2266 // restore inputs
2267 if (i > 0) { // not coinbases
2268 const CTxUndo &txundo = blockUndo.vtxundo[i-1];
2269 if (txundo.vprevout.size() != tx.vin.size())
2270 return error("DisconnectBlock(): transaction and undo data inconsistent");
2271 for (unsigned int j = tx.vin.size(); j-- > 0;) {
2272 const COutPoint &out = tx.vin[j].prevout;
2273 const CTxInUndo &undo = txundo.vprevout[j];
2274 if (!ApplyTxInUndo(undo, view, out))
2275 fClean = false;
2276 }
2277 }
2278 }
2279
2280 // set the old best anchor back
2281 view.PopAnchor(blockUndo.old_tree_root);
2282
2283 // move best block pointer to prevout block
2284 view.SetBestBlock(pindex->pprev->GetBlockHash());
2285
2286 if (pfClean) {
2287 *pfClean = fClean;
2288 return true;
2289 }
2290
2291 return fClean;
2292}
2293
2294void static FlushBlockFile(bool fFinalize = false)
2295{
2296 LOCK(cs_LastBlockFile);
2297
2298 CDiskBlockPos posOld(nLastBlockFile, 0);
2299
2300 FILE *fileOld = OpenBlockFile(posOld);
2301 if (fileOld) {
2302 if (fFinalize)
2303 TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
2304 FileCommit(fileOld);
2305 fclose(fileOld);
2306 }
2307
2308 fileOld = OpenUndoFile(posOld);
2309 if (fileOld) {
2310 if (fFinalize)
2311 TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
2312 FileCommit(fileOld);
2313 fclose(fileOld);
2314 }
2315}
2316
2317bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
2318
2319static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
2320
2321void ThreadScriptCheck() {
2322 RenameThread("zcash-scriptch");
2323 scriptcheckqueue.Thread();
2324}
2325
2326//
2327 // Called periodically and asynchronously; alerts if it smells like
2328// we're being fed a bad chain (blocks being generated much
2329// too slowly or too quickly).
2330//
2331void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
2332 int64_t nPowTargetSpacing)
2333{
2334 if (bestHeader == NULL || initialDownloadCheck()) return;
2335
2336 static int64_t lastAlertTime = 0;
2337 int64_t now = GetAdjustedTime();
2338 if (lastAlertTime > now-60*60*24) return; // Alert at most once per day
2339
2340 const int SPAN_HOURS=4;
2341 const int SPAN_SECONDS=SPAN_HOURS*60*60;
2342 int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;
2343
2344 boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);
2345
2346 std::string strWarning;
2347 int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;
2348
2349 LOCK(cs);
2350 const CBlockIndex* i = bestHeader;
2351 int nBlocks = 0;
2352 while (i->GetBlockTime() >= startTime) {
2353 ++nBlocks;
2354 i = i->pprev;
2355 if (i == NULL) return; // Ran out of chain, we must not be fully synced
2356 }
2357
2358 // How likely is it to find that many by chance?
2359 double p = boost::math::pdf(poisson, nBlocks);
2360
2361 LogPrint("partitioncheck", "%s : Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
2362 LogPrint("partitioncheck", "%s : likelihood: %g\n", __func__, p);
2363
2364 // Aim for one false-positive about every fifty years of normal running:
2365 const int FIFTY_YEARS = 50*365*24*60*60;
2366 double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);
2367
2368 if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
2369 {
2370 // Many fewer blocks than expected: alert!
2371 strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
2372 nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
2373 }
2374 else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
2375 {
2376 // Many more blocks than expected: alert!
2377 strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
2378 nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
2379 }
2380 if (!strWarning.empty())
2381 {
2382 strMiscWarning = strWarning;
2383 CAlert::Notify(strWarning, true);
2384 lastAlertTime = now;
2385 }
2386}
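// Standalone, commented-out sketch of the numbers PartitionCheck works with, assuming a
// one-minute target spacing (the spacing value is illustrative): a 4-hour window expects
// 240 blocks, and the alert threshold is one false positive per fifty years of such
// windows, i.e. p <= 1/109500, roughly 9.1e-6. The Poisson pmf is computed directly
// instead of via boost.
/*
#include <cmath>
#include <cstdio>

static double PoissonPmfSketch(double lambda, int k)
{
    // exp(-lambda) * lambda^k / k!, evaluated in log space for numerical stability
    return std::exp(-lambda + k * std::log(lambda) - std::lgamma(k + 1.0));
}

int main()
{
    const int spanSeconds = 4 * 60 * 60;
    const int spacingSeconds = 60;                                           // illustrative target spacing
    const int blocksExpected = spanSeconds / spacingSeconds;                 // 240
    const double alertThreshold = 1.0 / ((50.0*365*24*60*60) / spanSeconds); // ~9.13e-6
    printf("expected=%d threshold=%g p(only 120 blocks)=%g\n",
           blocksExpected, alertThreshold, PoissonPmfSketch(blocksExpected, 120));
    return 0;
}
*/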
2387
2388static int64_t nTimeVerify = 0;
2389static int64_t nTimeConnect = 0;
2390static int64_t nTimeIndex = 0;
2391static int64_t nTimeCallbacks = 0;
2392static int64_t nTimeTotal = 0;
2393
2394bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
2395{
2396 const CChainParams& chainparams = Params();
2397 //fprintf(stderr,"connectblock ht.%d\n",(int32_t)pindex->nHeight);
2398 AssertLockHeld(cs_main);
2399 bool fExpensiveChecks = true;
2400 if (fCheckpointsEnabled) {
2401 CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
2402 if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
2403 // This block is an ancestor of a checkpoint: disable script checks
2404 fExpensiveChecks = false;
2405 }
2406 }
2407 auto verifier = libzcash::ProofVerifier::Strict();
2408 auto disabledVerifier = libzcash::ProofVerifier::Disabled();
2409
2410 // Check it again to verify JoinSplit proofs, and in case a previous version let a bad block in
2411 if (!CheckBlock(pindex->nHeight,pindex,block, state, fExpensiveChecks ? verifier : disabledVerifier, !fJustCheck, !fJustCheck))
2412 return false;
2413
2414 // verify that the view's current state corresponds to the previous block
2415 uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
2416 assert(hashPrevBlock == view.GetBestBlock());
2417
2418 // Special case for the genesis block, skipping connection of its transactions
2419 // (its coinbase is unspendable)
2420 if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
2421 if (!fJustCheck) {
2422 view.SetBestBlock(pindex->GetBlockHash());
2423 // Before the genesis block, there was an empty tree
2424 ZCIncrementalMerkleTree tree;
2425 pindex->hashAnchor = tree.root();
2426 // The genesis block contained no JoinSplits
2427 pindex->hashAnchorEnd = pindex->hashAnchor;
2428 }
2429 return true;
2430 }
2431
2432 bool fScriptChecks = (!fCheckpointsEnabled || pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints()));
2433 //if ( KOMODO_TESTNET_EXPIRATION != 0 && pindex->nHeight > KOMODO_TESTNET_EXPIRATION ) // "testnet"
2434 // return(false);
2435 // Do not allow blocks that contain transactions which 'overwrite' older transactions,
2436 // unless those are already completely spent.
2437 BOOST_FOREACH(const CTransaction& tx, block.vtx) {
2438 const CCoins* coins = view.AccessCoins(tx.GetHash());
2439 if (coins && !coins->IsPruned())
2440 return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
2441 REJECT_INVALID, "bad-txns-BIP30");
2442 }
2443
2444 unsigned int flags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
2445
2446 // DERSIG (BIP66) is also always enforced, but does not have a flag.
2447
2448 CBlockUndo blockundo;
2449
2450 CCheckQueueControl<CScriptCheck> control(fExpensiveChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
2451
2452 int64_t nTimeStart = GetTimeMicros();
2453 CAmount nFees = 0;
2454 int nInputs = 0;
2455 int64_t interest,sum = 0;
2456 unsigned int nSigOps = 0;
2457 CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
2458 std::vector<std::pair<uint256, CDiskTxPos> > vPos;
2459 vPos.reserve(block.vtx.size());
2460 blockundo.vtxundo.reserve(block.vtx.size() - 1);
2461
2462 // Construct the incremental merkle tree at the current
2463 // block position,
2464 auto old_tree_root = view.GetBestAnchor();
2465 // saving the top anchor in the block index as we go.
2466 if (!fJustCheck) {
2467 pindex->hashAnchor = old_tree_root;
2468 }
2469 ZCIncrementalMerkleTree tree;
2470 // This should never fail: we should always be able to get the root
2471 // that is on the tip of our chain
2472 assert(view.GetAnchorAt(old_tree_root, tree));
2473
2474 {
2475 // Consistency check: the root of the tree we're given should
2476 // match what we asked for.
2477 assert(tree.root() == old_tree_root);
2478 }
2479
2480 // Grab the consensus branch ID for the block's height
2481 auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, Params().GetConsensus());
2482
2483 std::vector<PrecomputedTransactionData> txdata;
2484 txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
2485 for (unsigned int i = 0; i < block.vtx.size(); i++)
2486 {
2487 const CTransaction &tx = block.vtx[i];
2488 nInputs += tx.vin.size();
2489 nSigOps += GetLegacySigOpCount(tx);
2490 if (nSigOps > MAX_BLOCK_SIGOPS)
2491 return state.DoS(100, error("ConnectBlock(): too many sigops"),
2492 REJECT_INVALID, "bad-blk-sigops");
2493//fprintf(stderr,"ht.%d vout0 t%u\n",pindex->nHeight,tx.nLockTime);
2494 if (!tx.IsCoinBase())
2495 {
2496 if (!view.HaveInputs(tx))
2497 return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
2498 REJECT_INVALID, "bad-txns-inputs-missingorspent");
2499
2500 // are the JoinSplit's requirements met?
2501 if (!view.HaveJoinSplitRequirements(tx))
2502 return state.DoS(100, error("ConnectBlock(): JoinSplit requirements not met"),
2503 REJECT_INVALID, "bad-txns-joinsplit-requirements-not-met");
2504
2505 // Add in sigops done by pay-to-script-hash inputs;
2506 // this is to prevent a "rogue miner" from creating
2507 // an incredibly-expensive-to-validate block.
2508 nSigOps += GetP2SHSigOpCount(tx, view);
2509 if (nSigOps > MAX_BLOCK_SIGOPS)
2510 return state.DoS(100, error("ConnectBlock(): too many sigops"),
2511 REJECT_INVALID, "bad-blk-sigops");
2512 }
2513
2514 txdata.emplace_back(tx);
2515
2516 if (!tx.IsCoinBase())
2517 {
2518 nFees += view.GetValueIn(chainActive.Tip()->nHeight,&interest,tx,chainActive.Tip()->nTime) - tx.GetValueOut();
2519 sum += interest;
2520
2521 std::vector<CScriptCheck> vChecks;
2522 if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false, txdata[i], chainparams.GetConsensus(), consensusBranchId, nScriptCheckThreads ? &vChecks : NULL))
2523 return false;
2524 control.Add(vChecks);
2525 }
2526 //if ( ASSETCHAINS_SYMBOL[0] == 0 )
2527 // komodo_earned_interest(pindex->nHeight,sum);
2528 CTxUndo undoDummy;
2529 if (i > 0) {
2530 blockundo.vtxundo.push_back(CTxUndo());
2531 }
2532 UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
2533
2534 BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
2535 BOOST_FOREACH(const uint256 &note_commitment, joinsplit.commitments) {
2536 // Insert the note commitments into our temporary tree.
2537
2538 tree.append(note_commitment);
2539 }
2540 }
2541
2542 vPos.push_back(std::make_pair(tx.GetHash(), pos));
2543 pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
2544 }
2545
2546 view.PushAnchor(tree);
2547 if (!fJustCheck) {
2548 pindex->hashAnchorEnd = tree.root();
2549 }
2550 blockundo.old_tree_root = old_tree_root;
2551
2552 int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
2553 LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
2554
2555 CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()) + sum;
2556 if ( block.vtx[0].GetValueOut() > blockReward+1 )
2557 {
2558 if ( pindex->nHeight >= KOMODO_NOTARIES_HEIGHT1 || block.vtx[0].vout[0].nValue > blockReward )
2559 {
2560 return state.DoS(100,
2561 error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
2562 block.vtx[0].GetValueOut(), blockReward),
2563 REJECT_INVALID, "bad-cb-amount");
2564 } else if ( NOTARY_PUBKEY33[0] != 0 )
2565 fprintf(stderr,"allow nHeight.%d coinbase %.8f vs %.8f interest %.8f\n",(int32_t)pindex->nHeight,dstr(block.vtx[0].GetValueOut()),dstr(blockReward),dstr(sum));
2566 }
2567 if (!control.Wait())
2568 return state.DoS(100, false);
2569 int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart;
2570 LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001);
2571
2572 if (fJustCheck)
2573 return true;
2574
2575 // Write undo information to disk
2576 if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
2577 {
2578 if (pindex->GetUndoPos().IsNull()) {
2579 CDiskBlockPos pos;
2580 if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
2581 return error("ConnectBlock(): FindUndoPos failed");
2582 if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
2583 return AbortNode(state, "Failed to write undo data");
2584
2585 // update nUndoPos in block index
2586 pindex->nUndoPos = pos.nPos;
2587 pindex->nStatus |= BLOCK_HAVE_UNDO;
2588 }
2589
2590 // Now that all consensus rules have been validated, set nCachedBranchId.
2591 // Move this if BLOCK_VALID_CONSENSUS is ever altered.
2592 static_assert(BLOCK_VALID_CONSENSUS == BLOCK_VALID_SCRIPTS,
2593 "nCachedBranchId must be set after all consensus rules have been validated.");
2594 if (IsActivationHeightForAnyUpgrade(pindex->nHeight, Params().GetConsensus())) {
2595 pindex->nStatus |= BLOCK_ACTIVATES_UPGRADE;
2596 pindex->nCachedBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
2597 } else if (pindex->pprev) {
2598 pindex->nCachedBranchId = pindex->pprev->nCachedBranchId;
2599 }
2600
2601 pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
2602 setDirtyBlockIndex.insert(pindex);
2603 }
2604
2605 if (fTxIndex)
2606 if (!pblocktree->WriteTxIndex(vPos))
2607 return AbortNode(state, "Failed to write transaction index");
2608
2609 // add this block to the view's block chain
2610 view.SetBestBlock(pindex->GetBlockHash());
2611
2612 int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2;
2613 LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001);
2614
2615 // Watch for changes to the previous coinbase transaction.
2616 static uint256 hashPrevBestCoinBase;
2617 GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
2618 hashPrevBestCoinBase = block.vtx[0].GetHash();
2619
2620 int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3;
2621 LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001);
2622
2623 //FlushStateToDisk();
2624 komodo_connectblock(pindex,*(CBlock *)&block);
2625 return true;
2626}
2627
2628enum FlushStateMode {
2629 FLUSH_STATE_NONE,
2630 FLUSH_STATE_IF_NEEDED,
2631 FLUSH_STATE_PERIODIC,
2632 FLUSH_STATE_ALWAYS
2633};
2634
2635/**
2636 * Update the on-disk chain state.
2637 * The caches and indexes are flushed depending on the mode we're called with
2638 * if they're too large, if it's been a while since the last write,
2639 * or always and in all cases if we're in prune mode and are deleting files.
2640 */
2641bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
2642 LOCK2(cs_main, cs_LastBlockFile);
2643 static int64_t nLastWrite = 0;
2644 static int64_t nLastFlush = 0;
2645 static int64_t nLastSetChain = 0;
2646 std::set<int> setFilesToPrune;
2647 bool fFlushForPrune = false;
2648 try {
2649 if (fPruneMode && fCheckForPruning && !fReindex) {
2650 FindFilesToPrune(setFilesToPrune);
2651 fCheckForPruning = false;
2652 if (!setFilesToPrune.empty()) {
2653 fFlushForPrune = true;
2654 if (!fHavePruned) {
2655 pblocktree->WriteFlag("prunedblockfiles", true);
2656 fHavePruned = true;
2657 }
2658 }
2659 }
2660 int64_t nNow = GetTimeMicros();
2661 // Avoid writing/flushing immediately after startup.
2662 if (nLastWrite == 0) {
2663 nLastWrite = nNow;
2664 }
2665 if (nLastFlush == 0) {
2666 nLastFlush = nNow;
2667 }
2668 if (nLastSetChain == 0) {
2669 nLastSetChain = nNow;
2670 }
2671 size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
2672 // The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
2673 bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
2674 // The cache is over the limit, we have to write now.
2675 bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
2676 // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
2677 bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
2678 // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
2679 bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
2680 // Combine all conditions that result in a full cache flush.
2681 bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
2682 // Write blocks and block index to disk.
2683 if (fDoFullFlush || fPeriodicWrite) {
2684 // Depend on nMinDiskSpace to ensure we can write block index
2685 if (!CheckDiskSpace(0))
2686 return state.Error("out of disk space");
2687 // First make sure all block and undo data is flushed to disk.
2688 FlushBlockFile();
2689 // Then update all block file information (which may refer to block and undo files).
2690 {
2691 std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
2692 vFiles.reserve(setDirtyFileInfo.size());
2693 for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
2694 vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
2695 setDirtyFileInfo.erase(it++);
2696 }
2697 std::vector<const CBlockIndex*> vBlocks;
2698 vBlocks.reserve(setDirtyBlockIndex.size());
2699 for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
2700 vBlocks.push_back(*it);
2701 setDirtyBlockIndex.erase(it++);
2702 }
2703 if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
2704                 return AbortNode(state, "Failed to write to block index database");
2705 }
2706 }
2707 // Finally remove any pruned files
2708 if (fFlushForPrune)
2709 UnlinkPrunedFiles(setFilesToPrune);
2710 nLastWrite = nNow;
2711 }
2712 // Flush best chain related state. This can only be done if the blocks / block index write was also done.
2713 if (fDoFullFlush) {
2714 // Typical CCoins structures on disk are around 128 bytes in size.
2715 // Pushing a new one to the database can cause it to be written
2716 // twice (once in the log, and once in the tables). This is already
2717 // an overestimation, as most will delete an existing entry or
2718 // overwrite one. Still, use a conservative safety factor of 2.
2719 if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
2720 return state.Error("out of disk space");
2721 // Flush the chainstate (which may refer to block index entries).
2722 if (!pcoinsTip->Flush())
2723 return AbortNode(state, "Failed to write to coin database");
2724 nLastFlush = nNow;
2725 }
2726 if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) {
2727 // Update best block in wallet (so we can detect restored wallets).
2728 GetMainSignals().SetBestChain(chainActive.GetLocator());
2729 nLastSetChain = nNow;
2730 }
2731 } catch (const std::runtime_error& e) {
2732 return AbortNode(state, std::string("System error while flushing: ") + e.what());
2733 }
2734 return true;
2735}
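// Commented-out arithmetic sketch of the disk-space guard inside the full-flush branch
// above: each cached CCoins entry is budgeted at 128 bytes, doubled once for the
// log-plus-tables double write and once more as a safety factor, so a cache of one
// million entries asks for roughly 512 MB of free space. The helper name is illustrative.
/*
static unsigned long long RequiredFlushSpaceSketch(unsigned long long cacheEntries)
{
    return 128ULL * 2 * 2 * cacheEntries;  // 1,000,000 entries -> 512,000,000 bytes
}
*/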
2736
2737void FlushStateToDisk() {
2738 CValidationState state;
2739 FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
2740}
2741
2742void PruneAndFlush() {
2743 CValidationState state;
2744 fCheckForPruning = true;
2745 FlushStateToDisk(state, FLUSH_STATE_NONE);
2746}
2747
2748/** Update chainActive and related internal data structures. */
2749void static UpdateTip(CBlockIndex *pindexNew) {
2750 const CChainParams& chainParams = Params();
2751 chainActive.SetTip(pindexNew);
2752
2753 // New best block
2754 nTimeBestReceived = GetTime();
2755 mempool.AddTransactionsUpdated(1);
2756
2757 LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
2758 chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
2759 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
2760 Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
2761
2762 cvBlockChange.notify_all();
2763
2764 // Check the version of the last 100 blocks to see if we need to upgrade:
2765 static bool fWarned = false;
2766 if (!IsInitialBlockDownload() && !fWarned)
2767 {
2768 int nUpgraded = 0;
2769 const CBlockIndex* pindex = chainActive.Tip();
2770 for (int i = 0; i < 100 && pindex != NULL; i++)
2771 {
2772 if (pindex->nVersion > CBlock::CURRENT_VERSION)
2773 ++nUpgraded;
2774 pindex = pindex->pprev;
2775 }
2776 if (nUpgraded > 0)
2777 LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
2778 if (nUpgraded > 100/2)
2779 {
2780 // strMiscWarning is read by GetWarnings(), called by the JSON-RPC code to warn the user:
2781 strMiscWarning = _("Warning: This version is obsolete; upgrade required!");
2782 CAlert::Notify(strMiscWarning, true);
2783 fWarned = true;
2784 }
2785 }
2786}
2787
2788/**
2789 * Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and
2790 * mempool.removeWithoutBranchId after this, with cs_main held.
2791 */
2792bool static DisconnectTip(CValidationState &state, bool fBare = false) {
2793 CBlockIndex *pindexDelete = chainActive.Tip();
2794 assert(pindexDelete);
2795 // Read block from disk.
2796 CBlock block;
2797 if (!ReadBlockFromDisk(block, pindexDelete))
2798 return AbortNode(state, "Failed to read block");
2799 // Apply the block atomically to the chain state.
2800 uint256 anchorBeforeDisconnect = pcoinsTip->GetBestAnchor();
2801 int64_t nStart = GetTimeMicros();
2802 {
2803 CCoinsViewCache view(pcoinsTip);
2804 if (!DisconnectBlock(block, state, pindexDelete, view))
2805 return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
2806 assert(view.Flush());
2807 }
2808 LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
2809 uint256 anchorAfterDisconnect = pcoinsTip->GetBestAnchor();
2810 // Write the chain state to disk, if necessary.
2811 if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
2812 return false;
2813
2814 if (!fBare) {
2815 // Resurrect mempool transactions from the disconnected block.
2816 BOOST_FOREACH(const CTransaction &tx, block.vtx) {
2817 // ignore validation errors in resurrected transactions
2818 list<CTransaction> removed;
2819 CValidationState stateDummy;
2820 if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
2821 mempool.remove(tx, removed, true);
2822 }
2823 if (anchorBeforeDisconnect != anchorAfterDisconnect) {
2824 // The anchor may not change between block disconnects,
2825 // in which case we don't want to evict from the mempool yet!
2826 mempool.removeWithAnchor(anchorBeforeDisconnect);
2827 }
2828 }
2829
2830 // Update chainActive and related variables.
2831 UpdateTip(pindexDelete->pprev);
2832 // Get the current commitment tree
2833 ZCIncrementalMerkleTree newTree;
2834 assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), newTree));
2835 // Let wallets know transactions went from 1-confirmed to
2836 // 0-confirmed or conflicted:
2837 BOOST_FOREACH(const CTransaction &tx, block.vtx) {
2838 SyncWithWallets(tx, NULL);
2839 }
2840 // Update cached incremental witnesses
2841 //fprintf(stderr,"chaintip false\n");
2842 GetMainSignals().ChainTip(pindexDelete, &block, newTree, false);
2843 return true;
2844}
2845
2846static int64_t nTimeReadFromDisk = 0;
2847static int64_t nTimeConnectTotal = 0;
2848static int64_t nTimeFlush = 0;
2849static int64_t nTimeChainState = 0;
2850static int64_t nTimePostConnect = 0;
2851
2852/**
2853 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
2854 * corresponding to pindexNew, to bypass loading it again from disk.
2855 * You probably want to call mempool.removeWithoutBranchId after this, with cs_main held.
2856 */
2857bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *pblock) {
2858
2859 assert(pindexNew->pprev == chainActive.Tip());
2860 // Read block from disk.
2861 int64_t nTime1 = GetTimeMicros();
2862 CBlock block;
2863 if (!pblock) {
2864 if (!ReadBlockFromDisk(block, pindexNew))
2865 return AbortNode(state, "Failed to read block");
2866 pblock = &block;
2867 }
2868 // Get the current commitment tree
2869 ZCIncrementalMerkleTree oldTree;
2870 assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), oldTree));
2871 // Apply the block atomically to the chain state.
2872 int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
2873 int64_t nTime3;
2874 LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
2875 {
2876 CCoinsViewCache view(pcoinsTip);
2877 bool rv = ConnectBlock(*pblock, state, pindexNew, view);
2878 GetMainSignals().BlockChecked(*pblock, state);
2879 if (!rv) {
2880 if (state.IsInvalid())
2881 InvalidBlockFound(pindexNew, state);
2882 return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
2883 }
2884 mapBlockSource.erase(pindexNew->GetBlockHash());
2885 nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
2886 LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
2887 assert(view.Flush());
2888 }
2889 int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
2890 LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
2891 // Write the chain state to disk, if necessary.
2892 if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
2893 return false;
2894 int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
2895 LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
2896 // Remove conflicting transactions from the mempool.
2897 list<CTransaction> txConflicted;
2898 mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
2899
2900 // Remove transactions that expire at new block height from mempool
2901 mempool.removeExpired(pindexNew->nHeight);
2902
2903 // Update chainActive & related variables.
2904 UpdateTip(pindexNew);
2905 // Tell wallet about transactions that went from mempool
2906 // to conflicted:
2907 BOOST_FOREACH(const CTransaction &tx, txConflicted) {
2908 SyncWithWallets(tx, NULL);
2909 }
2910 // ... and about transactions that got confirmed:
2911 BOOST_FOREACH(const CTransaction &tx, pblock->vtx) {
2912 SyncWithWallets(tx, pblock);
2913 }
2914 // Update cached incremental witnesses
2915 //fprintf(stderr,"chaintip true\n");
2916 GetMainSignals().ChainTip(pindexNew, pblock, oldTree, true);
2917
2918 EnforceNodeDeprecation(pindexNew->nHeight);
2919
2920 int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
2921 LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
2922 LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
2923 return true;
2924}
2925
2926/**
2927 * Return the tip of the chain with the most work in it that isn't
2928 * known to be invalid (it is, however, far from certain to be valid).
2929 */
2930static CBlockIndex* FindMostWorkChain() {
2931 do {
2932 CBlockIndex *pindexNew = NULL;
2933
2934 // Find the best candidate header.
2935 {
2936 std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
2937 if (it == setBlockIndexCandidates.rend())
2938 return NULL;
2939 pindexNew = *it;
2940 }
2941
2942 // Check whether all blocks on the path between the currently active chain and the candidate are valid.
2943        // Walking back only until we reach the active chain is an optimization, as we already know all blocks in it are valid.
2944 CBlockIndex *pindexTest = pindexNew;
2945 bool fInvalidAncestor = false;
2946 while (pindexTest && !chainActive.Contains(pindexTest)) {
2947 assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
2948
2949 // Pruned nodes may have entries in setBlockIndexCandidates for
2950 // which block files have been deleted. Remove those as candidates
2951 // for the most work chain if we come across them; we can't switch
2952 // to a chain unless we have all the non-active-chain parent blocks.
2953 bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
2954 bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
2955 if (fFailedChain || fMissingData) {
2956 // Candidate chain is not usable (either invalid or missing data)
2957 if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
2958 pindexBestInvalid = pindexNew;
2959 CBlockIndex *pindexFailed = pindexNew;
2960 // Remove the entire chain from the set.
2961 while (pindexTest != pindexFailed) {
2962 if (fFailedChain) {
2963 pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
2964 } else if (fMissingData) {
2965 // If we're missing data, then add back to mapBlocksUnlinked,
2966 // so that if the block arrives in the future we can try adding
2967 // to setBlockIndexCandidates again.
2968 mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
2969 }
2970 setBlockIndexCandidates.erase(pindexFailed);
2971 pindexFailed = pindexFailed->pprev;
2972 }
2973 setBlockIndexCandidates.erase(pindexTest);
2974 fInvalidAncestor = true;
2975 break;
2976 }
2977 pindexTest = pindexTest->pprev;
2978 }
2979 if (!fInvalidAncestor)
2980 return pindexNew;
2981 } while(true);
2982}
2983
2984/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
2985static void PruneBlockIndexCandidates() {
2986 // Note that we can't delete the current block itself, as we may need to return to it later in case a
2987 // reorganization to a better block fails.
2988 std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
2989 while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
2990 setBlockIndexCandidates.erase(it++);
2991 }
2992 // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
2993 assert(!setBlockIndexCandidates.empty());
2994}
2995
2996/**
2997 * Try to make some progress towards making pindexMostWork the active block.
2998 * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
2999 */
3000static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMostWork, CBlock *pblock) {
3001 AssertLockHeld(cs_main);
3002 bool fInvalidFound = false;
3003 const CBlockIndex *pindexOldTip = chainActive.Tip();
3004 const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
3005
3006 // - On ChainDB initialization, pindexOldTip will be null, so there are no removable blocks.
3007 // - If pindexMostWork is in a chain that doesn't have the same genesis block as our chain,
3008 // then pindexFork will be null, and we would need to remove the entire chain including
3009 // our genesis block. In practice this (probably) won't happen because of checks elsewhere.
3010 auto reorgLength = pindexOldTip ? pindexOldTip->nHeight - (pindexFork ? pindexFork->nHeight : -1) : 0;
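    // For example, if the old tip is at height 1000 and the fork point is at height 990,
    // reorgLength is 10; with no common fork point at all (different genesis), it would be
    // 1000 - (-1) = 1001, i.e. the entire chain including the genesis block.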
3011 static_assert(MAX_REORG_LENGTH > 0, "We must be able to reorg some distance");
3012 if (reorgLength > MAX_REORG_LENGTH) {
3013 auto msg = strprintf(_(
3014 "A block chain reorganization has been detected that would roll back %d blocks! "
3015 "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
3016 ), reorgLength, MAX_REORG_LENGTH) + "\n\n" +
3017 _("Reorganization details") + ":\n" +
3018 "- " + strprintf(_("Current tip: %s, height %d, work %s"),
3019 pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight, pindexOldTip->nChainWork.GetHex()) + "\n" +
3020 "- " + strprintf(_("New tip: %s, height %d, work %s"),
3021 pindexMostWork->phashBlock->GetHex(), pindexMostWork->nHeight, pindexMostWork->nChainWork.GetHex()) + "\n" +
3022 "- " + strprintf(_("Fork point: %s, height %d"),
3023 pindexFork->phashBlock->GetHex(), pindexFork->nHeight) + "\n\n" +
3024 _("Please help, human!");
3025 LogPrintf("*** %s\n", msg);
3026 uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
3027 StartShutdown();
3028 return false;
3029 }
3030
3031 // Disconnect active blocks which are no longer in the best chain.
3032 bool fBlocksDisconnected = false;
3033 while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
3034 if (!DisconnectTip(state))
3035 return false;
3036 fBlocksDisconnected = true;
3037 }
3038 if ( KOMODO_REWIND != 0 )
3039 {
3040 fprintf(stderr,"rewind start ht.%d\n",chainActive.Tip()->nHeight);
3041 while ( KOMODO_REWIND > 0 && chainActive.Tip()->nHeight > KOMODO_REWIND )
3042 {
3043 if ( !DisconnectTip(state) )
3044 {
3045 InvalidateBlock(state,chainActive.Tip());
3046 break;
3047 }
3048 }
3049 fprintf(stderr,"reached rewind.%d, best to do: ./komodo-cli stop\n",KOMODO_REWIND);
3050 sleep(60);
3051 KOMODO_REWIND = 0;
3052 return(true);
3053 }
3054 // Build list of new blocks to connect.
3055 std::vector<CBlockIndex*> vpindexToConnect;
3056 bool fContinue = true;
3057 int nHeight = pindexFork ? pindexFork->nHeight : -1;
3058 while (fContinue && nHeight != pindexMostWork->nHeight) {
3059 // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
3060 // a few blocks along the way.
3061 int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
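        // Capping each pass at 32 blocks (plus the early break below once the new tip has
        // more work than the old one) keeps individual steps short, so the caller can
        // release cs_main between ActivateBestChainStep invocations during long reorgs.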
3062 vpindexToConnect.clear();
3063 vpindexToConnect.reserve(nTargetHeight - nHeight);
3064 CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
3065 while (pindexIter && pindexIter->nHeight != nHeight) {
3066 vpindexToConnect.push_back(pindexIter);
3067 pindexIter = pindexIter->pprev;
3068 }
3069 nHeight = nTargetHeight;
3070
3071 // Connect new blocks.
3072 BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
3073 if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
3074 if (state.IsInvalid()) {
3075 // The block violates a consensus rule.
3076 if (!state.CorruptionPossible())
3077 InvalidChainFound(vpindexToConnect.back());
3078 state = CValidationState();
3079 fInvalidFound = true;
3080 fContinue = false;
3081 break;
3082 } else {
3083 // A system error occurred (disk space, database error, ...).
3084 return false;
3085 }
3086 } else {
3087 PruneBlockIndexCandidates();
3088 if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
3089 // We're in a better position than we were. Return temporarily to release the lock.
3090 fContinue = false;
3091 break;
3092 }
3093 }
3094 }
3095 }
3096
3097 if (fBlocksDisconnected) {
3098 mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
3099 }
3100 mempool.removeWithoutBranchId(
3101 CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
3102 mempool.check(pcoinsTip);
3103
3104 // Callbacks/notifications for a new best chain.
3105 if (fInvalidFound)
3106 CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
3107 else
3108 CheckForkWarningConditions();
3109
3110 return true;
3111}
3112
3113/**
3114 * Make the best chain active, in multiple steps. The result is either failure
3115 * or an activated best chain. pblock is either NULL or a pointer to a block
3116 * that is already loaded (to avoid loading it again from disk).
3117 */
3118bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
3119 CBlockIndex *pindexNewTip = NULL;
3120 CBlockIndex *pindexMostWork = NULL;
3121 const CChainParams& chainParams = Params();
3122 do {
3123 boost::this_thread::interruption_point();
3124
3125 bool fInitialDownload;
3126 {
3127 LOCK(cs_main);
3128 pindexMostWork = FindMostWorkChain();
3129
3130 // Whether we have anything to do at all.
3131 if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
3132 return true;
3133
3134 if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
3135 return false;
3136 pindexNewTip = chainActive.Tip();
3137 fInitialDownload = IsInitialBlockDownload();
3138 }
3139 // When we reach this point, we switched to a new tip (stored in pindexNewTip).
3140
3141 // Notifications/callbacks that can run without cs_main
3142 if (!fInitialDownload) {
3143 uint256 hashNewTip = pindexNewTip->GetBlockHash();
3144 // Relay inventory, but don't relay old inventory during initial block download.
3145 int nBlockEstimate = 0;
3146 if (fCheckpointsEnabled)
3147 nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints());
3148 // Don't relay blocks if pruning -- could cause a peer to try to download, resulting
3149 // in a stalled download if the block file is pruned before the request.
3150 if (nLocalServices & NODE_NETWORK) {
3151 LOCK(cs_vNodes);
3152 BOOST_FOREACH(CNode* pnode, vNodes)
3153 if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
3154 pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip));
3155 }
3156 // Notify external listeners about the new tip.
3157 GetMainSignals().UpdatedBlockTip(pindexNewTip);
3158 uiInterface.NotifyBlockTip(hashNewTip);
3159 } //else fprintf(stderr,"initial download skips propagation\n");
3160 } while(pindexMostWork != chainActive.Tip());
3161 CheckBlockIndex();
3162
3163 // Write changes periodically to disk, after relay.
3164 if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
3165 return false;
3166 }
3167
3168 return true;
3169}
3170
3171bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
3172 AssertLockHeld(cs_main);
3173
3174 // Mark the block itself as invalid.
3175 pindex->nStatus |= BLOCK_FAILED_VALID;
3176 setDirtyBlockIndex.insert(pindex);
3177 setBlockIndexCandidates.erase(pindex);
3178
3179 while (chainActive.Contains(pindex)) {
3180 CBlockIndex *pindexWalk = chainActive.Tip();
3181 pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
3182 setDirtyBlockIndex.insert(pindexWalk);
3183 setBlockIndexCandidates.erase(pindexWalk);
3184 // ActivateBestChain considers blocks already in chainActive
3185        // unconditionally valid, so force disconnect away from it.
3186 if (!DisconnectTip(state)) {
3187 mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
3188 mempool.removeWithoutBranchId(
3189 CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
3190 return false;
3191 }
3192 }
3193 //LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
3194
3195 // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
3196 // add it again.
3197 BlockMap::iterator it = mapBlockIndex.begin();
3198 while (it != mapBlockIndex.end() && it->second != 0 ) {
3199 if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
3200 setBlockIndexCandidates.insert(it->second);
3201 }
3202 it++;
3203 }
3204
3205 InvalidChainFound(pindex);
3206 mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
3207 mempool.removeWithoutBranchId(
3208 CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
3209 return true;
3210}
3211
3212bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
3213 AssertLockHeld(cs_main);
3214
3215 int nHeight = pindex->nHeight;
3216
3217 // Remove the invalidity flag from this block and all its descendants.
3218 BlockMap::iterator it = mapBlockIndex.begin();
3219 while (it != mapBlockIndex.end()) {
3220 if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
3221 it->second->nStatus &= ~BLOCK_FAILED_MASK;
3222 setDirtyBlockIndex.insert(it->second);
3223 if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
3224 setBlockIndexCandidates.insert(it->second);
3225 }
3226 if (it->second == pindexBestInvalid) {
3227 // Reset invalid block marker if it was pointing to one of those.
3228 pindexBestInvalid = NULL;
3229 }
3230 }
3231 it++;
3232 }
3233
3234 // Remove the invalidity flag from all ancestors too.
3235 while (pindex != NULL) {
3236 if (pindex->nStatus & BLOCK_FAILED_MASK) {
3237 pindex->nStatus &= ~BLOCK_FAILED_MASK;
3238 setDirtyBlockIndex.insert(pindex);
3239 }
3240 pindex = pindex->pprev;
3241 }
3242 return true;
3243}
3244
3245CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
3246{
3247 // Check for duplicate
3248 uint256 hash = block.GetHash();
3249 BlockMap::iterator it = mapBlockIndex.find(hash);
3250 if (it != mapBlockIndex.end())
3251 return it->second;
3252
3253 // Construct new block index object
3254 CBlockIndex* pindexNew = new CBlockIndex(block);
3255 assert(pindexNew);
3256    // We assign the sequence id to blocks only when the full data is available,
3257    // to prevent miners from gaining a competitive advantage by withholding
3258    // blocks while broadcasting only their headers.
3259 pindexNew->nSequenceId = 0;
3260 BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
3261 pindexNew->phashBlock = &((*mi).first);
3262 BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
3263 if (miPrev != mapBlockIndex.end())
3264 {
3265 pindexNew->pprev = (*miPrev).second;
3266 pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3267 pindexNew->BuildSkip();
3268 }
3269 pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
3270 pindexNew->RaiseValidity(BLOCK_VALID_TREE);
3271 if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
3272 pindexBestHeader = pindexNew;
3273
3274 setDirtyBlockIndex.insert(pindexNew);
3275
3276 return pindexNew;
3277}
3278
3279/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
3280bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
3281{
3282 pindexNew->nTx = block.vtx.size();
3283 pindexNew->nChainTx = 0;
3284 CAmount sproutValue = 0;
3285 for (auto tx : block.vtx) {
3286 for (auto js : tx.vjoinsplit) {
3287 sproutValue += js.vpub_old;
3288 sproutValue -= js.vpub_new;
3289 }
3290 }
3291 pindexNew->nSproutValue = sproutValue;
3292 pindexNew->nChainSproutValue = boost::none;
3293 pindexNew->nFile = pos.nFile;
3294 pindexNew->nDataPos = pos.nPos;
3295 pindexNew->nUndoPos = 0;
3296 pindexNew->nStatus |= BLOCK_HAVE_DATA;
3297 pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
3298 setDirtyBlockIndex.insert(pindexNew);
3299
3300 if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
3301 // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
3302 deque<CBlockIndex*> queue;
3303 queue.push_back(pindexNew);
3304
3305 // Recursively process any descendant blocks that now may be eligible to be connected.
3306 while (!queue.empty()) {
3307 CBlockIndex *pindex = queue.front();
3308 queue.pop_front();
3309 pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
3310 if (pindex->pprev) {
3311 if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) {
3312 pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue;
3313 } else {
3314 pindex->nChainSproutValue = boost::none;
3315 }
3316 } else {
3317 pindex->nChainSproutValue = pindex->nSproutValue;
3318 }
3319 {
3320 LOCK(cs_nBlockSequenceId);
3321 pindex->nSequenceId = nBlockSequenceId++;
3322 }
3323 if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
3324 setBlockIndexCandidates.insert(pindex);
3325 }
3326 std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
3327 while (range.first != range.second) {
3328 std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
3329 queue.push_back(it->second);
3330 range.first++;
3331 mapBlocksUnlinked.erase(it);
3332 }
3333 }
3334 } else {
3335 if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
3336 mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
3337 }
3338 }
3339
3340 return true;
3341}
3342
3343bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
3344{
3345 LOCK(cs_LastBlockFile);
3346
3347 unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3348 if (vinfoBlockFile.size() <= nFile) {
3349 vinfoBlockFile.resize(nFile + 1);
3350 }
3351
3352 if (!fKnown) {
3353 while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3354 nFile++;
3355 if (vinfoBlockFile.size() <= nFile) {
3356 vinfoBlockFile.resize(nFile + 1);
3357 }
3358 }
3359 pos.nFile = nFile;
3360 pos.nPos = vinfoBlockFile[nFile].nSize;
3361 }
3362
3363 if (nFile != nLastBlockFile) {
3364 if (!fKnown) {
3365 LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
3366 }
3367 FlushBlockFile(!fKnown);
3368 nLastBlockFile = nFile;
3369 }
3370
3371 vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3372 if (fKnown)
3373 vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3374 else
3375 vinfoBlockFile[nFile].nSize += nAddSize;
3376
3377 if (!fKnown) {
3378 unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
3379 unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
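        // Both divisions round up to whole chunks: e.g. with 16 MiB chunks, growing the file
        // from 15.9 MiB (1 chunk) past 16 MiB (2 chunks) makes nNewChunks > nOldChunks and
        // triggers pre-allocation of the next chunk below.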
3380 if (nNewChunks > nOldChunks) {
3381 if (fPruneMode)
3382 fCheckForPruning = true;
3383 if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
3384 FILE *file = OpenBlockFile(pos);
3385 if (file) {
3386 LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
3387 AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
3388 fclose(file);
3389 }
3390 }
3391 else
3392 return state.Error("out of disk space");
3393 }
3394 }
3395
3396 setDirtyFileInfo.insert(nFile);
3397 return true;
3398}
3399
3400bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
3401{
3402 pos.nFile = nFile;
3403
3404 LOCK(cs_LastBlockFile);
3405
3406 unsigned int nNewSize;
3407 pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3408 nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
3409 setDirtyFileInfo.insert(nFile);
3410
3411 unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
3412 unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
3413 if (nNewChunks > nOldChunks) {
3414 if (fPruneMode)
3415 fCheckForPruning = true;
3416 if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
3417 FILE *file = OpenUndoFile(pos);
3418 if (file) {
3419 LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
3420 AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
3421 fclose(file);
3422 }
3423 }
3424 else
3425 return state.Error("out of disk space");
3426 }
3427
3428 return true;
3429}
3430
3431bool CheckBlockHeader(int32_t height,CBlockIndex *pindex, const CBlockHeader& blockhdr, CValidationState& state, bool fCheckPOW)
3432{
3433 uint8_t pubkey33[33];
3434 // Check timestamp
3435 if ( 0 )
3436 {
3437 uint256 hash; int32_t i;
3438 hash = blockhdr.GetHash();
3439 for (i=31; i>=0; i--)
3440 fprintf(stderr,"%02x",((uint8_t *)&hash)[i]);
3441 fprintf(stderr," <- CheckBlockHeader\n");
3442 if ( chainActive.Tip() != 0 )
3443 {
3444 hash = chainActive.Tip()->GetBlockHash();
3445 for (i=31; i>=0; i--)
3446 fprintf(stderr,"%02x",((uint8_t *)&hash)[i]);
3447 fprintf(stderr," <- chainTip\n");
3448 }
3449 }
3450 if (blockhdr.GetBlockTime() > GetAdjustedTime() + 60)
3451 return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),REJECT_INVALID, "time-too-new");
3452 // Check block version
3453 //if (block.nVersion < MIN_BLOCK_VERSION)
3454 // return state.DoS(100, error("CheckBlockHeader(): block version too low"),REJECT_INVALID, "version-too-low");
3455
3456 // Check Equihash solution is valid
3457 if ( fCheckPOW && !CheckEquihashSolution(&blockhdr, Params()) )
3458 return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),REJECT_INVALID, "invalid-solution");
3459
3460 // Check proof of work matches claimed amount
3461 komodo_index2pubkey33(pubkey33,pindex,height);
3462 if ( fCheckPOW && !CheckProofOfWork(height,pubkey33,blockhdr.GetHash(), blockhdr.nBits, Params().GetConsensus()) )
3463 return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),REJECT_INVALID, "high-hash");
3464 return true;
3465}
3466
3467int32_t komodo_check_deposit(int32_t height,const CBlock& block);
3468bool CheckBlock(int32_t height,CBlockIndex *pindex,const CBlock& block, CValidationState& state,
3469 libzcash::ProofVerifier& verifier,
3470 bool fCheckPOW, bool fCheckMerkleRoot)
3471{
3472 // These are checks that are independent of context.
3473
3474 // Check that the header is valid (particularly PoW). This is mostly
3475 // redundant with the call in AcceptBlockHeader.
3476 if (!CheckBlockHeader(height,pindex,block,state,fCheckPOW))
3477 return false;
3478
3479 // Check the merkle root.
3480 if (fCheckMerkleRoot) {
3481 bool mutated;
3482 uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
3483 if (block.hashMerkleRoot != hashMerkleRoot2)
3484 return state.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"),
3485 REJECT_INVALID, "bad-txnmrklroot", true);
3486
3487 // Check for merkle tree malleability (CVE-2012-2459): repeating sequences
3488 // of transactions in a block without affecting the merkle root of a block,
3489 // while still invalidating it.
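        // Concretely: when a block contains an odd number of transactions, appending a
        // duplicate of the last one yields the identical merkle root, so only the mutated
        // flag returned by BuildMerkleTree above distinguishes the two encodings.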
3490 if (mutated)
3491 return state.DoS(100, error("CheckBlock(): duplicate transaction"),
3492 REJECT_INVALID, "bad-txns-duplicate", true);
3493 }
3494
3495 // All potential-corruption validation must be done before we do any
3496 // transaction validation, as otherwise we may mark the header as invalid
3497 // because we receive the wrong transactions for it.
3498
3499 // Size limits
3500 if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
3501 return state.DoS(100, error("CheckBlock(): size limits failed"),
3502 REJECT_INVALID, "bad-blk-length");
3503
3504 // First transaction must be coinbase, the rest must not be
3505 if (!block.vtx[0].IsCoinBase())
3506 return state.DoS(100, error("CheckBlock(): first tx is not coinbase"),
3507 REJECT_INVALID, "bad-cb-missing");
3508
3509 for (unsigned int i = 1; i < block.vtx.size(); i++)
3510 if (block.vtx[i].IsCoinBase())
3511 return state.DoS(100, error("CheckBlock(): more than one coinbase"),
3512 REJECT_INVALID, "bad-cb-multiple");
3513
3514 // Check transactions
3515 BOOST_FOREACH(const CTransaction& tx, block.vtx)
3516 {
3517 if ( komodo_validate_interest(tx,height == 0 ? komodo_block2height((CBlock *)&block) : height,block.nTime,1) < 0 )
3518 return error("CheckBlock: komodo_validate_interest failed");
3519 if (!CheckTransaction(tx, state, verifier))
3520 return error("CheckBlock(): CheckTransaction failed");
3521 }
3522 unsigned int nSigOps = 0;
3523 BOOST_FOREACH(const CTransaction& tx, block.vtx)
3524 {
3525 nSigOps += GetLegacySigOpCount(tx);
3526 }
3527 if (nSigOps > MAX_BLOCK_SIGOPS)
3528 return state.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"),
3529 REJECT_INVALID, "bad-blk-sigops", true);
3530 if ( komodo_check_deposit(ASSETCHAINS_SYMBOL[0] == 0 ? height : pindex != 0 ? (int32_t)pindex->nHeight : chainActive.Tip()->nHeight+1,block) < 0 )
3531 {
3532 static uint32_t counter;
3533 if ( counter++ < 100 )
3534 fprintf(stderr,"check deposit rejection\n");
3535 return(false);
3536 }
3537 return true;
3538}
3539
3540bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
3541{
3542 const CChainParams& chainParams = Params();
3543 const Consensus::Params& consensusParams = chainParams.GetConsensus();
3544 uint256 hash = block.GetHash();
3545 if (hash == consensusParams.hashGenesisBlock)
3546 return true;
3547
3548 assert(pindexPrev);
3549
3550 int nHeight = pindexPrev->nHeight+1;
3551
3552 // Check proof of work
3553 if ( (nHeight < 235300 || nHeight > 236000) && block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
3554 {
3555 cout << block.nBits << " block.nBits vs. calc " << GetNextWorkRequired(pindexPrev, &block, consensusParams) << endl;
3556 return state.DoS(100, error("%s: incorrect proof of work", __func__),
3557 REJECT_INVALID, "bad-diffbits");
3558 }
3559
3560 // Check timestamp against prev
3561 if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
3562 return state.Invalid(error("%s: block's timestamp is too early", __func__),
3563 REJECT_INVALID, "time-too-old");
3564
3565 if (fCheckpointsEnabled)
3566 {
3567 // Check that the block chain matches the known block chain up to a checkpoint
3568 if (!Checkpoints::CheckBlock(chainParams.Checkpoints(), nHeight, hash))
3569 return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight),REJECT_CHECKPOINT, "checkpoint mismatch");
3570
3571 // Don't accept any forks from the main chain prior to last checkpoint
3572 CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
3573 int32_t notarized_height;
3574        if (pcheckpoint && (nHeight < pcheckpoint->nHeight || (nHeight == 1 && chainActive.Tip() != 0 && chainActive.Tip()->nHeight > 1)) )
3575 return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d) vs %d", __func__, nHeight,pcheckpoint->nHeight));
3576 else if ( komodo_checkpoint(&notarized_height,nHeight,hash) < 0 )
3577 {
3578 CBlockIndex *heightblock = chainActive[nHeight];
3579 if ( heightblock != 0 && heightblock->GetBlockHash() == hash )
3580 {
3581 //fprintf(stderr,"got a pre notarization block that matches height.%d\n",(int32_t)nHeight);
3582 return true;
3583            } else return state.DoS(100, error("%s: forked chain %d older than last notarized height %d", __func__, nHeight, notarized_height));
3584 }
3585 }
3586 // Reject block.nVersion < 4 blocks
3587 if (block.nVersion < 4)
3588 return state.Invalid(error("%s : rejected nVersion<4 block", __func__),
3589 REJECT_OBSOLETE, "bad-version");
3590
3591 return true;
3592}
3593
3594bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
3595{
3596 const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
3597 const Consensus::Params& consensusParams = Params().GetConsensus();
3598
3599 // Check that all transactions are finalized
3600 BOOST_FOREACH(const CTransaction& tx, block.vtx) {
3601
3602 // Check transaction contextually against consensus rules at block height
3603 if (!ContextualCheckTransaction(tx, state, nHeight, 100)) {
3604 return false; // Failure reason has been set in validation state object
3605 }
3606
3607 int nLockTimeFlags = 0;
3608 int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3609 ? pindexPrev->GetMedianTimePast()
3610 : block.GetBlockTime();
3611 if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
3612 return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal");
3613 }
3614 }
3615
3616 // Enforce BIP 34 rule that the coinbase starts with serialized block height.
3617 // In Zcash this has been enforced since launch, except that the genesis
3618 // block didn't include the height in the coinbase (see Zcash protocol spec
3619 // section '6.8 Bitcoin Improvement Proposals').
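    // For example, at height 250000 the coinbase scriptSig is expected to begin with
    // 0x03 0x90 0xd0 0x03 (a push of the 3-byte little-endian encoding of 250000),
    // which is exactly what CScript() << nHeight serializes to below.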
3620 if (nHeight > 0)
3621 {
3622 CScript expect = CScript() << nHeight;
3623 if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
3624 !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
3625 return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height");
3626 }
3627 }
3628
3629 return true;
3630}
3631
3632bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
3633{
3634 const CChainParams& chainparams = Params();
3635 AssertLockHeld(cs_main);
3636 // Check for duplicate
3637 uint256 hash = block.GetHash();
3638 BlockMap::iterator miSelf = mapBlockIndex.find(hash);
3639 CBlockIndex *pindex = NULL;
3640 if (miSelf != mapBlockIndex.end()) {
3641 // Block header is already known.
3642 pindex = miSelf->second;
3643 if (ppindex)
3644 *ppindex = pindex;
3645 if (pindex != 0 && pindex->nStatus & BLOCK_FAILED_MASK)
3646 return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
3647 if ( pindex != 0 && IsInitialBlockDownload() == 0 ) // jl777 debug test
3648 {
3649 if (!CheckBlockHeader(pindex->nHeight,pindex, block, state))
3650 {
3651 pindex->nStatus |= BLOCK_FAILED_MASK;
3652 fprintf(stderr,"known block failing CheckBlockHeader %d\n",(int32_t)pindex->nHeight);
3653 return false;
3654 }
3655 CBlockIndex* pindexPrev = NULL;
3656 if (hash != chainparams.GetConsensus().hashGenesisBlock)
3657 {
3658 BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
3659 if (mi == mapBlockIndex.end())
3660 {
3661 pindex->nStatus |= BLOCK_FAILED_MASK;
3662 fprintf(stderr,"known block.%d failing to find prevblock\n",(int32_t)pindex->nHeight);
3663 return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
3664 }
3665 pindexPrev = (*mi).second;
3666 if (pindexPrev == 0 || (pindexPrev->nStatus & BLOCK_FAILED_MASK) )
3667 {
3668 pindex->nStatus |= BLOCK_FAILED_MASK;
3669 fprintf(stderr,"known block.%d found invalid prevblock\n",(int32_t)pindex->nHeight);
3670 return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
3671 }
3672 }
3673 if (!ContextualCheckBlockHeader(block, state, pindexPrev))
3674 {
3675 pindex->nStatus |= BLOCK_FAILED_MASK;
3676 fprintf(stderr,"known block.%d failing ContextualCheckBlockHeader\n",(int32_t)pindex->nHeight);
3677 return false;
3678 }
3679 }
3680
3681 return true;
3682 }
3683
3684 if (!CheckBlockHeader(*ppindex!=0?(*ppindex)->nHeight:0,*ppindex, block, state))
3685 return false;
3686
3687 // Get prev block index
3688 CBlockIndex* pindexPrev = NULL;
3689 if (hash != chainparams.GetConsensus().hashGenesisBlock) {
3690 BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
3691 if (mi == mapBlockIndex.end())
3692 {
3693 return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
3694 }
3695 pindexPrev = (*mi).second;
3696 if (pindexPrev == 0 || (pindexPrev->nStatus & BLOCK_FAILED_MASK) )
3697 return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
3698 }
3699 if (!ContextualCheckBlockHeader(block, state, pindexPrev))
3700 return false;
3701 if (pindex == NULL)
3702 pindex = AddToBlockIndex(block);
3703 if (ppindex)
3704 *ppindex = pindex;
3705 return true;
3706}
3707
3708bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
3709{
3710 const CChainParams& chainparams = Params();
3711 AssertLockHeld(cs_main);
3712
3713 CBlockIndex *&pindex = *ppindex;
3714 if (!AcceptBlockHeader(block, state, &pindex))
3715 return false;
3716 if ( pindex == 0 )
3717 {
3718 fprintf(stderr,"AcceptBlock error null pindex\n");
3719 return false;
3720 }
3721 // Try to process all requested blocks that we don't have, but only
3722 // process an unrequested block if it's new and has enough work to
3723 // advance our tip, and isn't too many blocks ahead.
3724 bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
3725 bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
3726 // Blocks that are too out-of-order needlessly limit the effectiveness of
3727 // pruning, because pruning will not delete block files that contain any
3728 // blocks which are too close in height to the tip. Apply this test
3729 // regardless of whether pruning is enabled; it should generally be safe to
3730 // not process unrequested blocks.
3731 bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
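    // e.g. an unrequested block announced thousands of blocks above our current tip
    // falls into fTooFarAhead and is simply ignored below.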
3732
3733 // TODO: deal better with return value and error conditions for duplicate
3734 // and unrequested blocks.
3735 if (fAlreadyHave) return true;
3736 if (!fRequested) { // If we didn't ask for it:
3737 if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
3738 if (!fHasMoreWork) return true; // Don't process less-work chains
3739 if (fTooFarAhead) return true; // Block height is too high
3740 }
3741
3742 // See method docstring for why this is always disabled
3743 auto verifier = libzcash::ProofVerifier::Disabled();
3744 if ((!CheckBlock(pindex->nHeight,pindex,block, state, verifier)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
3745 if (state.IsInvalid() && !state.CorruptionPossible()) {
3746 pindex->nStatus |= BLOCK_FAILED_VALID;
3747 setDirtyBlockIndex.insert(pindex);
3748 }
3749 return false;
3750 }
3751
3752 int nHeight = pindex->nHeight;
3753
3754 // Write block to history file
3755 try {
3756 unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
3757 CDiskBlockPos blockPos;
3758 if (dbp != NULL)
3759 blockPos = *dbp;
3760 if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
3761 return error("AcceptBlock(): FindBlockPos failed");
3762 if (dbp == NULL)
3763 if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
3764 AbortNode(state, "Failed to write block");
3765 if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
3766 return error("AcceptBlock(): ReceivedBlockTransactions failed");
3767 } catch (const std::runtime_error& e) {
3768 return AbortNode(state, std::string("System error: ") + e.what());
3769 }
3770
3771 if (fCheckForPruning)
3772 FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files
3773
3774 return true;
3775}
3776
3777static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
3778{
3779 unsigned int nFound = 0;
3780 for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
3781 {
3782 if (pstart->nVersion >= minVersion)
3783 ++nFound;
3784 pstart = pstart->pprev;
3785 }
3786 return (nFound >= nRequired);
3787}
3788
3789void komodo_currentheight_set(int32_t height);
3790
3791bool ProcessNewBlock(int32_t height,CValidationState &state, CNode* pfrom, CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp)
3792{
3793 // Preliminary checks
3794 bool checked;
3795 auto verifier = libzcash::ProofVerifier::Disabled();
3796 if ( chainActive.Tip() != 0 )
3797 komodo_currentheight_set(chainActive.Tip()->nHeight);
3798    // The KMD chain (ASSETCHAINS_SYMBOL[0] == 0) and asset chains take the same path here,
3799    // so a single CheckBlock call covers both cases.
3800    checked = CheckBlock(height!=0?height:komodo_block2height(pblock),0,*pblock, state, verifier);
3801 {
3802 LOCK(cs_main);
3803 bool fRequested = MarkBlockAsReceived(pblock->GetHash());
3804 fRequested |= fForceProcessing;
3805 if (!checked) {
3806 if ( pfrom != 0 )
3807 Misbehaving(pfrom->GetId(), 1);
3808 return error("%s: CheckBlock FAILED", __func__);
3809 }
3810
3811 // Store to disk
3812 CBlockIndex *pindex = NULL;
3813 bool ret = AcceptBlock(*pblock, state, &pindex, fRequested, dbp);
3814 if (pindex && pfrom) {
3815 mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
3816 }
3817 CheckBlockIndex();
3818 if (!ret)
3819 return error("%s: AcceptBlock FAILED", __func__);
3820 }
3821
3822 if (!ActivateBestChain(state, pblock))
3823 return error("%s: ActivateBestChain failed", __func__);
3824
3825 return true;
3826}
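// A rough illustration (not taken from this file) of how a caller might hand a freshly
// received block to the validator, letting CheckBlock derive the height via
// komodo_block2height() by passing 0:
//
//     CValidationState state;
//     if (!ProcessNewBlock(0, state, pfrom, &block, false, NULL))
//         LogPrintf("peer block was rejected\n");
//
// fForceProcessing lets a caller override the unrequested-block filter (fRequested is
// already true for blocks we explicitly requested), since AcceptBlock otherwise skips
// unrequested blocks that do not add work to the tip.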
3827
3828bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
3829{
3830 AssertLockHeld(cs_main);
3831 assert(pindexPrev == chainActive.Tip());
3832
3833 CCoinsViewCache viewNew(pcoinsTip);
3834 CBlockIndex indexDummy(block);
3835 indexDummy.pprev = pindexPrev;
3836 indexDummy.nHeight = pindexPrev->nHeight + 1;
3837 // JoinSplit proofs are verified in ConnectBlock
3838 auto verifier = libzcash::ProofVerifier::Disabled();
3839
3840 // NOTE: CheckBlockHeader is called by CheckBlock
3841 if (!ContextualCheckBlockHeader(block, state, pindexPrev))
3842 {
3843 fprintf(stderr,"TestBlockValidity failure A\n");
3844 return false;
3845 }
3846 if (!CheckBlock(indexDummy.nHeight,0,block, state, verifier, fCheckPOW, fCheckMerkleRoot))
3847 {
3848 //fprintf(stderr,"TestBlockValidity failure B\n");
3849 return false;
3850 }
3851 if (!ContextualCheckBlock(block, state, pindexPrev))
3852 {
3853 fprintf(stderr,"TestBlockValidity failure C\n");
3854 return false;
3855 }
3856 if (!ConnectBlock(block, state, &indexDummy, viewNew, true))
3857 {
3858 fprintf(stderr,"TestBlockValidity failure D\n");
3859 return false;
3860 }
3861 assert(state.IsValid());
3862
3863 return true;
3864}
3865
3866/**
3867 * BLOCK PRUNING CODE
3868 */
3869
3870/* Calculate the amount of disk space the block & undo files currently use */
3871uint64_t CalculateCurrentUsage()
3872{
3873 uint64_t retval = 0;
3874 BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
3875 retval += file.nSize + file.nUndoSize;
3876 }
3877 return retval;
3878}
3879
3880/* Prune a block file (modify associated database entries)*/
3881void PruneOneBlockFile(const int fileNumber)
3882{
3883 for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
3884 CBlockIndex* pindex = it->second;
3885 if (pindex->nFile == fileNumber) {
3886 pindex->nStatus &= ~BLOCK_HAVE_DATA;
3887 pindex->nStatus &= ~BLOCK_HAVE_UNDO;
3888 pindex->nFile = 0;
3889 pindex->nDataPos = 0;
3890 pindex->nUndoPos = 0;
3891 setDirtyBlockIndex.insert(pindex);
3892
3893 // Prune from mapBlocksUnlinked -- any block we prune would have
3894 // to be downloaded again in order to consider its chain, at which
3895 // point it would be considered as a candidate for
3896 // mapBlocksUnlinked or setBlockIndexCandidates.
3897 std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
3898 while (range.first != range.second) {
3899 std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
3900 range.first++;
3901 if (it->second == pindex) {
3902 mapBlocksUnlinked.erase(it);
3903 }
3904 }
3905 }
3906 }
3907
3908 vinfoBlockFile[fileNumber].SetNull();
3909 setDirtyFileInfo.insert(fileNumber);
3910}
3911
3912
3913void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
3914{
3915 for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
3916 CDiskBlockPos pos(*it, 0);
3917 boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
3918 boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
3919 LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
3920 }
3921}
3922
3923/* Calculate the block/rev files that should be deleted to remain under target*/
3924void FindFilesToPrune(std::set<int>& setFilesToPrune)
3925{
3926 LOCK2(cs_main, cs_LastBlockFile);
3927 if (chainActive.Tip() == NULL || nPruneTarget == 0) {
3928 return;
3929 }
3930 if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
3931 return;
3932 }
3933
3934 unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
3935 uint64_t nCurrentUsage = CalculateCurrentUsage();
3936    // We don't check whether to prune until after we've allocated new space for files,
3937    // so we should leave a buffer under our target to account for another allocation
3938    // before the next pruning pass.
3939 uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
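    // e.g. with 16 MiB block-file chunks and 1 MiB undo chunks (the usual upstream sizes,
    // assumed here for illustration) this buffer keeps usage about 17 MiB under nPruneTarget.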
3940 uint64_t nBytesToPrune;
3941 int count=0;
3942
3943 if (nCurrentUsage + nBuffer >= nPruneTarget) {
3944 for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
3945 nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
3946
3947 if (vinfoBlockFile[fileNumber].nSize == 0)
3948 continue;
3949
3950 if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
3951 break;
3952
3953            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip, but keep scanning
3954 if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
3955 continue;
3956
3957 PruneOneBlockFile(fileNumber);
3958 // Queue up the files for removal
3959 setFilesToPrune.insert(fileNumber);
3960 nCurrentUsage -= nBytesToPrune;
3961 count++;
3962 }
3963 }
3964
3965 LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
3966 nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
3967 ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
3968 nLastBlockWeCanPrune, count);
3969}
3970
3971bool CheckDiskSpace(uint64_t nAdditionalBytes)
3972{
3973 uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;
3974
3975 // Check for nMinDiskSpace bytes (currently 50MB)
3976 if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
3977 return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
3978
3979 return true;
3980}
3981
3982FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
3983{
3984 if (pos.IsNull())
3985 return NULL;
3986 boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
3987 boost::filesystem::create_directories(path.parent_path());
3988 FILE* file = fopen(path.string().c_str(), "rb+");
3989 if (!file && !fReadOnly)
3990 file = fopen(path.string().c_str(), "wb+");
3991 if (!file) {
3992 LogPrintf("Unable to open file %s\n", path.string());
3993 return NULL;
3994 }
3995 if (pos.nPos) {
3996 if (fseek(file, pos.nPos, SEEK_SET)) {
3997 LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
3998 fclose(file);
3999 return NULL;
4000 }
4001 }
4002 return file;
4003}
4004
4005FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
4006 return OpenDiskFile(pos, "blk", fReadOnly);
4007}
4008
4009FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
4010 return OpenDiskFile(pos, "rev", fReadOnly);
4011}
4012
4013boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
4014{
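    // e.g. pos.nFile == 3 with prefix "blk" resolves to <datadir>/blocks/blk00003.dat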
4015 return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
4016}
4017
4018CBlockIndex * InsertBlockIndex(uint256 hash)
4019{
4020 if (hash.IsNull())
4021 return NULL;
4022
4023 // Return existing
4024 BlockMap::iterator mi = mapBlockIndex.find(hash);
4025 if (mi != mapBlockIndex.end())
4026 return (*mi).second;
4027
4028 // Create new
4029 CBlockIndex* pindexNew = new CBlockIndex();
4030 if (!pindexNew)
4031 throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
4032 mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
4033 pindexNew->phashBlock = &((*mi).first);
4034
4035 return pindexNew;
4036}
4037
4038bool static LoadBlockIndexDB()
4039{
4040 const CChainParams& chainparams = Params();
4041 if (!pblocktree->LoadBlockIndexGuts())
4042 return false;
4043
4044 boost::this_thread::interruption_point();
4045
4046 // Calculate nChainWork
4047 vector<pair<int, CBlockIndex*> > vSortedByHeight;
4048 vSortedByHeight.reserve(mapBlockIndex.size());
4049 BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
4050 {
4051 CBlockIndex* pindex = item.second;
4052 vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
4053 }
4054 sort(vSortedByHeight.begin(), vSortedByHeight.end());
4055 BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
4056 {
4057 CBlockIndex* pindex = item.second;
4058 pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
4059 // We can link the chain of blocks for which we've received transactions at some point.
4060 // Pruned nodes may have deleted the block.
4061 if (pindex->nTx > 0) {
4062 if (pindex->pprev) {
4063 if (pindex->pprev->nChainTx) {
4064 pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
4065 if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) {
4066 pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue;
4067 } else {
4068 pindex->nChainSproutValue = boost::none;
4069 }
4070 } else {
4071 pindex->nChainTx = 0;
4072 pindex->nChainSproutValue = boost::none;
4073 mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
4074 }
4075 } else {
4076 pindex->nChainTx = pindex->nTx;
4077 pindex->nChainSproutValue = pindex->nSproutValue;
4078 }
4079 }
4080 // Construct in-memory chain of branch IDs.
4081 // Relies on invariant: a block that does not activate a network upgrade
4082 // will always be valid under the same consensus rules as its parent.
4083 // Genesis block has a branch ID of zero by definition, but has no
4084 // validity status because it is side-loaded into a fresh chain.
4085 // Activation blocks will have branch IDs set (read from disk).
4086 if (pindex->pprev) {
4087 if (pindex->IsValid(BLOCK_VALID_CONSENSUS) && !pindex->nCachedBranchId) {
4088 pindex->nCachedBranchId = pindex->pprev->nCachedBranchId;
4089 }
4090 } else {
4091 pindex->nCachedBranchId = SPROUT_BRANCH_ID;
4092 }
4093 if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
4094 setBlockIndexCandidates.insert(pindex);
4095 if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
4096 pindexBestInvalid = pindex;
4097 if (pindex->pprev)
4098 pindex->BuildSkip();
4099 if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
4100 pindexBestHeader = pindex;
4101 }
4102
4103 // Load block file info
4104 pblocktree->ReadLastBlockFile(nLastBlockFile);
4105 vinfoBlockFile.resize(nLastBlockFile + 1);
4106 LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4107 for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4108 pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4109 }
4110 LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
4111 for (int nFile = nLastBlockFile + 1; true; nFile++) {
4112 CBlockFileInfo info;
4113 if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4114 vinfoBlockFile.push_back(info);
4115 } else {
4116 break;
4117 }
4118 }
4119
4120 // Check presence of blk files
4121 LogPrintf("Checking all blk files are present...\n");
4122 set<int> setBlkDataFiles;
4123 BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
4124 {
4125 CBlockIndex* pindex = item.second;
4126 if (pindex->nStatus & BLOCK_HAVE_DATA) {
4127 setBlkDataFiles.insert(pindex->nFile);
4128 }
4129 }
4130 for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
4131 {
4132 CDiskBlockPos pos(*it, 0);
4133 if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
4134 return false;
4135 }
4136 }
4137
4138 // Check whether we have ever pruned block & undo files
4139 pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4140 if (fHavePruned)
4141 LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
4142
4143 // Check whether we need to continue reindexing
4144 bool fReindexing = false;
4145 pblocktree->ReadReindexing(fReindexing);
4146 fReindex |= fReindexing;
4147
4148 // Check whether we have a transaction index
4149 pblocktree->ReadFlag("txindex", fTxIndex);
4150 LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
4151
4152 // Fill in-memory data
4153 BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
4154 {
4155 CBlockIndex* pindex = item.second;
4156 // - This relationship will always be true even if pprev has multiple
4157 // children, because hashAnchor is technically a property of pprev,
4158 // not its children.
4159 // - This will miss chain tips; we handle the best tip below, and other
4160 // tips will be handled by ConnectTip during a re-org.
4161 if (pindex->pprev) {
4162 pindex->pprev->hashAnchorEnd = pindex->hashAnchor;
4163 }
4164 }
4165
4166 // Load pointer to end of best chain
4167 BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
4168 if (it == mapBlockIndex.end())
4169 return true;
4170 chainActive.SetTip(it->second);
4171 // Set hashAnchorEnd for the end of best chain
4172 it->second->hashAnchorEnd = pcoinsTip->GetBestAnchor();
4173
4174 PruneBlockIndexCandidates();
4175
4176 LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
4177 chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
4178 DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
4179 Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));
4180
4181 EnforceNodeDeprecation(chainActive.Height(), true);
4182
4183 return true;
4184}
4185
4186CVerifyDB::CVerifyDB()
4187{
4188 uiInterface.ShowProgress(_("Verifying blocks..."), 0);
4189}
4190
4191CVerifyDB::~CVerifyDB()
4192{
4193 uiInterface.ShowProgress("", 100);
4194}
4195
4196bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
4197{
4198 LOCK(cs_main);
4199 if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
4200 return true;
4201
4202 // Verify blocks in the best chain
4203 if (nCheckDepth <= 0)
4204 nCheckDepth = 1000000000; // suffices until the year 19000
4205 if (nCheckDepth > chainActive.Height())
4206 nCheckDepth = chainActive.Height();
4207 nCheckLevel = std::max(0, std::min(4, nCheckLevel));
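    // The levels are cumulative: 0 reads blocks from disk, 1 re-checks block validity,
    // 2 verifies undo data, 3 does a memory-only disconnect of tip blocks, and 4 finally
    // reconnects the disconnected blocks.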
4208 LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
4209 CCoinsViewCache coins(coinsview);
4210 CBlockIndex* pindexState = chainActive.Tip();
4211 CBlockIndex* pindexFailure = NULL;
4212 int nGoodTransactions = 0;
4213 CValidationState state;
4214 // No need to verify JoinSplits twice
4215 auto verifier = libzcash::ProofVerifier::Disabled();
4216 for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
4217 {
4218 boost::this_thread::interruption_point();
4219 uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
4220 if (pindex->nHeight < chainActive.Height()-nCheckDepth)
4221 break;
4222 CBlock block;
4223 // check level 0: read from disk
4224 if (!ReadBlockFromDisk(block, pindex))
4225 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4226 // check level 1: verify block validity
4227 if (nCheckLevel >= 1 && !CheckBlock(pindex->nHeight,pindex,block, state, verifier))
4228 return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4229 // check level 2: verify undo validity
4230 if (nCheckLevel >= 2 && pindex) {
4231 CBlockUndo undo;
4232 CDiskBlockPos pos = pindex->GetUndoPos();
4233 if (!pos.IsNull()) {
4234 if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
4235 return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
4236 }
4237 }
4238 // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
4239 if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
4240 bool fClean = true;
4241 if (!DisconnectBlock(block, state, pindex, coins, &fClean))
4242 return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4243 pindexState = pindex->pprev;
4244 if (!fClean) {
4245 nGoodTransactions = 0;
4246 pindexFailure = pindex;
4247 } else
4248 nGoodTransactions += block.vtx.size();
4249 }
4250 if (ShutdownRequested())
4251 return true;
4252 }
4253 if (pindexFailure)
4254 return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
4255
4256 // check level 4: try reconnecting blocks
4257 if (nCheckLevel >= 4) {
4258 CBlockIndex *pindex = pindexState;
4259 while (pindex != chainActive.Tip()) {
4260 boost::this_thread::interruption_point();
4261 uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
4262 pindex = chainActive.Next(pindex);
4263 CBlock block;
4264 if (!ReadBlockFromDisk(block, pindex))
4265 return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4266 if (!ConnectBlock(block, state, pindex, coins))
4267 return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
4268 }
4269 }
4270
4271 LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);
4272
4273 return true;
4274}
4275
4276bool RewindBlockIndex(const CChainParams& params)
4277{
4278 LOCK(cs_main);
4279
4280 // RewindBlockIndex is called after LoadBlockIndex, so at this point every block
4281 // index will have nCachedBranchId set based on the values previously persisted
4282 // to disk. By definition, a set nCachedBranchId means that the block was
4283 // fully-validated under the corresponding consensus rules. Thus we can quickly
4284 // identify whether the current active chain matches our expected sequence of
4285 // consensus rule changes, with two checks:
4286 //
4287 // - BLOCK_ACTIVATES_UPGRADE is set only on blocks that activate upgrades.
4288 // - nCachedBranchId for each block matches what we expect.
4289 auto sufficientlyValidated = [&params](const CBlockIndex* pindex) {
4290 auto consensus = params.GetConsensus();
4291 bool fFlagSet = pindex->nStatus & BLOCK_ACTIVATES_UPGRADE;
4292 bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->nHeight, consensus);
4293 return fFlagSet == fFlagExpected &&
4294 pindex->nCachedBranchId &&
4295 *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->nHeight, consensus);
4296 };
4297
4298 int nHeight = 1;
4299 while (nHeight <= chainActive.Height()) {
4300 if (!sufficientlyValidated(chainActive[nHeight])) {
4301 break;
4302 }
4303 nHeight++;
4304 }
4305
4306 // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
4307 auto rewindLength = chainActive.Height() - nHeight;
4308 if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH) {
4309 auto pindexOldTip = chainActive.Tip();
4310 auto pindexRewind = chainActive[nHeight - 1];
4311 auto msg = strprintf(_(
4312 "A block chain rewind has been detected that would roll back %d blocks! "
4313 "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
4314 ), rewindLength, MAX_REORG_LENGTH) + "\n\n" +
4315 _("Rewind details") + ":\n" +
4316 "- " + strprintf(_("Current tip: %s, height %d"),
4317 pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight) + "\n" +
4318 "- " + strprintf(_("Rewinding to: %s, height %d"),
4319 pindexRewind->phashBlock->GetHex(), pindexRewind->nHeight) + "\n\n" +
4320 _("Please help, human!");
4321 LogPrintf("*** %s\n", msg);
4322 uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
4323 StartShutdown();
4324 return false;
4325 }
4326
4327 CValidationState state;
4328 CBlockIndex* pindex = chainActive.Tip();
4329 while (chainActive.Height() >= nHeight) {
4330 if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
4331 // If pruning, don't try rewinding past the HAVE_DATA point;
4332 // since older blocks can't be served anyway, there's
4333 // no need to walk further, and trying to DisconnectTip()
4334 // will fail (and require a needless reindex/redownload
4335 // of the blockchain).
4336 break;
4337 }
4338 if (!DisconnectTip(state, true)) {
4339 return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
4340 }
4341 // Occasionally flush state to disk.
4342 if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
4343 return false;
4344 }
4345
4346 // Reduce validity flag and have-data flags.
4347 // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
4348 // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
4349 for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
4350 CBlockIndex* pindexIter = it->second;
4351
4352 // Note: If we encounter an insufficiently validated block that
4353 // is on chainActive, it must be because we are a pruning node, and
4354 // this block or some successor doesn't HAVE_DATA, so we were unable to
4355 // rewind all the way. Blocks remaining on chainActive at this point
4356 // must not have their validity reduced.
4357 if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
4358 // Reduce validity
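            // (cap the validity level at BLOCK_VALID_TREE while keeping the non-validity status bits intact)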
4359 pindexIter->nStatus =
4360 std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
4361 (pindexIter->nStatus & ~BLOCK_VALID_MASK);
4362 // Remove have-data flags
4363 pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
4364 // Remove branch ID
4365 pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE;
4366 pindexIter->nCachedBranchId = boost::none;
4367 // Remove storage location
4368 pindexIter->nFile = 0;
4369 pindexIter->nDataPos = 0;
4370 pindexIter->nUndoPos = 0;
4371 // Remove various other things
4372 pindexIter->nTx = 0;
4373 pindexIter->nChainTx = 0;
4374 pindexIter->nSproutValue = boost::none;
4375 pindexIter->nChainSproutValue = boost::none;
4376 pindexIter->nSequenceId = 0;
4377 // Make sure it gets written
4378 setDirtyBlockIndex.insert(pindexIter);
4379 // Update indices
4380 setBlockIndexCandidates.erase(pindexIter);
4381 auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
4382 while (ret.first != ret.second) {
4383 if (ret.first->second == pindexIter) {
4384 mapBlocksUnlinked.erase(ret.first++);
4385 } else {
4386 ++ret.first;
4387 }
4388 }
4389 } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
4390 setBlockIndexCandidates.insert(pindexIter);
4391 }
4392 }
4393
4394 PruneBlockIndexCandidates();
4395
4396 CheckBlockIndex();
4397
4398 if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
4399 return false;
4400 }
4401
4402 return true;
4403}
4404
4405void UnloadBlockIndex()
4406{
4407 LOCK(cs_main);
4408 setBlockIndexCandidates.clear();
4409 chainActive.SetTip(NULL);
4410 pindexBestInvalid = NULL;
4411 pindexBestHeader = NULL;
4412 mempool.clear();
4413 mapOrphanTransactions.clear();
4414 mapOrphanTransactionsByPrev.clear();
4415 nSyncStarted = 0;
4416 mapBlocksUnlinked.clear();
4417 vinfoBlockFile.clear();
4418 nLastBlockFile = 0;
4419 nBlockSequenceId = 1;
4420 mapBlockSource.clear();
4421 mapBlocksInFlight.clear();
4422 nQueuedValidatedHeaders = 0;
4423 nPreferredDownload = 0;
4424 setDirtyBlockIndex.clear();
4425 setDirtyFileInfo.clear();
4426 mapNodeState.clear();
4427 recentRejects.reset(NULL);
4428
4429 BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
4430 delete entry.second;
4431 }
4432 mapBlockIndex.clear();
4433 fHavePruned = false;
4434}
4435
4436bool LoadBlockIndex()
4437{
4438 extern int32_t KOMODO_LOADINGBLOCKS;
4439 // Load block index from databases
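    // KOMODO_LOADINGBLOCKS is a Komodo-specific global flag: it is non-zero while the block
    // index is being loaded from disk and is cleared again on both the failure and success paths below.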
4440 KOMODO_LOADINGBLOCKS = 1;
4441 if (!fReindex && !LoadBlockIndexDB())
4442 {
4443 KOMODO_LOADINGBLOCKS = 0;
4444 return false;
4445 }
4446 KOMODO_LOADINGBLOCKS = 0;
4447 fprintf(stderr,"finished loading blocks %s\n",ASSETCHAINS_SYMBOL);
4448 return true;
4449}
4450
4451
4452bool InitBlockIndex() {
4453 const CChainParams& chainparams = Params();
4454 LOCK(cs_main);
4455
4456 // Initialize global variables that cannot be constructed at startup.
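    // The rolling bloom filter below is sized for roughly 120,000 recently rejected tx hashes
    // with a one-in-a-million false-positive rate.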
4457 recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
4458
4459 // Check whether we're already initialized
4460 if (chainActive.Genesis() != NULL)
4461 return true;
4462
4463 // Use the provided setting for -txindex in the new database
4464 fTxIndex = GetBoolArg("-txindex", true);
4465 pblocktree->WriteFlag("txindex", fTxIndex);
4466 LogPrintf("Initializing databases...\n");
4467
4468 // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
4469 if (!fReindex) {
4470 try {
4471 CBlock &block = const_cast<CBlock&>(Params().GenesisBlock());
4472 // Start new block file
4473 unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
4474 CDiskBlockPos blockPos;
4475 CValidationState state;
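            // nBlockSize+8 leaves room for the 4-byte network magic and 4-byte length prefix
            // that are written ahead of each block in the block files.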
4476 if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
4477 return error("LoadBlockIndex(): FindBlockPos failed");
4478 if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
4479 return error("LoadBlockIndex(): writing genesis block to disk failed");
4480 CBlockIndex *pindex = AddToBlockIndex(block);
4481 if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
4482 return error("LoadBlockIndex(): genesis block not accepted");
4483 if (!ActivateBestChain(state, &block))
4484 return error("LoadBlockIndex(): genesis block cannot be activated");
4485 // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
4486 return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
4487 } catch (const std::runtime_error& e) {
4488 return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
4489 }
4490 }
4491
4492 return true;
4493}
4494
4495
4496
4497bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
4498{
4499 const CChainParams& chainparams = Params();
4500 // Map of disk positions for blocks with unknown parent (only used for reindex)
4501 static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
4502 int64_t nStart = GetTimeMillis();
4503
4504 int nLoaded = 0;
4505 try {
4506 // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
4507 CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
4508 uint64_t nRewind = blkdat.GetPos();
4509 while (!blkdat.eof()) {
4510 boost::this_thread::interruption_point();
4511
4512 blkdat.SetPos(nRewind);
4513 nRewind++; // start one byte further next time, in case of failure
4514 blkdat.SetLimit(); // remove former limit
4515 unsigned int nSize = 0;
4516 try {
4517 // locate a header
4518 unsigned char buf[MESSAGE_START_SIZE];
4519 blkdat.FindByte(Params().MessageStart()[0]);
4520 nRewind = blkdat.GetPos()+1;
4521 blkdat >> FLATDATA(buf);
4522 if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE))
4523 continue;
4524 // read size
4525 blkdat >> nSize;
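                // 80 bytes is the size of a serialized block header, so anything smaller cannot be a block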
4526 if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
4527 continue;
4528 } catch (const std::exception&) {
4529 // no valid block header found; don't complain
4530 break;
4531 }
4532 try {
4533 // read block
4534 uint64_t nBlockPos = blkdat.GetPos();
4535 if (dbp)
4536 dbp->nPos = nBlockPos;
4537 blkdat.SetLimit(nBlockPos + nSize);
4538 blkdat.SetPos(nBlockPos);
4539 CBlock block;
4540 blkdat >> block;
4541 nRewind = blkdat.GetPos();
4542
4543 // detect out of order blocks, and store them for later
4544 uint256 hash = block.GetHash();
4545 if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
4546 LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
4547 block.hashPrevBlock.ToString());
4548 if (dbp)
4549 mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
4550 continue;
4551 }
4552
4553 // process in case the block isn't known yet
4554 if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
4555 CValidationState state;
4556 if (ProcessNewBlock(0,state, NULL, &block, true, dbp))
4557 nLoaded++;
4558 if (state.IsError())
4559 break;
4560 } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
4561 LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
4562 }
4563
4564 // Recursively process earlier encountered successors of this block
4565 deque<uint256> queue;
4566 queue.push_back(hash);
4567 while (!queue.empty()) {
4568 uint256 head = queue.front();
4569 queue.pop_front();
4570 std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
4571 while (range.first != range.second) {
4572 std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
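                        // Komodo's ReadBlockFromDisk variant takes an expected-height hint as its
                        // first argument; 0 is passed when no index entry is available for the hash.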
4573 if (ReadBlockFromDisk(mapBlockIndex[hash]!=0?mapBlockIndex[hash]->nHeight:0,block, it->second))
4574 {
4575 LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
4576 head.ToString());
4577 CValidationState dummy;
4578 if (ProcessNewBlock(0,dummy, NULL, &block, true, &it->second))
4579 {
4580 nLoaded++;
4581 queue.push_back(block.GetHash());
4582 }
4583 }
4584 range.first++;
4585 mapBlocksUnknownParent.erase(it);
4586 }
4587 }
4588 } catch (const std::exception& e) {
4589 LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
4590 }
4591 }
4592 } catch (const std::runtime_error& e) {
4593 AbortNode(std::string("System error: ") + e.what());
4594 }
4595 if (nLoaded > 0)
4596 LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
4597 return nLoaded > 0;
4598}
4599
4600void static CheckBlockIndex()
4601{
4602 const Consensus::Params& consensusParams = Params().GetConsensus();
4603 if (!fCheckBlockIndex) {
4604 return;
4605 }
4606
4607 LOCK(cs_main);
4608
4609 // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
4610 // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
4611 // iterating the block tree require that chainActive has been initialized.)
4612 if (chainActive.Height() < 0) {
4613 assert(mapBlockIndex.size() <= 1);
4614 return;
4615 }
4616
4617 // Build forward-pointing map of the entire block tree.
4618 std::multimap<CBlockIndex*,CBlockIndex*> forward;
4619 for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
4620 forward.insert(std::make_pair(it->second->pprev, it->second));
4621 }
4622
4623 assert(forward.size() == mapBlockIndex.size());
4624
4625 std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
4626 CBlockIndex *pindex = rangeGenesis.first->second;
4627 rangeGenesis.first++;
4628 assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.
4629
4630 // Iterate over the entire block tree, using depth-first search.
4631 // Along the way, remember whether there are blocks on the path from genesis
4632 // block being explored which are the first to have certain properties.
4633 size_t nNodes = 0;
4634 int nHeight = 0;
4635 CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
4636 CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
4637 CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
4638 CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
4639 CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
4640 CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
4641 CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
4642 while (pindex != NULL) {
4643 nNodes++;
4644 if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
4645 if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
4646 if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
4647 if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
4648 if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
4649 if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
4650 if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
4651
4652 // Begin: actual consistency checks.
4653 if (pindex->pprev == NULL) {
4654 // Genesis block checks.
4655 assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
4656 assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
4657 }
4658 if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked
4659 // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
4660 // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
4661 if (!fHavePruned) {
4662 // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
4663 assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
4664 assert(pindexFirstMissing == pindexFirstNeverProcessed);
4665 } else {
4666 // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
4667 if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
4668 }
4669 if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
4670 assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
4671 // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
4672 assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
4673 assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
4674 assert(pindex->nHeight == nHeight); // nHeight must be consistent.
4675 assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
4676 assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
4677 assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
4678 if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
4679 if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
4680 if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
4681 if (pindexFirstInvalid == NULL) {
4682 // Checks for not-invalid blocks.
4683 assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
4684 }
4685 if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
4686 if (pindexFirstInvalid == NULL) {
4687 // If this block sorts at least as good as the current tip and
4688 // is valid and we have all data for its parents, it must be in
4689 // setBlockIndexCandidates. chainActive.Tip() must also be there
4690 // even if some data has been pruned.
4691 if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
4692 assert(setBlockIndexCandidates.count(pindex));
4693 }
4694 // If some parent is missing, then it could be that this block was in
4695 // setBlockIndexCandidates but had to be removed because of the missing data.
4696 // In this case it must be in mapBlocksUnlinked -- see test below.
4697 }
4698 } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
4699 assert(setBlockIndexCandidates.count(pindex) == 0);
4700 }
4701 // Check whether this block is in mapBlocksUnlinked.
4702 std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
4703 bool foundInUnlinked = false;
4704 while (rangeUnlinked.first != rangeUnlinked.second) {
4705 assert(rangeUnlinked.first->first == pindex->pprev);
4706 if (rangeUnlinked.first->second == pindex) {
4707 foundInUnlinked = true;
4708 break;
4709 }
4710 rangeUnlinked.first++;
4711 }
4712 if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
4713 // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
4714 assert(foundInUnlinked);
4715 }
4716 if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
4717 if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
4718 if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
4719 // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
4720 assert(fHavePruned); // We must have pruned.
4721 // This block may have entered mapBlocksUnlinked if:
4722 // - it has a descendant that at some point had more work than the
4723 // tip, and
4724 // - we tried switching to that descendant but were missing
4725 // data for some intermediate block between chainActive and the
4726 // tip.
4727 // So if this block is itself better than chainActive.Tip() and it wasn't in
4728 // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
4729 if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
4730 if (pindexFirstInvalid == NULL) {
4731 assert(foundInUnlinked);
4732 }
4733 }
4734 }
4735 // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
4736 // End: actual consistency checks.
4737
4738 // Try descending into the first subnode.
4739 std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
4740 if (range.first != range.second) {
4741 // A subnode was found.
4742 pindex = range.first->second;
4743 nHeight++;
4744 continue;
4745 }
4746 // This is a leaf node.
4747 // Move upwards until we reach a node of which we have not yet visited the last child.
4748 while (pindex) {
4749 // We are going to either move to a parent or a sibling of pindex.
4750 // If pindex was the first with a certain property, unset the corresponding variable.
4751 if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
4752 if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
4753 if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
4754 if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
4755 if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
4756 if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
4757 if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
4758 // Find our parent.
4759 CBlockIndex* pindexPar = pindex->pprev;
4760 // Find which child we just visited.
4761 std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
4762 while (rangePar.first->second != pindex) {
4763 assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
4764 rangePar.first++;
4765 }
4766 // Proceed to the next one.
4767 rangePar.first++;
4768 if (rangePar.first != rangePar.second) {
4769 // Move to the sibling.
4770 pindex = rangePar.first->second;
4771 break;
4772 } else {
4773 // Move up further.
4774 pindex = pindexPar;
4775 nHeight--;
4776 continue;
4777 }
4778 }
4779 }
4780
4781 // Check that we actually traversed the entire map.
4782 assert(nNodes == forward.size());
4783}
4784
4785//////////////////////////////////////////////////////////////////////////////
4786//
4787// CAlert
4788//
4789
4790std::string GetWarnings(const std::string& strFor)
4791{
4792 int nPriority = 0;
4793 string strStatusBar;
4794 string strRPC;
4795
4796 if (!CLIENT_VERSION_IS_RELEASE)
4797 strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
4798
4799 if (GetBoolArg("-testsafemode", false))
4800 strStatusBar = strRPC = "testsafemode enabled";
4801
4802 // Misc warnings like out of disk space and clock is wrong
4803 if (strMiscWarning != "")
4804 {
4805 nPriority = 1000;
4806 strStatusBar = strMiscWarning;
4807 }
4808
4809 if (fLargeWorkForkFound)
4810 {
4811 nPriority = 2000;
4812 strStatusBar = strRPC = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
4813 }
4814 else if (fLargeWorkInvalidChainFound)
4815 {
4816 nPriority = 2000;
4817 strStatusBar = strRPC = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
4818 }
4819
4820 // Alerts
4821 {
4822 LOCK(cs_mapAlerts);
4823 BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
4824 {
4825 const CAlert& alert = item.second;
4826 if (alert.AppliesToMe() && alert.nPriority > nPriority)
4827 {
4828 nPriority = alert.nPriority;
4829 strStatusBar = alert.strStatusBar;
4830 if (alert.nPriority >= ALERT_PRIORITY_SAFE_MODE) {
4831 strRPC = alert.strRPCError;
4832 }
4833 }
4834 }
4835 }
4836
4837 if (strFor == "statusbar")
4838 return strStatusBar;
4839 else if (strFor == "rpc")
4840 return strRPC;
4841 assert(!"GetWarnings(): invalid parameter");
4842 return "error";
4843}
4844
4845
4846
4847
4848
4849
4850
4851
4852//////////////////////////////////////////////////////////////////////////////
4853//
4854// Messages
4855//
4856
4857
4858bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
4859{
4860 switch (inv.type)
4861 {
4862 case MSG_TX:
4863 {
4864 assert(recentRejects);
4865 if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
4866 {
4867 // If the chain tip has changed, previously rejected transactions
4868 // might now be valid, e.g. due to an nLockTime'd tx becoming valid,
4869 // or a double-spend. Reset the rejects filter and give those
4870 // txs a second chance.
4871 hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
4872 recentRejects->reset();
4873 }
4874
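            // A txid that still has entries in the UTXO set is already confirmed, so report it as known.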
4875 return recentRejects->contains(inv.hash) ||
4876 mempool.exists(inv.hash) ||
4877 mapOrphanTransactions.count(inv.hash) ||
4878 pcoinsTip->HaveCoins(inv.hash);
4879 }
4880 case MSG_BLOCK:
4881 return mapBlockIndex.count(inv.hash);
4882 }
4883 // Don't know what it is, just say we already got one
4884 return true;
4885}
4886
4887void static ProcessGetData(CNode* pfrom)
4888{
4889 std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
4890
4891 vector<CInv> vNotFound;
4892
4893 LOCK(cs_main);
4894
4895 while (it != pfrom->vRecvGetData.end()) {
4896 // Don't bother if send buffer is too full to respond anyway
4897 if (pfrom->nSendSize >= SendBufferSize())
4898 break;
4899
4900 const CInv &inv = *it;
4901 {
4902 boost::this_thread::interruption_point();
4903 it++;
4904
4905 if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
4906 {
4907 bool send = false;
4908 BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
4909 if (mi != mapBlockIndex.end())
4910 {
4911 if (chainActive.Contains(mi->second)) {
4912 send = true;
4913 } else {
4914 static const int nOneMonth = 30 * 24 * 60 * 60;
4915 // To prevent fingerprinting attacks, only send blocks outside of the active
4916 // chain if they are valid, and no more than a month older (both in time, and in
4917 // best equivalent proof of work) than the best header chain we know about.
4918 send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
4919 (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
4920 (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
4921 if (!send) {
4922 LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
4923 }
4924 }
4925 }
4926 // Pruned nodes may have deleted the block, so check whether
4927 // it's available before trying to send.
4928 if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
4929 {
4930 // Send block from disk
4931 CBlock block;
4932 if (!ReadBlockFromDisk(block, (*mi).second))
4933 {
4934 assert(!"cannot load block from disk");
4935 }
4936 else
4937 {
4938 if (inv.type == MSG_BLOCK)
4939 {
4940 //uint256 hash; int32_t z;
4941 //hash = block.GetHash();
4942 //for (z=31; z>=0; z--)
4943 // fprintf(stderr,"%02x",((uint8_t *)&hash)[z]);
4944 //fprintf(stderr," send block %d\n",komodo_block2height(&block));
4945 pfrom->PushMessage("block", block);
4946 }
4947 else // MSG_FILTERED_BLOCK)
4948 {
4949 LOCK(pfrom->cs_filter);
4950 if (pfrom->pfilter)
4951 {
4952 CMerkleBlock merkleBlock(block, *pfrom->pfilter);
4953 pfrom->PushMessage("merkleblock", merkleBlock);
4954 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
4955 // This avoids hurting performance by pointlessly requiring a round-trip
4956 // Note that there is currently no way for a node to request any single transaction we didn't send here -
4957 // they must either disconnect and retry or request the full block.
4958 // Thus, the protocol spec allows us to provide duplicate txn here,
4959 // however we MUST always provide at least what the remote peer needs
4960 typedef std::pair<unsigned int, uint256> PairType;
4961 BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
4962 if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
4963 pfrom->PushMessage("tx", block.vtx[pair.first]);
4964 }
4965 // else
4966 // no response
4967 }
4968 }
4969 // Trigger the peer node to send a getblocks request for the next batch of inventory
4970 if (inv.hash == pfrom->hashContinue)
4971 {
4972 // Bypass PushInventory, this must send even if redundant,
4973 // and we want it right after the last block so they don't
4974 // wait for other stuff first.
4975 vector<CInv> vInv;
4976 vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
4977 pfrom->PushMessage("inv", vInv);
4978 pfrom->hashContinue.SetNull();
4979 }
4980 }
4981 }
4982 else if (inv.IsKnownType())
4983 {
4984 // Send stream from relay memory
4985 bool pushed = false;
4986 {
4987 LOCK(cs_mapRelay);
4988 map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
4989 if (mi != mapRelay.end()) {
4990 pfrom->PushMessage(inv.GetCommand(), (*mi).second);
4991 pushed = true;
4992 }
4993 }
4994 if (!pushed && inv.type == MSG_TX) {
4995 CTransaction tx;
4996 if (mempool.lookup(inv.hash, tx)) {
4997 CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
4998 ss.reserve(1000);
4999 ss << tx;
5000 pfrom->PushMessage("tx", ss);
5001 pushed = true;
5002 }
5003 }
5004 if (!pushed) {
5005 vNotFound.push_back(inv);
5006 }
5007 }
5008
5009 // Track requests for our stuff.
5010 GetMainSignals().Inventory(inv.hash);
5011
5012 if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
5013 break;
5014 }
5015 }
5016
5017 pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
5018
5019 if (!vNotFound.empty()) {
5020 // Let the peer know that we didn't find what it asked for, so it doesn't
5021 // have to wait around forever. Currently only SPV clients actually care
5022 // about this message: it's needed when they are recursively walking the
5023 // dependencies of relevant unconfirmed transactions. SPV clients want to
5024 // do that because they want to know about (and store and rebroadcast and
5025 // risk analyze) the dependencies of transactions relevant to them, without
5026 // having to download the entire memory pool.
5027 pfrom->PushMessage("notfound", vNotFound);
5028 }
5029}
5030
5031bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
5032{
5033 const CChainParams& chainparams = Params();
5034 LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
5035 //fprintf(stderr, "recv: %s peer=%d\n", SanitizeString(strCommand).c_str(), (int32_t)pfrom->GetId());
5036 if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
5037 {
5038 LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
5039 return true;
5040 }
5041
5042
5043
5044
5045 if (strCommand == "version")
5046 {
5047 // Each connection can only send one version message
5048 if (pfrom->nVersion != 0)
5049 {
5050 pfrom->PushMessage("reject", strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
5051 Misbehaving(pfrom->GetId(), 1);
5052 return false;
5053 }
5054
5055 int64_t nTime;
5056 CAddress addrMe;
5057 CAddress addrFrom;
5058 uint64_t nNonce = 1;
5059 vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
5060 if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
5061 {
5062 // disconnect from peers older than this proto version
5063 LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
5064 pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
5065 strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
5066 pfrom->fDisconnect = true;
5067 return false;
5068 }
5069
5070 // When Overwinter is active, reject incoming connections from non-Overwinter nodes
5071 const Consensus::Params& params = Params().GetConsensus();
5072 if (NetworkUpgradeActive(GetHeight(), params, Consensus::UPGRADE_OVERWINTER)
5073 && pfrom->nVersion < params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion)
5074 {
5075 LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
5076 pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
5077 strprintf("Version must be %d or greater",
5078 params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
5079 pfrom->fDisconnect = true;
5080 return false;
5081 }
5082
5083 if (pfrom->nVersion == 10300)
5084 pfrom->nVersion = 300;
5085 if (!vRecv.empty())
5086 vRecv >> addrFrom >> nNonce;
5087 if (!vRecv.empty()) {
5088 vRecv >> LIMITED_STRING(pfrom->strSubVer, 256);
5089 pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
5090 }
5091 if (!vRecv.empty())
5092 vRecv >> pfrom->nStartingHeight;
5093 if (!vRecv.empty())
5094 vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
5095 else
5096 pfrom->fRelayTxes = true;
5097
5098 // Disconnect if we connected to ourselves
5099 if (nNonce == nLocalHostNonce && nNonce > 1)
5100 {
5101 LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
5102 pfrom->fDisconnect = true;
5103 return true;
5104 }
5105
5106 pfrom->addrLocal = addrMe;
5107 if (pfrom->fInbound && addrMe.IsRoutable())
5108 {
5109 SeenLocal(addrMe);
5110 }
5111
5112 // Be shy and don't send version until we hear
5113 if (pfrom->fInbound)
5114 pfrom->PushVersion();
5115
5116 pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
5117
5118 // Potentially mark this peer as a preferred download peer.
5119 UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
5120
5121 // Change version
5122 pfrom->PushMessage("verack");
5123 pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
5124
5125 if (!pfrom->fInbound)
5126 {
5127 // Advertise our address
5128 if (fListen && !IsInitialBlockDownload())
5129 {
5130 CAddress addr = GetLocalAddress(&pfrom->addr);
5131 if (addr.IsRoutable())
5132 {
5133 LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
5134 pfrom->PushAddress(addr);
5135 } else if (IsPeerAddrLocalGood(pfrom)) {
5136 addr.SetIP(pfrom->addrLocal);
5137 LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
5138 pfrom->PushAddress(addr);
5139 }
5140 }
5141
5142 // Get recent addresses
5143 if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
5144 {
5145 pfrom->PushMessage("getaddr");
5146 pfrom->fGetAddr = true;
5147 }
5148 addrman.Good(pfrom->addr);
5149 } else {
5150 if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
5151 {
5152 addrman.Add(addrFrom, addrFrom);
5153 addrman.Good(addrFrom);
5154 }
5155 }
5156
5157 // Relay alerts
5158 {
5159 LOCK(cs_mapAlerts);
5160 BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
5161 item.second.RelayTo(pfrom);
5162 }
5163
5164 pfrom->fSuccessfullyConnected = true;
5165
5166 string remoteAddr;
5167 if (fLogIPs)
5168 remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
5169
5170 LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
5171 pfrom->cleanSubVer, pfrom->nVersion,
5172 pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
5173 remoteAddr);
5174
5175 int64_t nTimeOffset = nTime - GetTime();
5176 pfrom->nTimeOffset = nTimeOffset;
5177 AddTimeData(pfrom->addr, nTimeOffset);
5178 }
5179
5180
5181 else if (pfrom->nVersion == 0)
5182 {
5183 // Must have a version message before anything else
5184 Misbehaving(pfrom->GetId(), 1);
5185 return false;
5186 }
5187
5188
5189 else if (strCommand == "verack")
5190 {
5191 pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
5192
5193 // Mark this node as currently connected, so we update its timestamp later.
5194 if (pfrom->fNetworkNode) {
5195 LOCK(cs_main);
5196 State(pfrom->GetId())->fCurrentlyConnected = true;
5197 }
5198 }
5199
5200
5201 // Disconnect existing peer connection when:
5202 // 1. The version message has been received
5203 // 2. Overwinter is active
5204 // 3. Peer version is pre-Overwinter
5205 else if (NetworkUpgradeActive(GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_OVERWINTER)
5206 && (pfrom->nVersion < chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion))
5207 {
5208 LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
5209 pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
5210 strprintf("Version must be %d or greater",
5211 chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
5212 pfrom->fDisconnect = true;
5213 return false;
5214 }
5215
5216
5217 else if (strCommand == "addr")
5218 {
5219 vector<CAddress> vAddr;
5220 vRecv >> vAddr;
5221
5222 // Don't want addr from older versions unless seeding
5223 if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
5224 return true;
5225 if (vAddr.size() > 1000)
5226 {
5227 Misbehaving(pfrom->GetId(), 20);
5228 return error("message addr size() = %u", vAddr.size());
5229 }
5230
5231 // Store the new addresses
5232 vector<CAddress> vAddrOk;
5233 int64_t nNow = GetAdjustedTime();
5234 int64_t nSince = nNow - 10 * 60;
5235 BOOST_FOREACH(CAddress& addr, vAddr)
5236 {
5237 boost::this_thread::interruption_point();
5238
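            // Addresses carrying clearly bogus timestamps (implausibly old, or from the future)
            // are recorded as if they were last seen five days ago.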
5239 if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
5240 addr.nTime = nNow - 5 * 24 * 60 * 60;
5241 pfrom->AddAddressKnown(addr);
5242 bool fReachable = IsReachable(addr);
5243 if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
5244 {
5245 // Relay to a limited number of other nodes
5246 {
5247 LOCK(cs_vNodes);
5248 // Use deterministic randomness to send to the same nodes for 24 hours
5249 // at a time so the addrKnowns of the chosen nodes prevent repeats
5250 static uint256 hashSalt;
5251 if (hashSalt.IsNull())
5252 hashSalt = GetRandHash();
5253 uint64_t hashAddr = addr.GetHash();
5254 uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
5255 hashRand = Hash(BEGIN(hashRand), END(hashRand));
5256 multimap<uint256, CNode*> mapMix;
5257 BOOST_FOREACH(CNode* pnode, vNodes)
5258 {
5259 if (pnode->nVersion < CADDR_TIME_VERSION)
5260 continue;
5261 unsigned int nPointer;
5262 memcpy(&nPointer, &pnode, sizeof(nPointer));
5263 uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
5264 hashKey = Hash(BEGIN(hashKey), END(hashKey));
5265 mapMix.insert(make_pair(hashKey, pnode));
5266 }
5267 int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
5268 for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
5269 ((*mi).second)->PushAddress(addr);
5270 }
5271 }
5272 // Do not store addresses outside our network
5273 if (fReachable)
5274 vAddrOk.push_back(addr);
5275 }
5276 addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
5277 if (vAddr.size() < 1000)
5278 pfrom->fGetAddr = false;
5279 if (pfrom->fOneShot)
5280 pfrom->fDisconnect = true;
5281 }
5282
5283
5284 else if (strCommand == "inv")
5285 {
5286 vector<CInv> vInv;
5287 vRecv >> vInv;
5288 if (vInv.size() > MAX_INV_SZ)
5289 {
5290 Misbehaving(pfrom->GetId(), 20);
5291 return error("message inv size() = %u", vInv.size());
5292 }
5293
5294 LOCK(cs_main);
5295
5296 std::vector<CInv> vToFetch;
5297
5298 for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
5299 {
5300 const CInv &inv = vInv[nInv];
5301
5302 boost::this_thread::interruption_point();
5303 pfrom->AddInventoryKnown(inv);
5304
5305 bool fAlreadyHave = AlreadyHave(inv);
5306 LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
5307
5308 if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK)
5309 pfrom->AskFor(inv);
5310
5311 if (inv.type == MSG_BLOCK) {
5312 UpdateBlockAvailability(pfrom->GetId(), inv.hash);
5313 if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
5314 // First request the headers preceding the announced block. In the normal fully-synced
5315 // case where a new block is announced that succeeds the current tip (no reorganization),
5316 // there are no such headers.
5317 // Secondly, and only when we are close to being synced, we request the announced block directly,
5318 // to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
5319 // time the block arrives, the header chain leading up to it is already validated. Not
5320 // doing this will result in the received block being rejected as an orphan in case it is
5321 // not a direct successor.
5322 pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
5323 CNodeState *nodestate = State(pfrom->GetId());
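                // "Close to being synced" here means the tip's timestamp is within about
                // 20 block intervals of the current adjusted time.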
5324 if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 &&
5325 nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5326 vToFetch.push_back(inv);
5327 // Mark block as in flight already, even though the actual "getdata" message only goes out
5328 // later (within the same cs_main lock, though).
5329 MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
5330 }
5331 LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
5332 }
5333 }
5334
5335 // Track requests for our stuff
5336 GetMainSignals().Inventory(inv.hash);
5337
5338 if (pfrom->nSendSize > (SendBufferSize() * 2)) {
5339 Misbehaving(pfrom->GetId(), 50);
5340 return error("send buffer size() = %u", pfrom->nSendSize);
5341 }
5342 }
5343
5344 if (!vToFetch.empty())
5345 pfrom->PushMessage("getdata", vToFetch);
5346 }
5347
5348
5349 else if (strCommand == "getdata")
5350 {
5351 vector<CInv> vInv;
5352 vRecv >> vInv;
5353 if (vInv.size() > MAX_INV_SZ)
5354 {
5355 Misbehaving(pfrom->GetId(), 20);
5356 return error("message getdata size() = %u", vInv.size());
5357 }
5358
5359 if (fDebug || (vInv.size() != 1))
5360 LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
5361
5362 if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
5363 LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
5364
5365 pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
5366 ProcessGetData(pfrom);
5367 }
5368
5369
5370 else if (strCommand == "getblocks")
5371 {
5372 CBlockLocator locator;
5373 uint256 hashStop;
5374 vRecv >> locator >> hashStop;
5375
5376 LOCK(cs_main);
5377
5378 // Find the last block the caller has in the main chain
5379 CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
5380
5381 // Send the rest of the chain
5382 if (pindex)
5383 pindex = chainActive.Next(pindex);
5384 int nLimit = 500;
5385 LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
5386 for (; pindex; pindex = chainActive.Next(pindex))
5387 {
5388 if (pindex->GetBlockHash() == hashStop)
5389 {
5390 LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
5391 break;
5392 }
5393 pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
5394 if (--nLimit <= 0)
5395 {
5396 // When this block is requested, we'll send an inv that'll
5397 // trigger the peer to getblocks the next batch of inventory.
5398 LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
5399 pfrom->hashContinue = pindex->GetBlockHash();
5400 break;
5401 }
5402 }
5403 }
5404
5405
5406 else if (strCommand == "getheaders")
5407 {
5408 CBlockLocator locator;
5409 uint256 hashStop;
5410 vRecv >> locator >> hashStop;
5411
5412 LOCK(cs_main);
5413
5414 if (IsInitialBlockDownload())
5415 return true;
5416
5417 CBlockIndex* pindex = NULL;
5418 if (locator.IsNull())
5419 {
5420 // If locator is null, return the hashStop block
5421 BlockMap::iterator mi = mapBlockIndex.find(hashStop);
5422 if (mi == mapBlockIndex.end())
5423 return true;
5424 pindex = (*mi).second;
5425 }
5426 else
5427 {
5428 // Find the last block the caller has in the main chain
5429 pindex = FindForkInGlobalIndex(chainActive, locator);
5430 if (pindex)
5431 pindex = chainActive.Next(pindex);
5432 }
5433
5434 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
5435 vector<CBlock> vHeaders;
5436 int nLimit = MAX_HEADERS_RESULTS;
5437 LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
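        // Komodo addition: remember the start height of the last getheaders we answered so that
        // an identical, repeated request from the same peer can be skipped below.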
5438 if ( pfrom->lasthdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pfrom->lasthdrsreq != (int32_t)(pindex ? pindex->nHeight : -1) )
5439 {
5440 pfrom->lasthdrsreq = (int32_t)(pindex ? pindex->nHeight : -1);
5441 for (; pindex; pindex = chainActive.Next(pindex))
5442 {
5443 vHeaders.push_back(pindex->GetBlockHeader());
5444 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
5445 break;
5446 }
5447 pfrom->PushMessage("headers", vHeaders);
5448 }
5449 else if ( NOTARY_PUBKEY33[0] != 0 )
5450 {
5451 static uint32_t counter;
5452 if ( counter++ < 3 )
5453 fprintf(stderr,"you can ignore redundant getheaders from peer.%d %d prev.%d\n",(int32_t)pfrom->id,(int32_t)(pindex ? pindex->nHeight : -1),pfrom->lasthdrsreq);
5454 }
5455 }
5456
5457
5458 else if (strCommand == "tx")
5459 {
5460 vector<uint256> vWorkQueue;
5461 vector<uint256> vEraseQueue;
5462 CTransaction tx;
5463 vRecv >> tx;
5464
5465 CInv inv(MSG_TX, tx.GetHash());
5466 pfrom->AddInventoryKnown(inv);
5467
5468 LOCK(cs_main);
5469
5470 bool fMissingInputs = false;
5471 CValidationState state;
5472
5473 pfrom->setAskFor.erase(inv.hash);
5474 mapAlreadyAskedFor.erase(inv);
5475
5476 if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
5477 {
5478 mempool.check(pcoinsTip);
5479 RelayTransaction(tx);
5480 vWorkQueue.push_back(inv.hash);
5481
5482 LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s: accepted %s (poolsz %u)\n",
5483 pfrom->id, pfrom->cleanSubVer,
5484 tx.GetHash().ToString(),
5485 mempool.mapTx.size());
5486
5487 // Recursively process any orphan transactions that depended on this one
5488 set<NodeId> setMisbehaving;
5489 for (unsigned int i = 0; i < vWorkQueue.size(); i++)
5490 {
5491 map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
5492 if (itByPrev == mapOrphanTransactionsByPrev.end())
5493 continue;
5494 for (set<uint256>::iterator mi = itByPrev->second.begin();
5495 mi != itByPrev->second.end();
5496 ++mi)
5497 {
5498 const uint256& orphanHash = *mi;
5499 const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
5500 NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
5501 bool fMissingInputs2 = false;
5502 // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
5503 // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
5504 // anyone relaying LegitTxX banned)
5505 CValidationState stateDummy;
5506
5507
5508 if (setMisbehaving.count(fromPeer))
5509 continue;
5510 if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2))
5511 {
5512 LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
5513 RelayTransaction(orphanTx);
5514 vWorkQueue.push_back(orphanHash);
5515 vEraseQueue.push_back(orphanHash);
5516 }
5517 else if (!fMissingInputs2)
5518 {
5519 int nDos = 0;
5520 if (stateDummy.IsInvalid(nDos) && nDos > 0)
5521 {
5522 // Punish peer that gave us an invalid orphan tx
5523 Misbehaving(fromPeer, nDos);
5524 setMisbehaving.insert(fromPeer);
5525 LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
5526 }
5527 // Has inputs but not accepted to mempool
5528 // Probably non-standard or insufficient fee/priority
5529 LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
5530 vEraseQueue.push_back(orphanHash);
5531 assert(recentRejects);
5532 recentRejects->insert(orphanHash);
5533 }
5534 mempool.check(pcoinsTip);
5535 }
5536 }
5537
5538 BOOST_FOREACH(uint256 hash, vEraseQueue)
5539 EraseOrphanTx(hash);
5540 }
5541 // TODO: currently, prohibit joinsplits from entering mapOrphans
5542 else if (fMissingInputs && tx.vjoinsplit.size() == 0)
5543 {
5544 AddOrphanTx(tx, pfrom->GetId());
5545
5546 // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
5547 unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
5548 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
5549 if (nEvicted > 0)
5550 LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
5551 } else {
5552 assert(recentRejects);
5553 recentRejects->insert(tx.GetHash());
5554
5555 if (pfrom->fWhitelisted) {
5556 // Always relay transactions received from whitelisted peers, even
5557 // if they were already in the mempool or rejected from it due
5558 // to policy, allowing the node to function as a gateway for
5559 // nodes hidden behind it.
5560 //
5561 // Never relay transactions that we would assign a non-zero DoS
5562 // score for, as we expect peers to do the same with us in that
5563 // case.
5564 int nDoS = 0;
5565 if (!state.IsInvalid(nDoS) || nDoS == 0) {
5566 LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
5567 RelayTransaction(tx);
5568 } else {
5569 LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s (code %d))\n",
5570 tx.GetHash().ToString(), pfrom->id, state.GetRejectReason(), state.GetRejectCode());
5571 }
5572 }
5573 }
5574 int nDoS = 0;
5575 if (state.IsInvalid(nDoS))
5576 {
5577 LogPrint("mempool", "%s from peer=%d %s was not accepted into the memory pool: %s\n", tx.GetHash().ToString(),
5578 pfrom->id, pfrom->cleanSubVer,
5579 state.GetRejectReason());
5580 pfrom->PushMessage("reject", strCommand, state.GetRejectCode(),
5581 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
5582 if (nDoS > 0)
5583 Misbehaving(pfrom->GetId(), nDoS);
5584 }
5585 }
5586
5587
5588 else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing
5589 {
5590 std::vector<CBlockHeader> headers;
5591
5592 // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
5593 unsigned int nCount = ReadCompactSize(vRecv);
5594 if (nCount > MAX_HEADERS_RESULTS) {
5595 Misbehaving(pfrom->GetId(), 20);
5596 return error("headers message size = %u", nCount);
5597 }
5598 headers.resize(nCount);
5599 for (unsigned int n = 0; n < nCount; n++) {
5600 vRecv >> headers[n];
5601 ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
5602 }
5603
5604 LOCK(cs_main);
5605
5606 if (nCount == 0) {
5607 // Nothing interesting. Stop asking this peer for more headers.
5608 return true;
5609 }
5610
5611 CBlockIndex *pindexLast = NULL;
5612 BOOST_FOREACH(const CBlockHeader& header, headers) {
5613 CValidationState state;
5614 if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
5615 Misbehaving(pfrom->GetId(), 20);
5616 return error("non-continuous headers sequence");
5617 }
5618 if (!AcceptBlockHeader(header, state, &pindexLast)) {
5619 int nDoS;
5620 if (state.IsInvalid(nDoS)) {
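                    // Note: nDoS/nDoS below always evaluates to 1, so an invalid header costs a fixed one-point penalty.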
5621 if (nDoS > 0)
5622 Misbehaving(pfrom->GetId(), nDoS/nDoS);
5623 return error("invalid header received");
5624 }
5625 }
5626 }
5627
5628 if (pindexLast)
5629 UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
5630
5631 if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
5632 // Headers message had its maximum size; the peer may have more headers.
5633 // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
5634 // from there instead.
5635 if ( pfrom->sendhdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pindexLast->nHeight != pfrom->sendhdrsreq )
5636 {
5637 pfrom->sendhdrsreq = (int32_t)pindexLast->nHeight;
5638 LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
5639 pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
5640 }
5641 }
5642
5643 CheckBlockIndex();
5644 }
5645
5646 else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
5647 {
5648 CBlock block;
5649 vRecv >> block;
5650
5651 CInv inv(MSG_BLOCK, block.GetHash());
5652 LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
5653
5654 pfrom->AddInventoryKnown(inv);
5655
5656 CValidationState state;
5657 // Process all blocks from whitelisted peers, even if not requested,
5658 // unless we're still syncing with the network.
5659 // Such an unrequested block may still be processed, subject to the
5660 // conditions in AcceptBlock().
5661 bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
5662 ProcessNewBlock(0,state, pfrom, &block, forceProcessing, NULL);
5663 int nDoS;
5664 if (state.IsInvalid(nDoS)) {
5665 pfrom->PushMessage("reject", strCommand, state.GetRejectCode(),
5666 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
5667 if (nDoS > 0) {
5668 LOCK(cs_main);
5669 Misbehaving(pfrom->GetId(), nDoS);
5670 }
5671 }
5672
5673 }
5674
5675
5676 // This asymmetric behavior for inbound and outbound connections was introduced
5677 // to prevent a fingerprinting attack: an attacker can send specific fake addresses
5678 // to users' AddrMan and later request them by sending getaddr messages.
5679 // Making nodes which are behind NAT and can only make outgoing connections ignore
5680 // the getaddr message mitigates the attack.
5681 else if ((strCommand == "getaddr") && (pfrom->fInbound))
5682 {
5683 // Only send one GetAddr response per connection to reduce resource waste
5684 // and discourage addr stamping of INV announcements.
5685 if (pfrom->fSentAddr) {
5686 LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
5687 return true;
5688 }
5689 pfrom->fSentAddr = true;
5690
5691 pfrom->vAddrToSend.clear();
5692 vector<CAddress> vAddr = addrman.GetAddr();
5693 BOOST_FOREACH(const CAddress &addr, vAddr)
5694 pfrom->PushAddress(addr);
5695 }
5696
5697
5698 else if (strCommand == "mempool")
5699 {
5700 LOCK2(cs_main, pfrom->cs_filter);
5701
5702 std::vector<uint256> vtxid;
5703 mempool.queryHashes(vtxid);
5704 vector<CInv> vInv;
5705 BOOST_FOREACH(uint256& hash, vtxid) {
5706 CInv inv(MSG_TX, hash);
5707 CTransaction tx;
5708 bool fInMemPool = mempool.lookup(hash, tx);
5709 if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
5710 if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(tx)) ||
5711 (!pfrom->pfilter))
5712 vInv.push_back(inv);
5713 if (vInv.size() == MAX_INV_SZ) {
5714 pfrom->PushMessage("inv", vInv);
5715 vInv.clear();
5716 }
5717 }
5718 if (vInv.size() > 0)
5719 pfrom->PushMessage("inv", vInv);
5720 }
5721
5722
5723 else if (strCommand == "ping")
5724 {
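        // BIP 0031 added the nonce field to ping/pong; peers on older protocol versions get no pong reply.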
5725 if (pfrom->nVersion > BIP0031_VERSION)
5726 {
5727 uint64_t nonce = 0;
5728 vRecv >> nonce;
5729 // Echo the message back with the nonce. This allows for two useful features:
5730 //
5731 // 1) A remote node can quickly check if the connection is operational
5732 // 2) Remote nodes can measure the latency of the network thread. If this node
5733 // is overloaded it won't respond to pings quickly and the remote node can
5734 // avoid sending us more work, like chain download requests.
5735 //
5736 // The nonce stops the remote getting confused between different pings: without
5737 // it, if the remote node sends a ping once per second and this node takes 5
5738 // seconds to respond to each, the 5th ping the remote sends would appear to
5739 // return very quickly.
5740 pfrom->PushMessage("pong", nonce);
5741 }
5742 }
5743
5744
5745 else if (strCommand == "pong")
5746 {
5747 int64_t pingUsecEnd = nTimeReceived;
5748 uint64_t nonce = 0;
5749 size_t nAvail = vRecv.in_avail();
5750 bool bPingFinished = false;
5751 std::string sProblem;
5752
5753 if (nAvail >= sizeof(nonce)) {
5754 vRecv >> nonce;
5755
5756 // Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
5757 if (pfrom->nPingNonceSent != 0) {
5758 if (nonce == pfrom->nPingNonceSent) {
5759 // Matching pong received, this ping is no longer outstanding
5760 bPingFinished = true;
5761 int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
5762 if (pingUsecTime > 0) {
5763 // Successful ping time measurement, replace previous
5764 pfrom->nPingUsecTime = pingUsecTime;
5765 pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
5766 } else {
5767 // This should never happen
5768 sProblem = "Timing mishap";
5769 }
5770 } else {
5771 // Nonce mismatches are normal when pings are overlapping
5772 sProblem = "Nonce mismatch";
5773 if (nonce == 0) {
5774 // This is most likely a bug in another implementation somewhere; cancel this ping
5775 bPingFinished = true;
5776 sProblem = "Nonce zero";
5777 }
5778 }
5779 } else {
5780 sProblem = "Unsolicited pong without ping";
5781 }
5782 } else {
5783 // This is most likely a bug in another implementation somewhere; cancel this ping
5784 bPingFinished = true;
5785 sProblem = "Short payload";
5786 }
5787
5788 if (!(sProblem.empty())) {
5789 LogPrint("net", "pong peer=%d %s: %s, %x expected, %x received, %u bytes\n",
5790 pfrom->id,
5791 pfrom->cleanSubVer,
5792 sProblem,
5793 pfrom->nPingNonceSent,
5794 nonce,
5795 nAvail);
5796 }
5797 if (bPingFinished) {
5798 pfrom->nPingNonceSent = 0;
5799 }
5800 }
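    // Worked example: if the ping left at nPingUsecStart = 1,000,000,000 us and the
    // matching pong arrives at nTimeReceived = 1,000,250,000 us, then pingUsecTime
    // is 250,000 us (a 250 ms round trip); nMinPingUsecTime tracks the smallest
    // such value observed on this connection.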
5801
5802
5803 else if (fAlerts && strCommand == "alert")
5804 {
5805 CAlert alert;
5806 vRecv >> alert;
5807
5808 uint256 alertHash = alert.GetHash();
5809 if (pfrom->setKnown.count(alertHash) == 0)
5810 {
5811 if (alert.ProcessAlert(Params().AlertKey()))
5812 {
5813 // Relay
5814 pfrom->setKnown.insert(alertHash);
5815 {
5816 LOCK(cs_vNodes);
5817 BOOST_FOREACH(CNode* pnode, vNodes)
5818 alert.RelayTo(pnode);
5819 }
5820 }
5821 else {
5822 // Small DoS penalty so peers that send us lots of
5823 // duplicate/expired/invalid-signature/whatever alerts
5824 // eventually get banned.
5825 // This isn't a Misbehaving(100) (immediate ban) because the
5826 // peer might be an older or different implementation with
5827 // a different signature key, etc.
5828 Misbehaving(pfrom->GetId(), 10);
5829 }
5830 }
5831 }
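    // Background note: at 10 misbehaviour points per rejected alert, a peer would
    // need on the order of ten bad alerts before hitting the ban threshold
    // (assumed to be the usual -banscore default of 100).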
5832
5833
5834 else if (strCommand == "filterload")
5835 {
5836 CBloomFilter filter;
5837 vRecv >> filter;
5838
5839 if (!filter.IsWithinSizeConstraints())
5840 // There is no excuse for sending a too-large filter
5841 Misbehaving(pfrom->GetId(), 100);
5842 else
5843 {
5844 LOCK(pfrom->cs_filter);
5845 delete pfrom->pfilter;
5846 pfrom->pfilter = new CBloomFilter(filter);
5847 pfrom->pfilter->UpdateEmptyFull();
5848 }
5849 pfrom->fRelayTxes = true;
5850 }
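    // Background note: IsWithinSizeConstraints() enforces the BIP 37 limits on a
    // loaded filter (at most 36,000 bytes of filter data and at most 50 hash
    // functions, per the BIP), which bounds the memory a peer can make us hold.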
5851
5852
5853 else if (strCommand == "filteradd")
5854 {
5855 vector<unsigned char> vData;
5856 vRecv >> vData;
5857
5858 // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
5859 // and thus, the maximum size any matched object can have) in a filteradd message
5860 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
5861 {
5862 Misbehaving(pfrom->GetId(), 100);
5863 } else {
5864 LOCK(pfrom->cs_filter);
5865 if (pfrom->pfilter)
5866 pfrom->pfilter->insert(vData);
5867 else
5868 Misbehaving(pfrom->GetId(), 100);
5869 }
5870 }
5871
5872
5873 else if (strCommand == "filterclear")
5874 {
5875 LOCK(pfrom->cs_filter);
5876 delete pfrom->pfilter;
5877 pfrom->pfilter = new CBloomFilter();
5878 pfrom->fRelayTxes = true;
5879 }
5880
5881
5882 else if (strCommand == "reject")
5883 {
5884 if (fDebug) {
5885 try {
5886 string strMsg; unsigned char ccode; string strReason;
5887 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
5888
5889 ostringstream ss;
5890 ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
5891
5892 if (strMsg == "block" || strMsg == "tx")
5893 {
5894 uint256 hash;
5895 vRecv >> hash;
5896 ss << ": hash " << hash.ToString();
5897 }
5898 LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
5899 } catch (const std::ios_base::failure&) {
5900 // Avoid feedback loops by preventing reject messages from triggering a new reject message.
5901 LogPrint("net", "Unparseable reject message received\n");
5902 }
5903 }
5904 }
5905 else if (strCommand == "notfound") {
5906 // We do not care about the NOTFOUND message, but logging an Unknown Command
5907 // message would be undesirable as we transmit it ourselves.
5908 }
5909
5910 else {
5911 // Ignore unknown commands for extensibility
5912 LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
5913 }
5914
5915
5916
5917 return true;
5918}
5919
5920// requires LOCK(cs_vRecvMsg)
5921bool ProcessMessages(CNode* pfrom)
5922{
5923 //if (fDebug)
5924 // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
5925
5926 //
5927 // Message format
5928 // (4) message start
5929 // (12) command
5930 // (4) size
5931 // (4) checksum
5932 // (x) data
5933 //
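    // Worked example: the fixed header is 4 + 12 + 4 + 4 = 24 bytes. For a
    // "verack" with an empty payload the wire bytes are the 4 network magic bytes,
    // "verack" padded with NULs to 12 bytes, a little-endian payload size of 0,
    // and the 4-byte payload checksum.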
5934 bool fOk = true;
5935
5936 if (!pfrom->vRecvGetData.empty())
5937 ProcessGetData(pfrom);
5938
5939 // this maintains the order of responses
5940 if (!pfrom->vRecvGetData.empty()) return fOk;
5941
5942 std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
5943 while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
5944 // Don't bother if send buffer is too full to respond anyway
5945 if (pfrom->nSendSize >= SendBufferSize())
5946 break;
5947
5948 // get next message
5949 CNetMessage& msg = *it;
5950
5951 //if (fDebug)
5952 // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
5953 // msg.hdr.nMessageSize, msg.vRecv.size(),
5954 // msg.complete() ? "Y" : "N");
5955
5956 // end, if an incomplete message is found
5957 if (!msg.complete())
5958 break;
5959
5960 // at this point, any failure means we can delete the current message
5961 it++;
5962
5963 // Scan for message start
5964 if (memcmp(msg.hdr.pchMessageStart, Params().MessageStart(), MESSAGE_START_SIZE) != 0) {
5965 LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
5966 fOk = false;
5967 break;
5968 }
5969
5970 // Read header
5971 CMessageHeader& hdr = msg.hdr;
5972 if (!hdr.IsValid(Params().MessageStart()))
5973 {
5974 LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
5975 continue;
5976 }
5977 string strCommand = hdr.GetCommand();
5978
5979 // Message size
5980 unsigned int nMessageSize = hdr.nMessageSize;
5981
5982 // Checksum
5983 CDataStream& vRecv = msg.vRecv;
5984 uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
5985 unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
5986 if (nChecksum != hdr.nChecksum)
5987 {
5988 LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
5989 SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);
5990 continue;
5991 }
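        // Worked example: the checksum is the first four bytes of
        // SHA256(SHA256(payload)). For an empty payload those bytes are
        // 5d f6 e0 e2, so every empty-payload message ("verack", "getaddr", ...)
        // must carry exactly that checksum or it is skipped here.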
5992
5993 // Process message
5994 bool fRet = false;
5995 try
5996 {
5997 fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime);
5998 boost::this_thread::interruption_point();
5999 }
6000 catch (const std::ios_base::failure& e)
6001 {
6002 pfrom->PushMessage("reject", strCommand, REJECT_MALFORMED, string("error parsing message"));
6003 if (strstr(e.what(), "end of data"))
6004 {
6005 // Allow exceptions from under-length message on vRecv
6006 LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
6007 }
6008 else if (strstr(e.what(), "size too large"))
6009 {
6010 // Allow exceptions from over-long size
6011 LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
6012 }
6013 else
6014 {
6015 //PrintExceptionContinue(&e, "ProcessMessages()");
6016 }
6017 }
6018 catch (const boost::thread_interrupted&) {
6019 throw;
6020 }
6021 catch (const std::exception& e) {
6022 PrintExceptionContinue(&e, "ProcessMessages()");
6023 } catch (...) {
6024 PrintExceptionContinue(NULL, "ProcessMessages()");
6025 }
6026
6027 if (!fRet)
6028 LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
6029
6030 break;
6031 }
6032
6033 // In case the connection got shut down, its receive buffer was wiped
6034 if (!pfrom->fDisconnect)
6035 pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
6036
6037 return fOk;
6038}
6039
6040
6041bool SendMessages(CNode* pto, bool fSendTrickle)
6042{
6043 const Consensus::Params& consensusParams = Params().GetConsensus();
6044 {
6045 // Don't send anything until we get its version message
6046 if (pto->nVersion == 0)
6047 return true;
6048
6049 //
6050 // Message: ping
6051 //
6052 bool pingSend = false;
6053 if (pto->fPingQueued) {
6054 // RPC ping request by user
6055 pingSend = true;
6056 }
6057 if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
6058 // Ping automatically sent as a latency probe & keepalive.
6059 pingSend = true;
6060 }
6061 if (pingSend) {
6062 uint64_t nonce = 0;
6063 while (nonce == 0) {
6064 GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
6065 }
6066 pto->fPingQueued = false;
6067 pto->nPingUsecStart = GetTimeMicros();
6068 if (pto->nVersion > BIP0031_VERSION) {
6069 pto->nPingNonceSent = nonce;
6070 pto->PushMessage("ping", nonce);
6071 } else {
6072 // Peer is too old to support the ping command with a nonce; a pong will never arrive.
6073 pto->nPingNonceSent = 0;
6074 pto->PushMessage("ping");
6075 }
6076 }
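        // Background note: the nonce is redrawn until non-zero because
        // nPingNonceSent == 0 doubles as the "no ping outstanding" marker checked
        // by the "pong" handler; PING_INTERVAL (assumed to be the usual 2 minutes)
        // paces the automatic keepalive pings above.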
6077
6078 TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
6079 if (!lockMain)
6080 return true;
6081
6082 // Address refresh broadcast
6083 static int64_t nLastRebroadcast;
6084 if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
6085 {
6086 LOCK(cs_vNodes);
6087 BOOST_FOREACH(CNode* pnode, vNodes)
6088 {
6089 // Periodically clear addrKnown to allow refresh broadcasts
6090 if (nLastRebroadcast)
6091 pnode->addrKnown.reset();
6092
6093 // Rebroadcast our address
6094 AdvertizeLocal(pnode);
6095 }
6096 if (!vNodes.empty())
6097 nLastRebroadcast = GetTime();
6098 }
6099
6100 //
6101 // Message: addr
6102 //
6103 if (fSendTrickle)
6104 {
6105 vector<CAddress> vAddr;
6106 vAddr.reserve(pto->vAddrToSend.size());
6107 BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
6108 {
6109 if (!pto->addrKnown.contains(addr.GetKey()))
6110 {
6111 pto->addrKnown.insert(addr.GetKey());
6112 vAddr.push_back(addr);
6113 // receiver rejects addr messages larger than 1000 entries
6114 if (vAddr.size() >= 1000)
6115 {
6116 pto->PushMessage("addr", vAddr);
6117 vAddr.clear();
6118 }
6119 }
6120 }
6121 pto->vAddrToSend.clear();
6122 if (!vAddr.empty())
6123 pto->PushMessage("addr", vAddr);
6124 }
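        // Background note: batches are flushed at 1000 addresses because the
        // receiving side treats larger "addr" messages as misbehaviour (upstream
        // Bitcoin rule; assumed unchanged in this codebase).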
6125
6126 CNodeState &state = *State(pto->GetId());
6127 if (state.fShouldBan) {
6128 if (pto->fWhitelisted)
6129 LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
6130 else {
6131 pto->fDisconnect = true;
6132 if (pto->addr.IsLocal())
6133 LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
6134 else
6135 {
6136 CNode::Ban(pto->addr);
6137 }
6138 }
6139 state.fShouldBan = false;
6140 }
6141
6142 BOOST_FOREACH(const CBlockReject& reject, state.rejects)
6143 pto->PushMessage("reject", (string)"block", reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
6144 state.rejects.clear();
6145
6146 // Start block sync
6147 if (pindexBestHeader == NULL)
6148 pindexBestHeader = chainActive.Tip();
6149 bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
6150 if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
6151 // Only actively request headers from a single peer, unless we're close to today.
6152 if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
6153 state.fSyncStarted = true;
6154 nSyncStarted++;
6155 CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
6156 LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
6157 pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256());
6158 }
6159 }
6160
6161 // Resend wallet transactions that haven't gotten in a block yet
6162 // Except during reindex, importing and IBD, when old wallet
6163 // transactions become unconfirmed and spam other nodes.
6164 if (!fReindex && !fImporting && !IsInitialBlockDownload())
6165 {
6166 GetMainSignals().Broadcast(nTimeBestReceived);
6167 }
6168
6169 //
6170 // Message: inventory
6171 //
6172 vector<CInv> vInv;
6173 vector<CInv> vInvWait;
6174 {
6175 LOCK(pto->cs_inventory);
6176 vInv.reserve(pto->vInventoryToSend.size());
6177 vInvWait.reserve(pto->vInventoryToSend.size());
6178 BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
6179 {
6180 if (pto->setInventoryKnown.count(inv))
6181 continue;
6182
6183 // trickle out tx inv to protect privacy
6184 if (inv.type == MSG_TX && !fSendTrickle)
6185 {
6186 // 1/4 of tx invs blast to all immediately
6187 static uint256 hashSalt;
6188 if (hashSalt.IsNull())
6189 hashSalt = GetRandHash();
6190 uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
6191 hashRand = Hash(BEGIN(hashRand), END(hashRand));
6192 bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
6193
6194 if (fTrickleWait)
6195 {
6196 vInvWait.push_back(inv);
6197 continue;
6198 }
6199 }
6200
6201 // returns true if it wasn't already contained in the set
6202 if (pto->setInventoryKnown.insert(inv).second)
6203 {
6204 vInv.push_back(inv);
6205 if (vInv.size() >= 1000)
6206 {
6207 pto->PushMessage("inv", vInv);
6208 vInv.clear();
6209 }
6210 }
6211 }
6212 pto->vInventoryToSend = vInvWait;
6213 }
6214 if (!vInv.empty())
6215 pto->PushMessage("inv", vInv);
6216
6217 // Detect whether we're stalling
6218 int64_t nNow = GetTimeMicros();
6219 if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
6220 // Stalling only triggers when the block download window cannot move. During normal steady state,
6221 // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
6222 // should only happen during initial block download.
6223 LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
6224 pto->fDisconnect = true;
6225 }
6226 // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
6227 // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
6228 // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
6229 // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
6230 // to unreasonably increase our timeout.
6231 // We also compare the block download timeout originally calculated against the time at which we'd disconnect
6232 // if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
6233 // only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
6234 // permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
6235 // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
6236 if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
6237 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
6238 int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
6239 if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
6240 LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
6241 queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
6242 }
6243 if (queuedBlock.nTimeDisconnect < nNow) {
6244 LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
6245 pto->fDisconnect = true;
6246 }
6247 }
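        // Worked example: with N = 4 validated blocks already in flight when the
        // block was requested, the timeout is (2 + 0.5 * 4) = 4 block intervals;
        // assuming a 60-second target spacing (chain-specific), that is 240 seconds
        // before the peer is disconnected for not delivering it.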
6248
6249 //
6250 // Message: getdata (blocks)
6251 //
6252 vector<CInv> vGetData;
6253 if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
6254 vector<CBlockIndex*> vToDownload;
6255 NodeId staller = -1;
6256 FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
6257 BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
6258 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
6259 MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
6260 LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
6261 pindex->nHeight, pto->id);
6262 }
6263 if (state.nBlocksInFlight == 0 && staller != -1) {
6264 if (State(staller)->nStallingSince == 0) {
6265 State(staller)->nStallingSince = nNow;
6266 LogPrint("net", "Stall started peer=%d\n", staller);
6267 }
6268 }
6269 }
6270
6271 //
6272 // Message: getdata (non-blocks)
6273 //
6274 while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
6275 {
6276 const CInv& inv = (*pto->mapAskFor.begin()).second;
6277 if (!AlreadyHave(inv))
6278 {
6279 if (fDebug)
6280 LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
6281 vGetData.push_back(inv);
6282 if (vGetData.size() >= 1000)
6283 {
6284 pto->PushMessage("getdata", vGetData);
6285 vGetData.clear();
6286 }
6287 } else {
6288 // If we're not going to ask, don't expect a response.
6289 pto->setAskFor.erase(inv.hash);
6290 }
6291 pto->mapAskFor.erase(pto->mapAskFor.begin());
6292 }
6293 if (!vGetData.empty())
6294 pto->PushMessage("getdata", vGetData);
6295
6296 }
6297 return true;
6298}
6299
6300 std::string CBlockFileInfo::ToString() const {
6301 return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
6302 }
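 // Illustrative output (values made up):
 //   CBlockFileInfo(blocks=1000, size=13107218, heights=0...999, time=2018-01-01...2018-01-08)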
6303
6304
6305
6306class CMainCleanup
6307{
6308public:
6309 CMainCleanup() {}
6310 ~CMainCleanup() {
6311 // block headers
6312 BlockMap::iterator it1 = mapBlockIndex.begin();
6313 for (; it1 != mapBlockIndex.end(); it1++)
6314 delete (*it1).second;
6315 mapBlockIndex.clear();
6316
6317 // orphan transactions
6318 mapOrphanTransactions.clear();
6319 mapOrphanTransactionsByPrev.clear();
6320 }
6321} instance_of_cmaincleanup;
6322
6323extern "C" const char* getDataDir()
6324{
6325 // GetDataDir().string() is a temporary std::string, so returning its c_str()
     // directly would hand the caller a dangling pointer. Cache the value in a
     // function-local static instead (minimal fix; assumes the data directory does
     // not change after the first call).
     static const std::string datadir = GetDataDir().string();
     return datadir.c_str();
6326}
6327
6328
6329// Set default values of new CMutableTransaction based on consensus rules at given height.
6330CMutableTransaction CreateNewContextualCMutableTransaction(const Consensus::Params& consensusParams, int nHeight)
6331{
6332 CMutableTransaction mtx;
6333
6334 bool isOverwintered = NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_OVERWINTER);
6335 if (isOverwintered) {
6336 mtx.fOverwintered = true;
6337 mtx.nVersionGroupId = OVERWINTER_VERSION_GROUP_ID;
6338 mtx.nVersion = 3;
6339 // Expiry height is not set. Only fields required for a parser to treat as a valid Overwinter V3 tx.
6340
6341 // TODO: In future, when moving from Overwinter to Sapling, it will be useful
6342 // to set the expiry height to: min(activation_height - 1, default_expiry_height)
6343 }
6344 return mtx;
6345}
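// Usage sketch (illustrative): callers pass the height at which the transaction is
// expected to be mined, e.g.
//
//   CMutableTransaction mtx = CreateNewContextualCMutableTransaction(
//       Params().GetConsensus(), chainActive.Height() + 1);
//
// If Overwinter is active at that height, mtx.fOverwintered is true, mtx.nVersion
// is 3 and mtx.nVersionGroupId is OVERWINTER_VERSION_GROUP_ID.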