#include "wallet/asyncrpcoperation_sendmany.h"
#include "wallet/asyncrpcoperation_shieldcoinbase.h"
+#include <cstring>
+#include <algorithm>
+#include <atomic>
#include <sstream>
+#include <map>
+#include <unordered_map>
+#include <vector>
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
# error "Zcash cannot be compiled without assertions."
#endif
+#include "librustzcash.h"
/**
* Global state
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
-int64_t nTimeBestReceived = 0;
+static int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
bool fAlerts = DEFAULT_ALERTS;
+/* If the tip is older than this (in seconds), the node is considered to be in initial block download.
+ */
+int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
unsigned int expiryDelta = DEFAULT_TX_EXPIRY_DELTA;
{
bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
// First sort by most total work, ...
- if (pa->nChainWork > pb->nChainWork) return false;
- if (pa->nChainWork < pb->nChainWork) return true;
+ if (pa->chainPower > pb->chainPower) return false;
+ if (pa->chainPower < pb->chainPower) return true;
// ... then by earliest time received, ...
if (pa->nSequenceId < pb->nSequenceId) return false;
* missing the data for the block.
*/
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
+
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
+
/** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
int GetHeight()
{
    // Patched to read the height directly from the active chain's last tip
    // instead of taking cs_main and calling chainActive.Height().
    // NOTE(review): the LOCK(cs_main) was removed by this hunk — confirm every
    // caller already holds cs_main, or that LastTip()/GetHeight() are safe to
    // read lock-free; otherwise this introduces a data race. Also confirm
    // LastTip() can never return NULL here (it is dereferenced unconditionally).
- LOCK(cs_main);
- return chainActive.Height();
+ return chainActive.LastTip()->GetHeight();
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
if (!state->hashLastUnknownBlock.IsNull()) {
BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
- if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0)
+ if (itOld != mapBlockIndex.end() && itOld->second != 0 && (itOld->second->chainPower > CChainPower()))
{
- if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
+ if (state->pindexBestKnownBlock == NULL || itOld->second->chainPower >= state->pindexBestKnownBlock->chainPower)
state->pindexBestKnownBlock = itOld->second;
state->hashLastUnknownBlock.SetNull();
}
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
- if (pa->nHeight > pb->nHeight) {
- pa = pa->GetAncestor(pb->nHeight);
- } else if (pb->nHeight > pa->nHeight) {
- pb = pb->GetAncestor(pa->nHeight);
+ if (pa->GetHeight() > pb->GetHeight()) {
+ pa = pa->GetAncestor(pb->GetHeight());
+ } else if (pb->GetHeight() > pa->GetHeight()) {
+ pb = pb->GetAncestor(pa->GetHeight());
}
while (pa != pb && pa && pb) {
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
- if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
+ if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->chainPower < chainActive.Tip()->chainPower) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == NULL) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
- state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
+ state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->GetHeight(), chainActive.Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
- int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
- int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
+ int nWindowEnd = state->pindexLastCommonBlock->GetHeight() + BLOCK_DOWNLOAD_WINDOW;
+ int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->GetHeight(), nWindowEnd + 1);
NodeId waitingfor = -1;
- while (pindexWalk->nHeight < nMaxHeight) {
+ while (pindexWalk->GetHeight() < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
- int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
+ int nToFetch = std::min(nMaxHeight - pindexWalk->GetHeight(), std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
- pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
+ pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->GetHeight() + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
- if (pindex->nHeight > nWindowEnd) {
+ if (pindex->GetHeight() > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
if (state == NULL)
return false;
stats.nMisbehavior = state->nMisbehavior;
- stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
- stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
+ stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->GetHeight() : -1;
+ stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->GetHeight() : -1;
BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
if (queue.pindex)
- stats.vHeightInFlight.push_back(queue.pindex->nHeight);
+ stats.vHeightInFlight.push_back(queue.pindex->GetHeight());
}
return true;
}
if (fAddressIndex) {
if ( pblocktree != 0 ) {
- result = pblocktree->Snapshot(top);
+ result = pblocktree->Snapshot(top);
} else {
- fprintf(stderr,"null pblocktree start with -addressindex=true\n");
+ fprintf(stderr,"null pblocktree start with -addressindex=1\n");
}
} else {
- fprintf(stderr,"getsnapshot requires -addressindex=true\n");
+ fprintf(stderr,"getsnapshot requires -addressindex=1\n");
}
return(result);
}
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big is
// at most 500 megabytes of orphans:
- unsigned int sz = tx.GetSerializeSize(SER_NETWORK, tx.nVersion);
+ unsigned int sz = GetSerializeSize(tx, SER_NETWORK, tx.nVersion);
if (sz > 5000)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
bool IsStandardTx(const CTransaction& tx, string& reason, const int nHeight)
{
- bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
-
- if (isOverwinter) {
+ bool overwinterActive = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
+ bool saplingActive = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING);
+
+ if (saplingActive) {
+ // Sapling standard rules apply
+ if (tx.nVersion > CTransaction::SAPLING_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SAPLING_MIN_CURRENT_VERSION) {
+ reason = "sapling-version";
+ return false;
+ }
+ } else if (overwinterActive) {
// Overwinter standard rules apply
if (tx.nVersion > CTransaction::OVERWINTER_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::OVERWINTER_MIN_CURRENT_VERSION) {
reason = "overwinter-version";
if (whichType == TX_NULL_DATA)
{
+ if ( txout.scriptPubKey.size() > IGUANA_MAXSCRIPTSIZE )
+ {
+ reason = "opreturn too big";
+ return(false);
+ }
nDataOut++;
//fprintf(stderr,"is OP_RETURN\n");
}
txnouttype whichType;
// get the scriptPubKey corresponding to this input:
const CScript& prevScript = prev.scriptPubKey;
+ //printf("Previous script: %s\n", prevScript.ToString().c_str());
+
if (!Solver(prevScript, whichType, vSolutions))
return false;
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
// IsStandardTx() will have already returned false
// and this method isn't called.
vector<vector<unsigned char> > stack;
+ //printf("Checking script: %s\n", tx.vin[i].scriptSig.ToString().c_str());
if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), consensusBranchId))
return false;
return nSigOps;
}
+/**
+ * Ensure that a coinbase transaction is structured according to the consensus rules of the
+ * chain
+ */
// Returns true when the coinbase is acceptable at height nHeight.
// If the coinbase value (or the expected block subsidy) reaches the
// ASSETCHAINS_TIMELOCKGTE threshold, the coinbase must commit to a
// CLTV time lock: vout[0] pays to a P2SH hash and vout[1] is an
// OP_RETURN carrying the full redeem script, whose CScriptID must
// match vout[0] and whose lock time must equal komodo_block_unlocktime(nHeight).
// Coinbases below the threshold are accepted unconditionally.
+bool ContextualCheckCoinbaseTransaction(const CTransaction& tx, const int nHeight)
+{
+ // if time locks are on, ensure that this coin base is time locked exactly as it should be
+ if (((uint64_t)(tx.GetValueOut()) >= ASSETCHAINS_TIMELOCKGTE) ||
+ (((nHeight >= 31680) || strcmp(ASSETCHAINS_SYMBOL, "VRSC") != 0) && komodo_ac_block_subsidy(nHeight) >= ASSETCHAINS_TIMELOCKGTE))
+ {
+ CScriptID scriptHash;
+
+ // to be valid, it must be a P2SH transaction and have an op_return in vout[1] that
+ // holds the full output script, which may include multisig, etc., but starts with
+ // the time lock verify of the correct time lock for this block height
+ if (tx.vout.size() == 2 &&
+ CScriptExt(tx.vout[0].scriptPubKey).IsPayToScriptHash(&scriptHash) &&
+ tx.vout[1].scriptPubKey.size() >= 7 && // minimum for any possible future to prevent out of bounds
+ tx.vout[1].scriptPubKey[0] == OP_RETURN)
+ {
+ opcodetype op;
+ std::vector<uint8_t> opretData = std::vector<uint8_t>();
+ CScript::const_iterator it = tx.vout[1].scriptPubKey.begin() + 1;
+ if (tx.vout[1].scriptPubKey.GetOp2(it, op, &opretData))
+ {
+ if (opretData.size() > 0 && opretData.data()[0] == OPRETTYPE_TIMELOCK)
+ {
+ int64_t unlocktime;
// NOTE(review): &opretData[opretData.size()] forms a past-the-end pointer via
// vector::operator[], which is technically UB; opretData.data() + opretData.size()
// is the well-defined spelling. Likewise &opretData[1] is UB if size() == 1 —
// the guard above only ensures size() > 0. Confirm and tighten.
+ CScriptExt opretScript = CScriptExt(&opretData[1], &opretData[opretData.size()]);
+
+ if (CScriptID(opretScript) == scriptHash &&
+ opretScript.IsCheckLockTimeVerify(&unlocktime) &&
+ komodo_block_unlocktime(nHeight) == unlocktime)
+ {
+ return(true);
+ }
+ }
+ }
+ }
// Threshold reached but no valid matching time-lock commitment found.
+ return(false);
+ }
// Below the time-lock threshold: no structural requirement applies.
+ return(true);
+}
+
/**
* Check a transaction contextually against a set of consensus rules valid at a given block height.
*
* 1. AcceptToMemoryPool calls CheckTransaction and this function.
* 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction)
* and ContextualCheckBlock (which calls this function).
+ * 3. The isInitBlockDownload argument is only to assist with testing.
*/
-bool ContextualCheckTransaction(const CTransaction& tx, CValidationState &state, const int nHeight, const int dosLevel)
+bool ContextualCheckTransaction(
+ const CTransaction& tx,
+ CValidationState &state,
+ const int nHeight,
+ const int dosLevel,
+ bool (*isInitBlockDownload)())
{
- bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
- bool isSprout = !isOverwinter;
-
+ bool overwinterActive = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
+ bool saplingActive = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING);
+ bool isSprout = !overwinterActive;
+
// If Sprout rules apply, reject transactions which are intended for Overwinter and beyond
if (isSprout && tx.fOverwintered) {
- return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter is not active yet"),
+ return state.DoS(isInitBlockDownload() ? 0 : dosLevel,
+ error("ContextualCheckTransaction(): ht.%d activates.%d dosLevel.%d overwinter is not active yet",
+ nHeight, Params().GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nActivationHeight, dosLevel),
REJECT_INVALID, "tx-overwinter-not-active");
}
-
- // If Overwinter rules apply:
- if (isOverwinter) {
+
+ if (saplingActive) {
+ // Reject transactions with valid version but missing overwintered flag
+ if (tx.nVersion >= SAPLING_MIN_TX_VERSION && !tx.fOverwintered) {
+ return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwintered flag must be set"),
+ REJECT_INVALID, "tx-overwintered-flag-not-set");
+ }
+
+ // Reject transactions with non-Sapling version group ID
+ if (tx.fOverwintered && tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) {
+ return state.DoS(dosLevel, error("CheckTransaction(): invalid Sapling tx version"),
+ REJECT_INVALID, "bad-sapling-tx-version-group-id");
+ }
+
+ // Reject transactions with invalid version
+ if (tx.fOverwintered && tx.nVersion < SAPLING_MIN_TX_VERSION ) {
+ return state.DoS(100, error("CheckTransaction(): Sapling version too low"),
+ REJECT_INVALID, "bad-tx-sapling-version-too-low");
+ }
+
+ // Reject transactions with invalid version
+ if (tx.fOverwintered && tx.nVersion > SAPLING_MAX_TX_VERSION ) {
+ return state.DoS(100, error("CheckTransaction(): Sapling version too high"),
+ REJECT_INVALID, "bad-tx-sapling-version-too-high");
+ }
+ } else if (overwinterActive) {
// Reject transactions with valid version but missing overwinter flag
if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"),
REJECT_INVALID, "tx-overwinter-flag-not-set");
}
-
+
+ // Reject transactions with non-Overwinter version group ID
+ if (tx.fOverwintered && tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) {
+ return state.DoS(dosLevel, error("CheckTransaction(): invalid Overwinter tx version"),
+ REJECT_INVALID, "bad-overwinter-tx-version-group-id");
+ }
+
// Reject transactions with invalid version
if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) {
return state.DoS(100, error("CheckTransaction(): overwinter version too high"),
REJECT_INVALID, "bad-tx-overwinter-version-too-high");
}
-
+ }
+
+ // Rules that apply to Overwinter or later:
+ if (overwinterActive) {
// Reject transactions intended for Sprout
if (!tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction: overwinter is active"),
// Check that all transactions are unexpired
if (IsExpiredTx(tx, nHeight)) {
- return state.DoS(dosLevel, error("ContextualCheckTransaction(): transaction is expired"), REJECT_INVALID, "tx-overwinter-expired");
+ // Don't increase banscore if the transaction only just expired
+ int expiredDosLevel = IsExpiredTx(tx, nHeight - 1) ? (dosLevel > 10 ? dosLevel : 10) : 0;
+ return state.DoS(expiredDosLevel, error("ContextualCheckTransaction(): transaction is expired"), REJECT_INVALID, "tx-overwinter-expired");
}
}
- if (!(tx.IsMint() || tx.vjoinsplit.empty())) {
+ // Rules that apply before Sapling:
+ if (!saplingActive) {
+ // Size limits
+ BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE_BEFORE_SAPLING); // sanity
+ if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_BEFORE_SAPLING)
+ return state.DoS(100, error("ContextualCheckTransaction(): size limits failed"),
+ REJECT_INVALID, "bad-txns-oversize");
+ }
+
+ uint256 dataToBeSigned;
+
+ if (!tx.IsMint() &&
+ (!tx.vjoinsplit.empty() ||
+ !tx.vShieldedSpend.empty() ||
+ !tx.vShieldedOutput.empty()))
+ {
auto consensusBranchId = CurrentEpochBranchId(nHeight, Params().GetConsensus());
// Empty output script.
CScript scriptCode;
- uint256 dataToBeSigned;
try {
dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId);
} catch (std::logic_error ex) {
REJECT_INVALID, "error-computing-signature-hash");
}
+ }
+
+ if (!(tx.IsMint() || tx.vjoinsplit.empty()))
+ {
BOOST_STATIC_ASSERT(crypto_sign_PUBLICKEYBYTES == 32);
// We rely on libsodium to check that the signature is canonical.
dataToBeSigned.begin(), 32,
tx.joinSplitPubKey.begin()
) != 0) {
- return state.DoS(100, error("CheckTransaction(): invalid joinsplit signature"),
- REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
+ return state.DoS(isInitBlockDownload() ? 0 : 100,
+ error("CheckTransaction(): invalid joinsplit signature"),
+ REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
+ }
+ }
+
+ if (tx.IsCoinBase())
+ {
+ if (!ContextualCheckCoinbaseTransaction(tx, nHeight))
+ return state.DoS(100, error("CheckTransaction(): invalid script data for coinbase time lock"),
+ REJECT_INVALID, "bad-txns-invalid-script-data-for-coinbase-time-lock");
+ }
+
+ if (!tx.vShieldedSpend.empty() ||
+ !tx.vShieldedOutput.empty())
+ {
+ auto ctx = librustzcash_sapling_verification_ctx_init();
+
+ for (const SpendDescription &spend : tx.vShieldedSpend) {
+ if (!librustzcash_sapling_check_spend(
+ ctx,
+ spend.cv.begin(),
+ spend.anchor.begin(),
+ spend.nullifier.begin(),
+ spend.rk.begin(),
+ spend.zkproof.begin(),
+ spend.spendAuthSig.begin(),
+ dataToBeSigned.begin()
+ ))
+ {
+ librustzcash_sapling_verification_ctx_free(ctx);
+ return state.DoS(100, error("ContextualCheckTransaction(): Sapling spend description invalid"),
+ REJECT_INVALID, "bad-txns-sapling-spend-description-invalid");
+ }
+ }
+
+ for (const OutputDescription &output : tx.vShieldedOutput) {
+ if (!librustzcash_sapling_check_output(
+ ctx,
+ output.cv.begin(),
+ output.cm.begin(),
+ output.ephemeralKey.begin(),
+ output.zkproof.begin()
+ ))
+ {
+ librustzcash_sapling_verification_ctx_free(ctx);
+ return state.DoS(100, error("ContextualCheckTransaction(): Sapling output description invalid"),
+ REJECT_INVALID, "bad-txns-sapling-output-description-invalid");
+ }
+ }
+
+ if (!librustzcash_sapling_final_check(
+ ctx,
+ tx.valueBalance,
+ tx.bindingSig.begin(),
+ dataToBeSigned.begin()
+ ))
+ {
+ librustzcash_sapling_verification_ctx_free(ctx);
+ return state.DoS(100, error("ContextualCheckTransaction(): Sapling binding signature invalid"),
+ REJECT_INVALID, "bad-txns-sapling-binding-signature-invalid");
}
+
+ librustzcash_sapling_verification_ctx_free(ctx);
}
return true;
}
{
static uint32_t counter;
if ( counter++ < 100 )
- printf("MEMPOOL: banned tx.%d being used at ht.%d vout.%d\n",k,(int32_t)chainActive.Tip()->nHeight,j);
+ printf("MEMPOOL: banned tx.%d being used at ht.%d vout.%d\n",k,(int32_t)chainActive.Tip()->GetHeight(),j);
return(false);
}
}
return state.DoS(100, error("CheckTransaction(): overwinter version too low"),
REJECT_INVALID, "bad-tx-overwinter-version-too-low");
}
- if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) {
+ if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID &&
+ tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) {
return state.DoS(100, error("CheckTransaction(): unknown tx version group id"),
REJECT_INVALID, "bad-tx-version-group-id");
}
REJECT_INVALID, "bad-tx-expiry-height-too-high");
}
}
-
- // Transactions can contain empty `vin` and `vout` so long as
- // `vjoinsplit` is non-empty.
- // Migrations may also have empty `vin`
- if (tx.vin.empty() && tx.vjoinsplit.empty())
+
+ // Transactions containing empty `vin` must have either non-empty
+ // `vjoinsplit` or non-empty `vShieldedSpend`.
+ if (tx.vin.empty() && tx.vjoinsplit.empty() && tx.vShieldedSpend.empty())
return state.DoS(10, error("CheckTransaction(): vin empty"),
REJECT_INVALID, "bad-txns-vin-empty");
- if (tx.vout.empty() && tx.vjoinsplit.empty())
+ // Transactions containing empty `vout` must have either non-empty
+ // `vjoinsplit` or non-empty `vShieldedOutput`.
+ if (tx.vout.empty() && tx.vjoinsplit.empty() && tx.vShieldedOutput.empty())
return state.DoS(10, error("CheckTransaction(): vout empty"),
REJECT_INVALID, "bad-txns-vout-empty");
// Size limits
- BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE); // sanity
- if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE)
+ BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE >= MAX_TX_SIZE_AFTER_SAPLING); // sanity
+ BOOST_STATIC_ASSERT(MAX_TX_SIZE_AFTER_SAPLING > MAX_TX_SIZE_BEFORE_SAPLING); // sanity
+ if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_AFTER_SAPLING)
return state.DoS(100, error("CheckTransaction(): size limits failed"),
REJECT_INVALID, "bad-txns-oversize");
if ( ASSETCHAINS_PRIVATE != 0 )
{
fprintf(stderr,"private chain nValue %.8f iscoinbase.%d\n",(double)txout.nValue/COIN,iscoinbase);
- if ( (txout.nValue > 0 && iscoinbase == 0) || tx.GetJoinSplitValueOut() > 0 )
+ if ( (txout.nValue > 0 && iscoinbase == 0) || tx.GetValueOut() > 0 )
return state.DoS(100, error("CheckTransaction(): this is a private chain, no public allowed"),REJECT_INVALID, "bad-txns-acprivacy-chain");
}
if ( txout.scriptPubKey.size() > IGUANA_MAXSCRIPTSIZE )
return state.DoS(100, error("CheckTransaction(): txout total out of range"),
REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
-
+
+ // Check for non-zero valueBalance when there are no Sapling inputs or outputs
+ if (tx.vShieldedSpend.empty() && tx.vShieldedOutput.empty() && tx.valueBalance != 0) {
+ return state.DoS(100, error("CheckTransaction(): tx.valueBalance has no sources or sinks"),
+ REJECT_INVALID, "bad-txns-valuebalance-nonzero");
+ }
+
+ // Check for overflow valueBalance
+ if (tx.valueBalance > MAX_MONEY || tx.valueBalance < -MAX_MONEY) {
+ return state.DoS(100, error("CheckTransaction(): abs(tx.valueBalance) too large"),
+ REJECT_INVALID, "bad-txns-valuebalance-toolarge");
+ }
+
+ if (tx.valueBalance <= 0) {
+ // NB: negative valueBalance "takes" money from the transparent value pool just as outputs do
+ nValueOut += -tx.valueBalance;
+
+ if (!MoneyRange(nValueOut)) {
+ return state.DoS(100, error("CheckTransaction(): txout total out of range"),
+ REJECT_INVALID, "bad-txns-txouttotal-toolarge");
+ }
+ }
+
// Ensure that joinsplit values are well-formed
BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
{
REJECT_INVALID, "bad-txns-txintotal-toolarge");
}
}
+
+ // Also check for Sapling
+ if (tx.valueBalance >= 0) {
+ // NB: positive valueBalance "adds" money to the transparent value pool, just as inputs do
+ nValueIn += tx.valueBalance;
+
+ if (!MoneyRange(nValueIn)) {
+ return state.DoS(100, error("CheckTransaction(): txin total out of range"),
+ REJECT_INVALID, "bad-txns-txintotal-toolarge");
+ }
+ }
}
-
-
+
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
}
// Check for duplicate joinsplit nullifiers in this transaction
- set<uint256> vJoinSplitNullifiers;
- BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
{
- BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers)
+ set<uint256> vJoinSplitNullifiers;
+ BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
+ {
+ BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers)
+ {
+ if (vJoinSplitNullifiers.count(nf))
+ return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"),
+ REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate");
+
+ vJoinSplitNullifiers.insert(nf);
+ }
+ }
+ }
+
+ // Check for duplicate sapling nullifiers in this transaction
+ {
+ set<uint256> vSaplingNullifiers;
+ BOOST_FOREACH(const SpendDescription& spend_desc, tx.vShieldedSpend)
{
- if (vJoinSplitNullifiers.count(nf))
+ if (vSaplingNullifiers.count(spend_desc.nullifier))
return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"),
- REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate");
-
- vJoinSplitNullifiers.insert(nf);
+ REJECT_INVALID, "bad-spend-description-nullifiers-duplicate");
+
+ vSaplingNullifiers.insert(spend_desc.nullifier);
}
}
if (tx.vjoinsplit.size() > 0)
return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"),
REJECT_INVALID, "bad-cb-has-joinsplits");
-
+
+ // A coinbase transaction cannot have spend descriptions or output descriptions
+ if (tx.vShieldedSpend.size() > 0)
+ return state.DoS(100, error("CheckTransaction(): coinbase has spend descriptions"),
+ REJECT_INVALID, "bad-cb-has-spend-description");
+ if (tx.vShieldedOutput.size() > 0)
+ return state.DoS(100, error("CheckTransaction(): coinbase has output descriptions"),
+ REJECT_INVALID, "bad-cb-has-output-description");
+
if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
return state.DoS(100, error("CheckTransaction(): coinbase script size"),
REJECT_INVALID, "bad-cb-length");
}
-bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,bool* pfMissingInputs, bool fRejectAbsurdFee)
+bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,bool* pfMissingInputs, bool fRejectAbsurdFee, int dosLevel)
{
AssertLockHeld(cs_main);
if (pfMissingInputs)
// Node operator can choose to reject tx by number of transparent inputs
static_assert(std::numeric_limits<size_t>::max() >= std::numeric_limits<int64_t>::max(), "size_t too small");
size_t limit = (size_t) GetArg("-mempooltxinputlimit", 0);
+ if (NetworkUpgradeActive(nextBlockHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER)) {
+ limit = 0;
+ }
if (limit > 0) {
size_t n = tx.vin.size();
if (n > limit) {
return false;
}
}
-
+
auto verifier = libzcash::ProofVerifier::Strict();
- if ( komodo_validate_interest(tx,chainActive.LastTip()->nHeight+1,chainActive.LastTip()->GetMedianTimePast() + 777,0) < 0 )
+ if ( komodo_validate_interest(tx,chainActive.LastTip()->GetHeight()+1,chainActive.LastTip()->GetMedianTimePast() + 777,0) < 0 )
{
//fprintf(stderr,"AcceptToMemoryPool komodo_validate_interest failure\n");
return error("AcceptToMemoryPool: komodo_validate_interest failed");
}
// DoS level set to 10 to be more forgiving.
// Check transaction contextually against the set of consensus rules which apply in the next block to be mined.
- if (!ContextualCheckTransaction(tx, state, nextBlockHeight, 10))
+ if (!ContextualCheckTransaction(tx, state, nextBlockHeight, (dosLevel == -1) ? 10 : dosLevel))
{
return error("AcceptToMemoryPool: ContextualCheckTransaction failed");
}
-
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
{
string reason;
if (Params().RequireStandard() && !IsStandardTx(tx, reason, nextBlockHeight))
{
- //fprintf(stderr,"AcceptToMemoryPool reject nonstandard transaction: %s\n",reason.c_str());
+ //
+ //fprintf(stderr,"AcceptToMemoryPool reject nonstandard transaction: %s\nscriptPubKey: %s\n",reason.c_str(),tx.vout[0].scriptPubKey.ToString().c_str());
return state.DoS(0,error("AcceptToMemoryPool: nonstandard transaction: %s", reason),REJECT_NONSTANDARD, reason);
}
// Only accept nLockTime-using transactions that can be mined in the next
//fprintf(stderr,"AcceptToMemoryPool reject non-final\n");
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
}
+
+ // if this is a valid stake transaction, don't put it in the mempool
+ CStakeParams p;
+ if (ValidateStakeTransaction(tx, p, false))
+ {
+ return state.DoS(0, false, REJECT_INVALID, "staking");
+ }
+
// is it already in the memory pool?
uint256 hash = tx.GetHash();
if (pool.exists(hash))
COutPoint outpoint = tx.vin[i].prevout;
if (pool.mapNextTx.count(outpoint))
{
- //static uint32_t counter;
// Disable replacement feature for now
- //if ( counter++ < 100 )
- //fprintf(stderr,"Disable replacement feature for now\n");
return false;
}
}
- BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit)
- {
- BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers)
- {
- if (pool.mapNullifiers.count(nf))
- {
+ BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
+ BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
+ if (pool.nullifierExists(nf, SPROUT)) {
fprintf(stderr,"pool.mapNullifiers.count\n");
return false;
}
}
}
+ for (const SpendDescription &spendDescription : tx.vShieldedSpend) {
+ if (pool.nullifierExists(spendDescription.nullifier, SAPLING)) {
+ return false;
+ }
+ }
}
-
+
{
CCoinsView dummy;
CCoinsViewCache view(&dummy);
if (pfMissingInputs)
*pfMissingInputs = true;
//fprintf(stderr,"missing inputs\n");
- return false;
+ return state.DoS(0, error("AcceptToMemoryPool: tx inputs not found"),REJECT_INVALID, "bad-txns-inputs-missing");
}
}
// Bring the best block into scope
view.GetBestBlock();
- nValueIn = view.GetValueIn(chainActive.LastTip()->nHeight,&interest,tx,chainActive.LastTip()->nTime);
+ nValueIn = view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime);
if ( 0 && interest != 0 )
fprintf(stderr,"add interest %.8f\n",(double)interest/COIN);
// we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
if (nSigOps > MAX_STANDARD_TX_SIGOPS)
{
fprintf(stderr,"accept failure.4\n");
- return state.DoS(0, error("AcceptToMemoryPool: too many sigops %s, %d > %d", hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),REJECT_NONSTANDARD, "bad-txns-too-many-sigops");
+ return state.DoS(1, error("AcceptToMemoryPool: too many sigops %s, %d > %d", hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),REJECT_NONSTANDARD, "bad-txns-too-many-sigops");
}
CAmount nValueOut = tx.GetValueOut();
LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
-
- if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000 && nFees > nValueOut/19 )
+
+ if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000 && nFees > nValueOut/19)
{
- fprintf(stderr,"accept failure.8\n");
- return error("AcceptToMemoryPool: absurdly high fees %s, %d > %d",hash.ToString(), nFees, ::minRelayTxFee.GetFee(nSize) * 10000);
+ string errmsg = strprintf("absurdly high fees %s, %d > %d",
+ hash.ToString(),
+ nFees, ::minRelayTxFee.GetFee(nSize) * 10000);
+ LogPrint("mempool", errmsg.c_str());
+ return state.Error("AcceptToMemoryPool: " + errmsg);
}
-
+
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
if ( KOMODO_CONNECTING <= 0 && chainActive.LastTip() != 0 )
{
flag = 1;
- KOMODO_CONNECTING = (1<<30) + (int32_t)chainActive.LastTip()->nHeight + 1;
+ KOMODO_CONNECTING = (1<<30) + (int32_t)chainActive.LastTip()->GetHeight() + 1;
}
if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId))
{
KOMODO_ON_DEMAND++;
pool.addUnchecked(hash, entry, !IsInitialBlockDownload());
- // Add memory address index
- if (fAddressIndex) {
- pool.addAddressIndex(entry, view);
- }
+ if (!tx.IsCoinImport())
+ {
+ // Add memory address index
+ if (fAddressIndex) {
+ pool.addAddressIndex(entry, view);
+ }
- // Add memory spent index
- if (fSpentIndex) {
- pool.addSpentIndex(entry, view);
+ // Add memory spent index
+ if (fSpentIndex) {
+ pool.addSpentIndex(entry, view);
+ }
}
}
else return(coins.vout[n].nValue);
}*/
// Best-effort local mempool insert: returns true if tx is already in the
// mempool, otherwise forwards to AcceptToMemoryPool (fLimitFree = false,
// fRejectAbsurdFee = true since fOverrideFees is false).
// This hunk adds an optional out-parameter so callers can observe the
// CValidationState; passing NULL (the backward-compatible default at the
// call sites) falls back to a local throwaway state object.
-bool myAddtomempool(CTransaction &tx)
+bool myAddtomempool(CTransaction &tx, CValidationState *pstate)
{
- CValidationState state; CTransaction Ltx; bool fMissingInputs,fOverrideFees = false;
+ CValidationState state;
+ if (!pstate)
+ pstate = &state;
+ CTransaction Ltx; bool fMissingInputs,fOverrideFees = false;
if ( mempool.lookup(tx.GetHash(),Ltx) == 0 )
- return(AcceptToMemoryPool(mempool, state, tx, false, &fMissingInputs, !fOverrideFees));
+ return(AcceptToMemoryPool(mempool, *pstate, tx, false, &fMissingInputs, !fOverrideFees));
else return(true);
}
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
- unsigned int nSize = fileout.GetSerializeSize(block);
+ unsigned int nSize = GetSerializeSize(fileout, block);
fileout << FLATDATA(messageStart) << nSize;
// Write block
if ( 0 && checkPOW != 0 )
{
komodo_block2pubkey33(pubkey33,(CBlock *)&block);
- if (!(CheckEquihashSolution(&block, Params()) && CheckProofOfWork(height,pubkey33,block.GetHash(), block.nBits, Params().GetConsensus(),block.nTime)))
+ if (!(CheckEquihashSolution(&block, Params()) && CheckProofOfWork(block, pubkey33, height, Params().GetConsensus())))
{
int32_t i; for (i=0; i<33; i++)
fprintf(stderr,"%02x",pubkey33[i]);
{
if ( pindex == 0 )
return false;
- if (!ReadBlockFromDisk(pindex->nHeight,block, pindex->GetBlockPos(),checkPOW))
+ if (!ReadBlockFromDisk(pindex->GetHeight(),block, pindex->GetBlockPos(),checkPOW))
return false;
if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
//uint64_t komodo_moneysupply(int32_t height);
extern char ASSETCHAINS_SYMBOL[KOMODO_ASSETCHAIN_MAXLEN];
+extern uint64_t ASSETCHAINS_ENDSUBSIDY[ASSETCHAINS_MAX_ERAS], ASSETCHAINS_REWARD[ASSETCHAINS_MAX_ERAS], ASSETCHAINS_HALVING[ASSETCHAINS_MAX_ERAS];
extern uint32_t ASSETCHAINS_MAGIC;
-extern uint64_t ASSETCHAINS_STAKED,ASSETCHAINS_ENDSUBSIDY,ASSETCHAINS_REWARD,ASSETCHAINS_HALVING,ASSETCHAINS_LINEAR,ASSETCHAINS_COMMISSION,ASSETCHAINS_SUPPLY;
+extern uint64_t ASSETCHAINS_STAKED,ASSETCHAINS_LINEAR,ASSETCHAINS_COMMISSION,ASSETCHAINS_SUPPLY;
extern uint8_t ASSETCHAINS_PUBLIC,ASSETCHAINS_PRIVATE;
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
- static uint64_t cached_subsidy; static int32_t cached_numhalvings;
int32_t numhalvings,i; uint64_t numerator; CAmount nSubsidy = 3 * COIN;
if ( ASSETCHAINS_SYMBOL[0] == 0 )
{
}
else
{
- if ( nHeight == 1 )
- {
- if ( ASSETCHAINS_STAKED == 0 || strcmp("VRSC",ASSETCHAINS_SYMBOL) == 0 )
- return(ASSETCHAINS_SUPPLY * COIN + (ASSETCHAINS_MAGIC & 0xffffff));
- else return(ASSETCHAINS_SUPPLY * COIN + ASSETCHAINS_MAGIC);
- }
- else if ( ASSETCHAINS_ENDSUBSIDY == 0 || nHeight < ASSETCHAINS_ENDSUBSIDY )
- {
- if ( ASSETCHAINS_REWARD == 0 )
- return(10000);
- else if ( ASSETCHAINS_ENDSUBSIDY != 0 && nHeight >= ASSETCHAINS_ENDSUBSIDY )
- return(0);
- else
- {
- nSubsidy = ASSETCHAINS_REWARD;
- if ( ASSETCHAINS_HALVING != 0 )
- {
- if ( (numhalvings= (nHeight / ASSETCHAINS_HALVING)) > 0 )
- {
- if ( numhalvings >= 64 && ASSETCHAINS_DECAY == 0 )
- return(0);
- if ( ASSETCHAINS_DECAY == 0 )
- nSubsidy >>= numhalvings;
- else if ( ASSETCHAINS_DECAY == 100000000 && ASSETCHAINS_ENDSUBSIDY != 0 )
- {
- numerator = (ASSETCHAINS_ENDSUBSIDY - nHeight);
- nSubsidy = (nSubsidy * numerator) / ASSETCHAINS_ENDSUBSIDY;
- }
- else
- {
- if ( cached_subsidy > 0 && cached_numhalvings == numhalvings )
- nSubsidy = cached_subsidy;
- else
- {
- for (i=0; i<numhalvings&&nSubsidy!=0; i++)
- nSubsidy = (nSubsidy * ASSETCHAINS_DECAY) / 100000000;
- cached_subsidy = nSubsidy;
- cached_numhalvings = numhalvings;
- }
- }
- }
- }
- }
- return(nSubsidy);
- } else return(0);
+ return(komodo_ac_block_subsidy(nHeight));
}
/*
// Mining slow start
// Subsidy is cut in half every 840,000 blocks which will occur approximately every 4 years.
//nSubsidy >>= halvings;
- return nSubsidy;
+ //return nSubsidy;
}
bool IsInitialBlockDownload()
{
const CChainParams& chainParams = Params();
+
+ // Once this function has returned false, it must remain false.
+ static std::atomic<bool> latchToFalse{false};
+ // Optimization: pre-test latch before taking the lock.
+ if (latchToFalse.load(std::memory_order_relaxed))
+ return false;
+
LOCK(cs_main);
+ // Re-test under cs_main: another thread may have set the latch while
+ // this one was waiting for the lock.
+ if (latchToFalse.load(std::memory_order_relaxed))
+ return false;
+
if (fImporting || fReindex)
{
//fprintf(stderr,"IsInitialBlockDownload: fImporting %d || %d fReindex\n",(int32_t)fImporting,(int32_t)fReindex);
return true;
}
+
if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
{
- //fprintf(stderr,"IsInitialBlockDownload: checkpoint -> initialdownload\n");
+ //fprintf(stderr,"IsInitialBlockDownload: checkpoint -> initialdownload - %d blocks\n", Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()));
return true;
}
- static bool lockIBDState = false;
- if (lockIBDState)
- {
- //fprintf(stderr,"lockIBDState true %d < %d\n",chainActive.Height(),pindexBestHeader->nHeight - 10);
- return false;
- }
- bool state; CBlockIndex *ptr = chainActive.Tip();
- if ( ptr == 0 )
- ptr = pindexBestHeader;
- else if ( pindexBestHeader != 0 && pindexBestHeader->nHeight > ptr->nHeight )
- ptr = pindexBestHeader;
- //if ( ASSETCHAINS_SYMBOL[0] == 0 )
- state = ((chainActive.Height() < ptr->nHeight - 24*60) ||
- ptr->GetBlockTime() < (GetTime() - chainParams.MaxTipAge()));
- //else state = (chainActive.Height() < ptr->nHeight - 24*60);
- //fprintf(stderr,"state.%d ht.%d vs %d, t.%u %u\n",state,(int32_t)chainActive.Height(),(uint32_t)ptr->nHeight,(int32_t)ptr->GetBlockTime(),(uint32_t)(GetTime() - chainParams.MaxTipAge()));
+
+ bool state;
+ arith_uint256 bigZero = arith_uint256();
+ arith_uint256 minWork = UintToArith256(chainParams.GetConsensus().nMinimumChainWork);
+ CBlockIndex *ptr = chainActive.Tip();
+
+ if (ptr == NULL)
+ return true;
+ // Still in IBD while the tip's accumulated chainPower is below the
+ // consensus nMinimumChainWork threshold (work component only; stake
+ // component is compared against zero here).
+ if (ptr->chainPower < CChainPower(ptr, bigZero, minWork))
+ return true;
+
+ // NOTE(review): ptr is chainActive.Tip() here (the old code could also
+ // pick pindexBestHeader), so the height clause compares the tip against
+ // itself minus 24*60 and the nMaxTipAge staleness clause is the
+ // effective condition — confirm this was intentional.
+ state = ((chainActive.Height() < ptr->GetHeight() - 24*60) ||
+ ptr->GetBlockTime() < (GetTime() - nMaxTipAge));
+
+ //fprintf(stderr,"state.%d ht.%d vs %d, t.%u %u\n",state,(int32_t)chainActive.Height(),(uint32_t)ptr->GetHeight(),(int32_t)ptr->GetBlockTime(),(uint32_t)(GetTime() - chainParams.MaxTipAge()));
if (!state)
{
- lockIBDState = true;
+ // Latch permanently to false once the node looks synced.
+ LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
+ latchToFalse.store(true, std::memory_order_relaxed);
}
return state;
}
-bool fLargeWorkForkFound = false;
-bool fLargeWorkInvalidChainFound = false;
-CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
+// Determine whether this node is in sync with the best known chain.
+// Returns 0 (false) when in sync. Otherwise returns a nonzero value: when
+// both a local tip and a best header are known and the tip trails the
+// headers, the number of blocks it trails by; in all other not-synced
+// cases (importing/reindexing, below checkpoint estimate, no tip yet,
+// or a longer chain reported by komodo_longestchain), 1 (true).
+int IsNotInSync()
+{
+ const CChainParams& chainParams = Params();
+
+ LOCK(cs_main);
+ if (fImporting || fReindex)
+ {
+ //fprintf(stderr,"IsNotInSync: fImporting %d || %d fReindex\n",(int32_t)fImporting,(int32_t)fReindex);
+ return true;
+ }
+ // Below the checkpointed block-count estimate we are certainly behind.
+ // (Single check — the original nested a redundant fCheckpointsEnabled
+ // test inside an identical outer if.)
+ if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
+ {
+ //fprintf(stderr,"IsNotInSync: checkpoint -> initialdownload chainActive.Height().%d GetTotalBlocksEstimate(chainParams.Checkpoints().%d\n", chainActive.Height(), Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()));
+ return true;
+ }
+
+ CBlockIndex *pbi = chainActive.Tip();
+ int longestchain = komodo_longestchain();
+ if ( !pbi ||
+ (pindexBestHeader == 0) ||
+ ((pindexBestHeader->GetHeight() - 1) > pbi->GetHeight()) ||
+ (longestchain != 0 && longestchain > pbi->GetHeight()) )
+ {
+ // Prefer reporting how many blocks we trail the best header by,
+ // when that gap is what triggered the not-in-sync verdict.
+ return (pbi && pindexBestHeader && (pindexBestHeader->GetHeight() - 1) > pbi->GetHeight()) ?
+ pindexBestHeader->GetHeight() - pbi->GetHeight() :
+ true;
+ }
+
+ return false;
+}
+
+static bool fLargeWorkForkFound = false;
+static bool fLargeWorkInvalidChainFound = false;
+static CBlockIndex *pindexBestForkTip = NULL;
+static CBlockIndex *pindexBestForkBase = NULL;
void CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
- // (we assume we don't get stuck on a fork before the last checkpoint)
+ // (we assume we don't get stuck on a fork before finishing our initial sync)
if (IsInitialBlockDownload())
return;
// If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
- if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 288)
+ if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->GetHeight() >= 288)
pindexBestForkTip = NULL;
- if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.LastTip()->nChainWork + (GetBlockProof(*chainActive.LastTip()) * 6)))
+ if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->chainPower > (chainActive.LastTip()->chainPower + (GetBlockProof(*chainActive.LastTip()) * 6))))
{
if (!fLargeWorkForkFound && pindexBestForkBase)
{
if (pindexBestForkTip && pindexBestForkBase)
{
LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
- pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
- pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
+ pindexBestForkBase->GetHeight(), pindexBestForkBase->phashBlock->ToString(),
+ pindexBestForkTip->GetHeight(), pindexBestForkTip->phashBlock->ToString());
fLargeWorkForkFound = true;
}
else
CBlockIndex* plonger = chainActive.LastTip();
while (pfork && pfork != plonger)
{
- while (plonger && plonger->nHeight > pfork->nHeight)
+ while (plonger && plonger->GetHeight() > pfork->GetHeight())
plonger = plonger->pprev;
if (pfork == plonger)
break;
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
// We define it this way because it allows us to only store the highest fork tip (+ base) which meets
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
- if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
- pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
- chainActive.Height() - pindexNewForkTip->nHeight < 72)
+ if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->GetHeight() > pindexBestForkTip->GetHeight())) &&
+ pindexNewForkTip->chainPower - pfork->chainPower > (GetBlockProof(*pfork) * 7) &&
+ chainActive.Height() - pindexNewForkTip->GetHeight() < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
void static InvalidChainFound(CBlockIndex* pindexNew)
{
- if (!pindexBestInvalid || pindexNew->chainPower > pindexBestInvalid->chainPower)
+ // Track the invalid block with the greatest accumulated chainPower
+ // (work + stake), consumed by the fork-warning logic.
+ if (!pindexBestInvalid || pindexNew->chainPower > pindexBestInvalid->chainPower)
pindexBestInvalid = pindexNew;
-
- LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
- pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
- log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
- pindexNew->GetBlockTime()));
+
+ // Log the invalid block, reporting both the work and the stake
+ // component of its chain power.
+ LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g log2_stake=%.8g date=%s\n", __func__,
+ pindexNew->GetBlockHash().ToString(), pindexNew->GetHeight(),
+ log(pindexNew->chainPower.chainWork.getdouble())/log(2.0),
+ log(pindexNew->chainPower.chainStake.getdouble())/log(2.0),
+ DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.LastTip();
assert (tip);
+ // Also log the current best tip for comparison.
- LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
- tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
+ LogPrintf("%s: current best=%s height=%d log2_work=%.8g log2_stake=%.8g date=%s\n", __func__,
+ tip->GetBlockHash().ToString(), chainActive.Height(),
+ log(tip->chainPower.chainWork.getdouble())/log(2.0),
+ log(tip->chainPower.chainStake.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
CheckForkWarningConditions();
}
}
}
}
- BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) { // spend nullifiers
- BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
- inputs.SetNullifier(nf, true);
- }
- }
+
+ // spend nullifiers
+ inputs.SetNullifiers(tx, true);
+
inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight); // add outputs
// Unorthodox state
// add a tombstone for the burnTx
AddImportTombstone(tx, inputs, nHeight);
}
-
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
LOCK(cs_main);
CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
- return pindexPrev->nHeight + 1;
+ return pindexPrev->GetHeight() + 1;
}
namespace Consensus {
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins *coins = inputs.AccessCoins(prevout.hash);
assert(coins);
-
+
if (coins->IsCoinBase()) {
- // Ensure that coinbases are matured
+ // ensure that output of coinbases are not still time locked
+ if (coins->TotalTxValue() >= ASSETCHAINS_TIMELOCKGTE)
+ {
+ uint64_t unlockTime = komodo_block_unlocktime(coins->nHeight);
+ if (nSpendHeight < unlockTime) {
+ return state.DoS(10,
+ error("CheckInputs(): tried to spend coinbase that is timelocked until block %d", unlockTime),
+ REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
+ }
+ }
+
+ // Ensure that coinbases are matured, no DoS as retry may work later
if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
- return state.Invalid(
- error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),
- REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
+ return state.DoS(0,
+ error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),
+ REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
}
-
+
// Ensure that coinbases cannot be spent to transparent outputs
// Disabled on regtest
if (fCoinbaseEnforcedProtectionEnabled &&
consensusParams.fCoinbaseMustBeProtected &&
- !tx.vout.empty()) {
- return state.Invalid(
- error("CheckInputs(): tried to spend coinbase with transparent outputs"),
- REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
+ !(tx.vout.size() == 0 || (tx.vout.size() == 1 && tx.vout[0].nValue == 0)) &&
+ (strcmp(ASSETCHAINS_SYMBOL, "VRSC") != 0 || (nSpendHeight >= 12800 && coins->nHeight >= 12800))) {
+ return state.DoS(100,
+ error("CheckInputs(): tried to spend coinbase with transparent outputs"),
+ REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
}
}
// Check for negative or overflow input values
nValueIn += coins->vout[prevout.n].nValue;
#ifdef KOMODO_ENABLE_INTEREST
- if ( ASSETCHAINS_SYMBOL[0] == 0 && nSpendHeight > 60000 )//chainActive.LastTip() != 0 && chainActive.LastTip()->nHeight >= 60000 )
+ if ( ASSETCHAINS_SYMBOL[0] == 0 && nSpendHeight > 60000 )//chainActive.LastTip() != 0 && chainActive.LastTip()->GetHeight() >= 60000 )
{
if ( coins->vout[prevout.n].nValue >= 10*COIN )
{
REJECT_INVALID, "bad-txns-inputvalues-outofrange");
}
-
- nValueIn += tx.GetJoinSplitValueIn();
+
+ nValueIn += tx.GetShieldedValueIn();
if (!MoneyRange(nValueIn))
- return state.DoS(100, error("CheckInputs(): vpub_old values out of range"),
+ return state.DoS(100, error("CheckInputs(): shielded input to transparent value pool out of range"),
REJECT_INVALID, "bad-txns-inputvalues-outofrange");
if (nValueIn < tx.GetValueOut())
// While checking, GetBestBlock() refers to the parent block.
// This is also true for mempool checks.
CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
- int nSpendHeight = pindexPrev->nHeight + 1;
+ int nSpendHeight = pindexPrev->GetHeight() + 1;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const COutPoint &prevout = tx.vin[i].prevout;
return error("%s: OpenUndoFile failed", __func__);
// Write index header
- unsigned int nSize = fileout.GetSerializeSize(blockundo);
+ unsigned int nSize = GetSerializeSize(fileout, blockundo);
fileout << FLATDATA(messageStart) << nSize;
// Write undo data
// Record Notarisations
NotarisationsInBlock notarisations = ScanBlockNotarisations(block, height);
if (notarisations.size() > 0) {
- CLevelDBBatch batch;
+ CDBBatch batch = CDBBatch(*pnotarisations);
batch.Write(block.GetHash(), notarisations);
WriteBackNotarisations(notarisations, batch);
pnotarisations->WriteBatch(batch, true);
// Delete from notarisations cache
NotarisationsInBlock nibs;
if (GetBlockNotarisations(block.GetHash(), nibs)) {
- CLevelDBBatch batch;
+ CDBBatch batch = CDBBatch(*pnotarisations);
batch.Erase(block.GetHash());
EraseBackNotarisations(nibs, batch);
pnotarisations->WriteBatch(batch, true);
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+2, out.scriptPubKey.begin()+22);
// undo receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->nHeight, i, hash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->GetHeight(), i, hash, k, false), out.nValue));
// undo unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(2, uint160(hashBytes), hash, k), CAddressUnspentValue()));
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+3, out.scriptPubKey.begin()+23);
// undo receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->nHeight, i, hash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->GetHeight(), i, hash, k, false), out.nValue));
// undo unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, uint160(hashBytes), hash, k), CAddressUnspentValue()));
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+1, out.scriptPubKey.begin()+34);
// undo receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, hash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, hash, k, false), out.nValue));
// undo unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), hash, k), CAddressUnspentValue()));
vector<unsigned char> hashBytes(out.scriptPubKey.begin(), out.scriptPubKey.end());
// undo receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, hash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, hash, k, false), out.nValue));
// undo unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), hash, k), CAddressUnspentValue()));
CCoinsModifier outs = view.ModifyCoins(hash);
outs->ClearUnspendable();
- CCoins outsBlock(tx, pindex->nHeight);
+ CCoins outsBlock(tx, pindex->GetHeight());
// The CCoins serialization does not serialize negative numbers.
// No network rules currently depend on the version here, so an inconsistency is harmless
// but it must be corrected before txout nversion ever influences a network rule.
}
// unspend nullifiers
- BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
- BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
- view.SetNullifier(nf, false);
- }
- }
-
+ view.SetNullifiers(tx, false);
+
// restore inputs
if (!tx.IsMint()) {
const CTxUndo &txundo = blockUndo.vtxundo[i-1];
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin()+2, prevout.scriptPubKey.begin()+22);
// undo spending activity
- addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->nHeight, i, hash, j, true), prevout.nValue * -1));
+ addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1));
// restore unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(2, uint160(hashBytes), input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight)));
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin()+3, prevout.scriptPubKey.begin()+23);
// undo spending activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->nHeight, i, hash, j, true), prevout.nValue * -1));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1));
// restore unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, uint160(hashBytes), input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight)));
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin()+1, prevout.scriptPubKey.begin()+34);
// undo spending activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, hash, j, true), prevout.nValue * -1));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1));
// restore unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight)));
vector<unsigned char> hashBytes(prevout.scriptPubKey.begin(), prevout.scriptPubKey.end());
// undo spending activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, hash, j, true), prevout.nValue * -1));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1));
// restore unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight)));
}
}
- // set the old best anchor back
- view.PopAnchor(blockUndo.old_tree_root);
-
+ // set the old best Sprout anchor back
+ view.PopAnchor(blockUndo.old_sprout_tree_root, SPROUT);
+
+ // set the old best Sapling anchor back
+ // We can get this from the `hashFinalSaplingRoot` of the last block
+ // However, this is only reliable if the last block was on or after
+ // the Sapling activation height. Otherwise, the last anchor was the
+ // empty root.
+ if (NetworkUpgradeActive(pindex->pprev->GetHeight(), Params().GetConsensus(), Consensus::UPGRADE_SAPLING)) {
+ view.PopAnchor(pindex->pprev->hashFinalSaplingRoot, SAPLING);
+ } else {
+ view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING);
+ }
+
// move best block pointer to prevout block
view.SetBestBlock(pindex->pprev->GetBlockHash());
const int FIFTY_YEARS = 50*365*24*60*60;
double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);
- if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
+ if (bestHeader->GetHeight() > BLOCKS_EXPECTED)
{
- // Many fewer blocks than expected: alert!
- strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
- nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
- }
- else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
- {
- // Many more blocks than expected: alert!
- strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
- nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
+ if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
+ {
+ // Many fewer blocks than expected: alert!
+ strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
+ nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
+ }
+ else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
+ {
+ // Many more blocks than expected: alert!
+ strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
+ nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
+ }
}
if (!strWarning.empty())
{
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck,bool fCheckPOW)
{
const CChainParams& chainparams = Params();
- if ( KOMODO_STOPAT != 0 && pindex->nHeight > KOMODO_STOPAT )
+ if ( KOMODO_STOPAT != 0 && pindex->GetHeight() > KOMODO_STOPAT )
return(false);
- //fprintf(stderr,"connectblock ht.%d\n",(int32_t)pindex->nHeight);
+ //fprintf(stderr,"connectblock ht.%d\n",(int32_t)pindex->GetHeight());
AssertLockHeld(cs_main);
bool fExpensiveChecks = true;
if (fCheckpointsEnabled) {
CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
- if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
+ if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->GetHeight()) == pindex) {
// This block is an ancestor of a checkpoint: disable script checks
fExpensiveChecks = false;
}
auto disabledVerifier = libzcash::ProofVerifier::Disabled();
int32_t futureblock;
// Check it again to verify JoinSplit proofs, and in case a previous version let a bad block in
- if (!CheckBlock(&futureblock,pindex->nHeight,pindex,block, state, fExpensiveChecks ? verifier : disabledVerifier, fCheckPOW, !fJustCheck) || futureblock != 0 )
+ if (!CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, fExpensiveChecks ? verifier : disabledVerifier, fCheckPOW, !fJustCheck) || futureblock != 0 )
{
//fprintf(stderr,"checkblock failure in connectblock futureblock.%d\n",futureblock);
return false;
if (!fJustCheck) {
view.SetBestBlock(pindex->GetBlockHash());
// Before the genesis block, there was an empty tree
- ZCIncrementalMerkleTree tree;
- pindex->hashAnchor = tree.root();
+ SproutMerkleTree tree;
+ pindex->hashSproutAnchor = tree.root();
// The genesis block contained no JoinSplits
- pindex->hashAnchorEnd = pindex->hashAnchor;
+ pindex->hashFinalSproutRoot = pindex->hashSproutAnchor;
}
return true;
}
- bool fScriptChecks = (!fCheckpointsEnabled || pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints()));
- //if ( KOMODO_TESTNET_EXPIRATION != 0 && pindex->nHeight > KOMODO_TESTNET_EXPIRATION ) // "testnet"
+ bool fScriptChecks = (!fCheckpointsEnabled || pindex->GetHeight() >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints()));
+ //if ( KOMODO_TESTNET_EXPIRATION != 0 && pindex->GetHeight() > KOMODO_TESTNET_EXPIRATION ) // "testnet"
// return(false);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
std::vector<std::pair<CSpentIndexKey, CSpentIndexValue> > spentIndex;
// Construct the incremental merkle tree at the current
// block position,
- auto old_tree_root = view.GetBestAnchor();
+ auto old_sprout_tree_root = view.GetBestAnchor(SPROUT);
// saving the top anchor in the block index as we go.
if (!fJustCheck) {
- pindex->hashAnchor = old_tree_root;
+ pindex->hashSproutAnchor = old_sprout_tree_root;
}
- ZCIncrementalMerkleTree tree;
+
+ SproutMerkleTree sprout_tree;
+
// This should never fail: we should always be able to get the root
// that is on the tip of our chain
- assert(view.GetAnchorAt(old_tree_root, tree));
-
+ assert(view.GetSproutAnchorAt(old_sprout_tree_root, sprout_tree));
+
{
// Consistency check: the root of the tree we're given should
// match what we asked for.
- assert(tree.root() == old_tree_root);
+ assert(sprout_tree.root() == old_sprout_tree_root);
}
-
+
+ SaplingMerkleTree sapling_tree;
+ assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree));
+
// Grab the consensus branch ID for the block's height
- auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, Params().GetConsensus());
+ auto consensusBranchId = CurrentEpochBranchId(pindex->GetHeight(), Params().GetConsensus());
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
- //fprintf(stderr,"ht.%d vout0 t%u\n",pindex->nHeight,tx.nLockTime);
+ //fprintf(stderr,"ht.%d vout0 t%u\n",pindex->GetHeight(),tx.nLockTime);
if (!tx.IsMint())
{
if (!view.HaveInputs(tx))
if (fAddressIndex && addressType > 0) {
// record spending activity
- addressIndex.push_back(make_pair(CAddressIndexKey(addressType, hashBytes, pindex->nHeight, i, txhash, j, true), prevout.nValue * -1));
+ addressIndex.push_back(make_pair(CAddressIndexKey(addressType, hashBytes, pindex->GetHeight(), i, txhash, j, true), prevout.nValue * -1));
// remove address from unspent index
addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(addressType, hashBytes, input.prevout.hash, input.prevout.n), CAddressUnspentValue()));
if (fSpentIndex) {
// add the spent index to determine the txid and input that spent an output
// and to find the amount and address from an input
- spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue(txhash, j, pindex->nHeight, prevout.nValue, addressType, hashBytes)));
+ spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue(txhash, j, pindex->GetHeight(), prevout.nValue, addressType, hashBytes)));
}
}
if (!tx.IsCoinBase())
{
- nFees += view.GetValueIn(chainActive.LastTip()->nHeight,&interest,tx,chainActive.LastTip()->nTime) - tx.GetValueOut();
+ nFees += view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime) - tx.GetValueOut();
sum += interest;
std::vector<CScriptCheck> vChecks;
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+2, out.scriptPubKey.begin()+22);
// record receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->nHeight, i, txhash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(2, uint160(hashBytes), pindex->GetHeight(), i, txhash, k, false), out.nValue));
// record unspent output
- addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(2, uint160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->nHeight)));
+ addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(2, uint160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight())));
}
else if (out.scriptPubKey.IsPayToPublicKeyHash()) {
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+3, out.scriptPubKey.begin()+23);
// record receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->nHeight, i, txhash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, uint160(hashBytes), pindex->GetHeight(), i, txhash, k, false), out.nValue));
// record unspent output
- addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, uint160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->nHeight)));
+ addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, uint160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight())));
}
else if (out.scriptPubKey.IsPayToPublicKey()) {
vector<unsigned char> hashBytes(out.scriptPubKey.begin()+1, out.scriptPubKey.begin()+34);
// record receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, txhash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, txhash, k, false), out.nValue));
// record unspent output
- addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->nHeight)));
+ addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight())));
}
else if (out.scriptPubKey.IsPayToCryptoCondition()) {
vector<unsigned char> hashBytes(out.scriptPubKey.begin(), out.scriptPubKey.end());
// record receiving activity
- addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->nHeight, i, txhash, k, false), out.nValue));
+ addressIndex.push_back(make_pair(CAddressIndexKey(1, Hash160(hashBytes), pindex->GetHeight(), i, txhash, k, false), out.nValue));
// record unspent output
- addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->nHeight)));
+ addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(1, Hash160(hashBytes), txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight())));
}
else {
}
//if ( ASSETCHAINS_SYMBOL[0] == 0 )
- // komodo_earned_interest(pindex->nHeight,sum);
+ // komodo_earned_interest(pindex->GetHeight(),sum);
CTxUndo undoDummy;
if (i > 0) {
blockundo.vtxundo.push_back(CTxUndo());
}
- UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
+ UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->GetHeight());
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 ¬e_commitment, joinsplit.commitments) {
// Insert the note commitments into our temporary tree.
-
- tree.append(note_commitment);
+
+ sprout_tree.append(note_commitment);
}
}
-
+
+ BOOST_FOREACH(const OutputDescription &outputDescription, tx.vShieldedOutput) {
+ sapling_tree.append(outputDescription.cm);
+ }
+
vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
-
- view.PushAnchor(tree);
+
+ view.PushAnchor(sprout_tree);
+ view.PushAnchor(sapling_tree);
if (!fJustCheck) {
- pindex->hashAnchorEnd = tree.root();
+ pindex->hashFinalSproutRoot = sprout_tree.root();
+ }
+ blockundo.old_sprout_tree_root = old_sprout_tree_root;
+
+ // If Sapling is active, block.hashFinalSaplingRoot must be the
+ // same as the root of the Sapling tree
+ if (NetworkUpgradeActive(pindex->GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_SAPLING)) {
+ if (block.hashFinalSaplingRoot != sapling_tree.root()) {
+ return state.DoS(100,
+ error("ConnectBlock(): block's hashFinalSaplingRoot is incorrect"),
+ REJECT_INVALID, "bad-sapling-root-in-block");
+ }
}
- blockundo.old_tree_root = old_tree_root;
-
int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
- CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()) + sum;
+ CAmount blockReward = nFees + GetBlockSubsidy(pindex->GetHeight(), chainparams.GetConsensus()) + sum;
if ( ASSETCHAINS_OVERRIDE_PUBKEY33[0] != 0 && ASSETCHAINS_COMMISSION != 0 )
{
uint64_t checktoshis;
else fprintf(stderr,"checktoshis %.8f numvouts %d\n",dstr(checktoshis),(int32_t)block.vtx[0].vout.size());
}
}
+ if (ASSETCHAINS_SYMBOL[0] != 0 && pindex->GetHeight() == 1 && block.vtx[0].GetValueOut() != blockReward)
+ {
+ return state.DoS(100, error("ConnectBlock(): coinbase for block 1 pays wrong amount (actual=%d vs correct=%d)", block.vtx[0].GetValueOut(), blockReward),
+ REJECT_INVALID, "bad-cb-amount");
+ }
if ( block.vtx[0].GetValueOut() > blockReward+1 )
{
- if ( ASSETCHAINS_SYMBOL[0] != 0 || pindex->nHeight >= KOMODO_NOTARIES_HEIGHT1 || block.vtx[0].vout[0].nValue > blockReward )
+ if ( ASSETCHAINS_SYMBOL[0] != 0 || pindex->GetHeight() >= KOMODO_NOTARIES_HEIGHT1 || block.vtx[0].vout[0].nValue > blockReward )
{
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0].GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
} else if ( IS_KOMODO_NOTARY != 0 )
- fprintf(stderr,"allow nHeight.%d coinbase %.8f vs %.8f interest %.8f\n",(int32_t)pindex->nHeight,dstr(block.vtx[0].GetValueOut()),dstr(blockReward),dstr(sum));
+ fprintf(stderr,"allow nHeight.%d coinbase %.8f vs %.8f interest %.8f\n",(int32_t)pindex->GetHeight(),dstr(block.vtx[0].GetValueOut()),dstr(blockReward),dstr(sum));
}
if (!control.Wait())
return state.DoS(100, false);
// Move this if BLOCK_VALID_CONSENSUS is ever altered.
static_assert(BLOCK_VALID_CONSENSUS == BLOCK_VALID_SCRIPTS,
"nCachedBranchId must be set after all consensus rules have been validated.");
- if (IsActivationHeightForAnyUpgrade(pindex->nHeight, Params().GetConsensus())) {
+ if (IsActivationHeightForAnyUpgrade(pindex->GetHeight(), Params().GetConsensus())) {
pindex->nStatus |= BLOCK_ACTIVATES_UPGRADE;
- pindex->nCachedBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
+ pindex->nCachedBranchId = CurrentEpochBranchId(pindex->GetHeight(), chainparams.GetConsensus());
} else if (pindex->pprev) {
pindex->nCachedBranchId = pindex->pprev->nCachedBranchId;
}
setDirtyBlockIndex.insert(pindex);
}
- ConnectNotarisations(block, pindex->nHeight);
+ ConnectNotarisations(block, pindex->GetHeight());
if (fTxIndex)
if (!pblocktree->WriteTxIndex(vPos))
progress = (longestchain > 0 ) ? (double) chainActive.Height() / longestchain : 1.0;
}
- LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
+ LogPrintf("%s: new best=%s height=%d log2_work=%.8g log2_stake=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
chainActive.LastTip()->GetBlockHash().ToString(), chainActive.Height(),
- log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.LastTip()->nChainTx,
+ log(chainActive.Tip()->chainPower.chainWork.getdouble())/log(2.0),
+ log(chainActive.Tip()->chainPower.chainStake.getdouble())/log(2.0),
+ (unsigned long)chainActive.LastTip()->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.LastTip()->GetBlockTime()), progress,
pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
const CBlockIndex* pindex = chainActive.Tip();
for (int i = 0; i < 100 && pindex != NULL; i++)
{
- if (pindex->nVersion > CBlock::CURRENT_VERSION)
+ if (pindex->nVersion > CBlockHeader::GetVersionByHeight(pindex->GetHeight()))
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
- LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
+ LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::VERUS_V2);
if (nUpgraded > 100/2)
{
// strMiscWarning is read by GetWarnings(), called by the JSON-RPC code to warn the user:
if (!ReadBlockFromDisk(block, pindexDelete,1))
return AbortNode(state, "Failed to read block");
// Apply the block atomically to the chain state.
- uint256 anchorBeforeDisconnect = pcoinsTip->GetBestAnchor();
+ uint256 sproutAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SPROUT);
+ uint256 saplingAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SAPLING);
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
pindexDelete->zfunds = 0;
LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
- uint256 anchorAfterDisconnect = pcoinsTip->GetBestAnchor();
+ uint256 sproutAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SPROUT);
+ uint256 saplingAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SAPLING);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
- if (!fBare)
- {
- // Resurrect mempool transactions from the disconnected block.
- //BOOST_FOREACH(const CTransaction &tx, block.vtx) {
+ if (!fBare) {
+ // resurrect mempool transactions from the disconnected block.
for (int i = 0; i < block.vtx.size(); i++)
{
- CTransaction &tx = block.vtx[i];
// ignore validation errors in resurrected transactions
+ CTransaction &tx = block.vtx[i];
list<CTransaction> removed;
CValidationState stateDummy;
- if (tx.IsCoinBase() || ((i == (block.vtx.size() - 1)) && komodo_isPoS((CBlock *)&block) != 0) || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
+
+ // don't keep staking or invalid transactions
+ if (tx.IsCoinBase() || ((i == (block.vtx.size() - 1)) && (ASSETCHAINS_STAKED && komodo_isPoS((CBlock *)&block) != 0)) || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
+ {
mempool.remove(tx, removed, true);
+ }
+
+ // if this is a staking tx and we are on Verus with Sapling (nothing-at-stake
+ // protection) active, save the staking tx as possible evidence of cheating
+ if (NetworkUpgradeActive(pindexDelete->GetHeight(), Params().GetConsensus(), Consensus::UPGRADE_SAPLING) &&
+ ASSETCHAINS_LWMAPOS && (i == (block.vtx.size() - 1)) &&
+ (block.IsVerusPOSBlock()))
+ {
+ CTxHolder txh = CTxHolder(block.vtx[i], pindexDelete->GetHeight());
+ cheatList.Add(txh);
+ }
+ }
+ if (sproutAnchorBeforeDisconnect != sproutAnchorAfterDisconnect) {
+ // The anchor may not change between block disconnects,
+ // in which case we don't want to evict from the mempool yet!
+ mempool.removeWithAnchor(sproutAnchorBeforeDisconnect, SPROUT);
}
- if (anchorBeforeDisconnect != anchorAfterDisconnect) {
+ if (saplingAnchorBeforeDisconnect != saplingAnchorAfterDisconnect) {
// The anchor may not change between block disconnects,
// in which case we don't want to evict from the mempool yet!
- mempool.removeWithAnchor(anchorBeforeDisconnect);
+ mempool.removeWithAnchor(saplingAnchorBeforeDisconnect, SAPLING);
}
}
// Update chainActive and related variables.
UpdateTip(pindexDelete->pprev);
+
// Get the current commitment tree
- ZCIncrementalMerkleTree newTree;
- assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), newTree));
+ SproutMerkleTree newSproutTree;
+ SaplingMerkleTree newSaplingTree;
+ assert(pcoinsTip->GetSproutAnchorAt(pcoinsTip->GetBestAnchor(SPROUT), newSproutTree));
+ assert(pcoinsTip->GetSaplingAnchorAt(pcoinsTip->GetBestAnchor(SAPLING), newSaplingTree));
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
- //BOOST_FOREACH(const CTransaction &tx, block.vtx) {
- // SyncWithWallets(tx, NULL);
- //}
- // Update cached incremental witnesses
- //fprintf(stderr,"chaintip false\n");
for (int i = 0; i < block.vtx.size(); i++)
{
CTransaction &tx = block.vtx[i];
- if ( (i == (block.vtx.size() - 1)) && komodo_isPoS((CBlock *)&block) != 0 )
+ //if ((i == (block.vtx.size() - 1)) && ((ASSETCHAINS_LWMAPOS && block.IsVerusPOSBlock()) || (ASSETCHAINS_STAKED != 0 && (komodo_isPoS((CBlock *)&block) != 0))))
+ if ((i == (block.vtx.size() - 1)) && (ASSETCHAINS_STAKED != 0 && (komodo_isPoS((CBlock *)&block) != 0)))
{
EraseFromWallets(tx.GetHash());
}
SyncWithWallets(tx, NULL);
}
}
- GetMainSignals().ChainTip(pindexDelete, &block, newTree, false);
+ // Update cached incremental witnesses
+ GetMainSignals().ChainTip(pindexDelete, &block, newSproutTree, newSaplingTree, false);
return true;
}
return AbortNode(state, "Failed to read block");
pblock = █
}
- KOMODO_CONNECTING = (int32_t)pindexNew->nHeight;
+ KOMODO_CONNECTING = (int32_t)pindexNew->GetHeight();
// Get the current commitment tree
- ZCIncrementalMerkleTree oldTree;
- assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), oldTree));
+ SproutMerkleTree oldSproutTree;
+ SaplingMerkleTree oldSaplingTree;
+ assert(pcoinsTip->GetSproutAnchorAt(pcoinsTip->GetBestAnchor(SPROUT), oldSproutTree));
+ assert(pcoinsTip->GetSaplingAnchorAt(pcoinsTip->GetBestAnchor(SAPLING), oldSaplingTree));
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
// Remove conflicting transactions from the mempool.
list<CTransaction> txConflicted;
- mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
+ mempool.removeForBlock(pblock->vtx, pindexNew->GetHeight(), txConflicted, !IsInitialBlockDownload());
// Remove transactions that expire at new block height from mempool
- mempool.removeExpired(pindexNew->nHeight);
+ mempool.removeExpired(pindexNew->GetHeight());
// Update chainActive & related variables.
UpdateTip(pindexNew);
SyncWithWallets(tx, pblock);
}
// Update cached incremental witnesses
- //fprintf(stderr,"chaintip true\n");
- GetMainSignals().ChainTip(pindexNew, pblock, oldTree, true);
-
- EnforceNodeDeprecation(pindexNew->nHeight);
+ GetMainSignals().ChainTip(pindexNew, pblock, oldSproutTree, oldSaplingTree, true);
+
+ EnforceNodeDeprecation(pindexNew->GetHeight());
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
- if ( KOMODO_LONGESTCHAIN != 0 && pindexNew->nHeight >= KOMODO_LONGESTCHAIN )
+ if ( KOMODO_LONGESTCHAIN != 0 && pindexNew->GetHeight() >= KOMODO_LONGESTCHAIN )
KOMODO_INSYNC = 1;
else KOMODO_INSYNC = 0;
- //fprintf(stderr,"connect.%d insync.%d\n",(int32_t)pindexNew->nHeight,KOMODO_INSYNC);
+ //fprintf(stderr,"connect.%d insync.%d\n",(int32_t)pindexNew->GetHeight(),KOMODO_INSYNC);
if ( ASSETCHAINS_SYMBOL[0] == 0 && KOMODO_INSYNC != 0 )
komodo_broadcast(pblock,8);
return true;
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
- assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
+ assert(pindexTest->nChainTx || pindexTest->GetHeight() == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
if (fFailedChain || fMissingData) {
// Candidate chain is not usable (either invalid or missing data)
- if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
+ if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->chainPower > pindexBestInvalid->chainPower))
pindexBestInvalid = pindexNew;
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
bool fInvalidFound = false;
const CBlockIndex *pindexOldTip = chainActive.Tip();
const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
-
+
// - On ChainDB initialization, pindexOldTip will be null, so there are no removable blocks.
// - If pindexMostWork is in a chain that doesn't have the same genesis block as our chain,
// then pindexFork will be null, and we would need to remove the entire chain including
// our genesis block. In practice this (probably) won't happen because of checks elsewhere.
- auto reorgLength = pindexOldTip ? pindexOldTip->nHeight - (pindexFork ? pindexFork->nHeight : -1) : 0;
+ auto reorgLength = pindexOldTip ? pindexOldTip->GetHeight() - (pindexFork ? pindexFork->GetHeight() : -1) : 0;
static_assert(MAX_REORG_LENGTH > 0, "We must be able to reorg some distance");
if (reorgLength > MAX_REORG_LENGTH) {
auto msg = strprintf(_(
"This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
), reorgLength, MAX_REORG_LENGTH) + "\n\n" +
_("Reorganization details") + ":\n" +
- "- " + strprintf(_("Current tip: %s, height %d, work %s"),
- pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight, pindexOldTip->nChainWork.GetHex()) + "\n" +
- "- " + strprintf(_("New tip: %s, height %d, work %s"),
- pindexMostWork->phashBlock->GetHex(), pindexMostWork->nHeight, pindexMostWork->nChainWork.GetHex()) + "\n" +
+ "- " + strprintf(_("Current tip: %s, height %d, work %s\nstake %s"),
+ pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight(), pindexOldTip->chainPower.chainWork.GetHex(),
+ pindexOldTip->chainPower.chainStake.GetHex()) + "\n" +
+ "- " + strprintf(_("New tip: %s, height %d, work %s\nstake %s"),
+ pindexMostWork->phashBlock->GetHex(), pindexMostWork->GetHeight(), pindexMostWork->chainPower.chainWork.GetHex(),
+ pindexMostWork->chainPower.chainStake.GetHex()) + "\n" +
"- " + strprintf(_("Fork point: %s %s, height %d"),
- ASSETCHAINS_SYMBOL,pindexFork->phashBlock->GetHex(), pindexFork->nHeight) + "\n\n" +
+ ASSETCHAINS_SYMBOL,pindexFork->phashBlock->GetHex(), pindexFork->GetHeight()) + "\n\n" +
_("Please help, human!");
LogPrintf("*** %s\n", msg);
uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
+
while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
if (!DisconnectTip(state))
return false;
if ( KOMODO_REWIND != 0 )
{
CBlockIndex *tipindex;
- fprintf(stderr,">>>>>>>>>>> rewind start ht.%d -> KOMODO_REWIND.%d\n",chainActive.LastTip()->nHeight,KOMODO_REWIND);
- while ( KOMODO_REWIND > 0 && (tipindex= chainActive.LastTip()) != 0 && tipindex->nHeight > KOMODO_REWIND )
+ fprintf(stderr,">>>>>>>>>>> rewind start ht.%d -> KOMODO_REWIND.%d\n",chainActive.LastTip()->GetHeight(),KOMODO_REWIND);
+ while ( KOMODO_REWIND > 0 && (tipindex= chainActive.LastTip()) != 0 && tipindex->GetHeight() > KOMODO_REWIND )
{
fBlocksDisconnected = true;
- fprintf(stderr,"%d ",(int32_t)tipindex->nHeight);
+ fprintf(stderr,"%d ",(int32_t)tipindex->GetHeight());
InvalidateBlock(state,tipindex);
if ( !DisconnectTip(state) )
break;
// Build list of new blocks to connect.
std::vector<CBlockIndex*> vpindexToConnect;
bool fContinue = true;
- int nHeight = pindexFork ? pindexFork->nHeight : -1;
- while (fContinue && nHeight != pindexMostWork->nHeight) {
+ int nHeight = pindexFork ? pindexFork->GetHeight() : -1;
+ while (fContinue && nHeight != pindexMostWork->GetHeight()) {
// Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
// a few blocks along the way.
- int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
+ int nTargetHeight = std::min(nHeight + 32, pindexMostWork->GetHeight());
vpindexToConnect.clear();
vpindexToConnect.reserve(nTargetHeight - nHeight);
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
- while (pindexIter && pindexIter->nHeight != nHeight) {
+ while (pindexIter && pindexIter->GetHeight() != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
}
} else {
PruneBlockIndexCandidates();
- if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
+ if (!pindexOldTip || chainActive.Tip()->chainPower > pindexOldTip->chainPower) {
// We're in a better position than we were. Return temporarily to release the lock.
fContinue = false;
break;
}
if (fBlocksDisconnected) {
- mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
}
mempool.removeWithoutBranchId(
- CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
+ CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus()));
mempool.check(pcoinsTip);
// Callbacks/notifications for a new best chain.
CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
else
CheckForkWarningConditions();
-
+
return true;
}
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(state)) {
- mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
mempool.removeWithoutBranchId(
- CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
+ CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus()));
return false;
}
}
// The resulting new best tip may not be in setBlockIndexCandidates anymore, so
// add it again.
BlockMap::iterator it = mapBlockIndex.begin();
- while (it != mapBlockIndex.end() && it->second != 0 ) {
- if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
+ while (it != mapBlockIndex.end()) {
+ if ((it->second != 0) && it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
setBlockIndexCandidates.insert(it->second);
}
it++;
}
InvalidChainFound(pindex);
- mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
mempool.removeWithoutBranchId(
- CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
+ CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus()));
return true;
}
bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
AssertLockHeld(cs_main);
- int nHeight = pindex->nHeight;
+ int nHeight = pindex->GetHeight();
// Remove the invalidity flag from this block and all its descendants.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
- if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
+ if ((it->second != 0) && !it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
uint256 hash = block.GetHash();
BlockMap::iterator it = mapBlockIndex.find(hash);
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
+
+ // the following block is for debugging; uncomment it when needed
+ /*
+ std::vector<BlockMap::iterator> vrit;
+ for (BlockMap::iterator bit = mapBlockIndex.begin(); bit != mapBlockIndex.end(); bit++)
+ {
+ if (bit->second == NULL)
+ vrit.push_back(bit);
+ }
+ if (!vrit.empty())
+ {
+ printf("found %zu NULL blocks in mapBlockIndex\n", vrit.size());
+ }
+ */
+
if (it != mapBlockIndex.end())
{
if ( it->second != 0 ) // vNodes.size() >= KOMODO_LIMITED_NETWORKSIZE, change behavior to allow komodo_ensure to work
if ( miPrev != mapBlockIndex.end() && (*miPrev).second == 0 )
{
//fprintf(stderr,"edge case of both block and prevblock in the strange state\n");
- return(0); // return here to avoid the state of pindex->nHeight not set and pprev NULL
+ return(0); // return here to avoid the state of pindex->GetHeight() not set and pprev NULL
}
}
// Construct new block index object
pindexNew->phashBlock = &((*mi).first);
if (miPrev != mapBlockIndex.end())
{
- if ( (pindexNew->pprev= (*miPrev).second) != 0 )
- pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
+ if ( (pindexNew->pprev = (*miPrev).second) != 0 )
+ pindexNew->SetHeight(pindexNew->pprev->GetHeight() + 1);
else fprintf(stderr,"unexpected null pprev %s\n",hash.ToString().c_str());
pindexNew->BuildSkip();
}
- pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
+ pindexNew->chainPower = (pindexNew->pprev ? CChainPower(pindexNew) + pindexNew->pprev->chainPower : CChainPower(pindexNew)) + GetBlockProof(*pindexNew);
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
- if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ if (pindexBestHeader == NULL || pindexBestHeader->chainPower < pindexNew->chainPower)
pindexBestHeader = pindexNew;
setDirtyBlockIndex.insert(pindexNew);
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
CAmount sproutValue = 0;
+ CAmount saplingValue = 0;
for (auto tx : block.vtx) {
+ // Negative valueBalance "takes" money from the transparent value pool
+ // and adds it to the Sapling value pool. Positive valueBalance "gives"
+ // money to the transparent value pool, removing from the Sapling value
+ // pool. So we invert the sign here.
+ saplingValue += -tx.valueBalance;
+
for (auto js : tx.vjoinsplit) {
sproutValue += js.vpub_old;
sproutValue -= js.vpub_new;
}
pindexNew->nSproutValue = sproutValue;
pindexNew->nChainSproutValue = boost::none;
+ pindexNew->nSaplingValue = saplingValue;
+ pindexNew->nChainSaplingValue = boost::none;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
} else {
pindex->nChainSproutValue = boost::none;
}
+ if (pindex->pprev->nChainSaplingValue) {
+ pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue;
+ } else {
+ pindex->nChainSaplingValue = boost::none;
+ }
} else {
pindex->nChainSproutValue = pindex->nSproutValue;
+ pindex->nChainSaplingValue = pindex->nSaplingValue;
}
{
LOCK(cs_nBlockSequenceId);
}
if ( fCheckPOW )
{
- //if ( !CheckEquihashSolution(&block, Params()) )
+ //if ( !CheckEquihashSolution(&block, Params()) )
// return state.DoS(100, error("CheckBlock: Equihash solution invalid"),REJECT_INVALID, "invalid-solution");
komodo_block2pubkey33(pubkey33,(CBlock *)&block);
- if ( !CheckProofOfWork(height,pubkey33,hash,block.nBits,Params().GetConsensus(),block.nTime) )
+ if ( !CheckProofOfWork(block,pubkey33,height,Params().GetConsensus()) )
{
int32_t z; for (z=31; z>=0; z--)
fprintf(stderr,"%02x",((uint8_t *)&hash)[z]);
if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
return state.DoS(100, error("CheckBlock: first tx is not coinbase"),
REJECT_INVALID, "bad-cb-missing");
+
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i].IsCoinBase())
return state.DoS(100, error("CheckBlock: more than one coinbase"),
REJECT_INVALID, "bad-cb-multiple");
// Check transactions
+ CTransaction sTx;
+ CTransaction *ptx = NULL;
if ( ASSETCHAINS_CC != 0 ) // CC contracts might refer to transactions in the current block, from a CC spend within the same block and out of order
{
- CValidationState stateDummy; int32_t i,j,rejects=0,lastrejects=0;
+ int32_t i,j,rejects=0,lastrejects=0;
//fprintf(stderr,"put block's tx into mempool\n");
while ( 1 )
{
for (i=0; i<block.vtx.size(); i++)
{
- CTransaction Tx; const CTransaction &tx = (CTransaction)block.vtx[i];
- if (tx.IsCoinBase() != 0 )
- continue;
- else if ( ASSETCHAINS_STAKED != 0 && (i == (block.vtx.size() - 1)) && komodo_isPoS((CBlock *)&block) != 0 )
+ CValidationState state;
+ CTransaction Tx;
+ const CTransaction &tx = (CTransaction)block.vtx[i];
+ if (tx.IsCoinBase() || ((i == (block.vtx.size() - 1)) && (ASSETCHAINS_STAKED && komodo_isPoS((CBlock *)&block) != 0)))
continue;
Tx = tx;
- if ( myAddtomempool(Tx) == false ) // can happen with out of order tx in block on resync
- //if ( AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL) == false )
- rejects++;
+ if ( myAddtomempool(Tx, &state) == false ) // happens with out of order tx in block on resync
+ {
+ //LogPrintf("Rejected by mempool, reason: .%s.\n", state.GetRejectReason().c_str());
+ // take advantage of other checks, but if we were only rejected because it is a valid staking
+ // transaction, sync with wallets and don't mark as a reject
+ if (i == (block.vtx.size() - 1) && ASSETCHAINS_LWMAPOS && block.IsVerusPOSBlock() && state.GetRejectReason() == "staking")
+ {
+ sTx = Tx;
+ ptx = &sTx;
+ } else rejects++;
+ }
}
if ( rejects == 0 || rejects == lastrejects )
{
}
//fprintf(stderr,"done putting block's tx into mempool\n");
}
- BOOST_FOREACH(const CTransaction& tx, block.vtx)
+
+ for (uint32_t i = 0; i < block.vtx.size(); i++)
{
+ const CTransaction& tx = block.vtx[i];
if ( komodo_validate_interest(tx,height == 0 ? komodo_block2height((CBlock *)&block) : height,block.nTime,0) < 0 )
return error("CheckBlock: komodo_validate_interest failed");
if (!CheckTransaction(tx, state, verifier))
LogPrintf("CheckBlockHeader komodo_check_deposit error");
return(false);
}
+
+ if (ptx)
+ {
+ SyncWithWallets(*ptx, &block);
+ }
return true;
}
assert(pindexPrev);
- int nHeight = pindexPrev->nHeight+1;
-
+ int nHeight = pindexPrev->GetHeight()+1;
+
// Check proof of work
if ( (ASSETCHAINS_SYMBOL[0] != 0 || nHeight < 235300 || nHeight > 236000) && block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
{
- cout << block.nBits << " block.nBits vs. calc " << GetNextWorkRequired(pindexPrev, &block, consensusParams) << endl;
+ cout << block.nBits << " block.nBits vs. calc " << GetNextWorkRequired(pindexPrev, &block, consensusParams) <<
+ " for block #" << nHeight << endl;
return state.DoS(100, error("%s: incorrect proof of work", __func__),
- REJECT_INVALID, "bad-diffbits");
+ REJECT_INVALID, "bad-diffbits");
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
+ {
return state.Invalid(error("%s: block's timestamp is too early", __func__),
- REJECT_INVALID, "time-too-old");
-
+ REJECT_INVALID, "time-too-old");
+ }
+
+ // Check that timestamp is not too far in the future
+ if (block.GetBlockTime() > GetAdjustedTime() + consensusParams.nMaxFutureBlockTime)
+ {
+ return state.Invalid(error("%s: block timestamp too far in the future", __func__),
+ REJECT_INVALID, "time-too-new");
+ }
+
if (fCheckpointsEnabled)
{
// Check that the block chain matches the known block chain up to a checkpoint
// Don't accept any forks from the main chain prior to last checkpoint
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
int32_t notarized_height;
- if ( nHeight == 1 && chainActive.LastTip() != 0 && chainActive.LastTip()->nHeight > 1 )
+ if ( nHeight == 1 && chainActive.LastTip() != 0 && chainActive.LastTip()->GetHeight() > 1 )
{
CBlockIndex *heightblock = chainActive[nHeight];
if ( heightblock != 0 && heightblock->GetBlockHash() == hash )
}
if ( nHeight != 0 )
{
- if ( pcheckpoint != 0 && nHeight < pcheckpoint->nHeight )
- return state.DoS(1, error("%s: forked chain older than last checkpoint (height %d) vs %d", __func__, nHeight,pcheckpoint->nHeight));
+ if ( pcheckpoint != 0 && nHeight < pcheckpoint->GetHeight() )
+ return state.DoS(1, error("%s: forked chain older than last checkpoint (height %d) vs %d", __func__, nHeight,pcheckpoint->GetHeight()));
if ( komodo_checkpoint(¬arized_height,nHeight,hash) < 0 )
{
CBlockIndex *heightblock = chainActive[nHeight];
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
- const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
+ const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->GetHeight() + 1;
const Consensus::Params& consensusParams = Params().GetConsensus();
-
+ bool sapling = NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_SAPLING);
+
+ if (block.nVersion != CBlockHeader::GetVersionByHeight(nHeight))
+ {
+ printf("ERROR: block rejected as wrong version, version should be %d for block height %d\n", CBlockHeader::GetVersionByHeight(nHeight), nHeight);
+ return state.DoS(10, error("%s: block header has incorrect version", __func__), REJECT_INVALID, "incorrect-block-version");
+ }
+
+ if (block.nVersion == CBlockHeader::VERUS_V2 && ASSETCHAINS_ALGO == ASSETCHAINS_VERUSHASH)
+ {
+ std::vector<unsigned char> vch = block.nSolution;
+ if (CVerusSolutionVector(vch).Version() != 1)
+ {
+ return state.DoS(10, error("%s: block header has incorrect version", __func__), REJECT_INVALID, "incorrect-block-version");
+ }
+ }
+
// Check that all transactions are finalized
- BOOST_FOREACH(const CTransaction& tx, block.vtx) {
+ for (uint32_t i = 0; i < block.vtx.size(); i++) {
+ const CTransaction& tx = block.vtx[i];
// Check transaction contextually against consensus rules at block height
if (!ContextualCheckTransaction(tx, state, nHeight, 100)) {
return false; // Failure reason has been set in validation state object
}
-
+
+ // if this is a stake transaction with a stake opreturn, reject it if not staking a block. don't check coinbase or actual stake tx
+ CStakeParams p;
+ if (sapling && i > 0 && i < (block.vtx.size() - 1) && ValidateStakeTransaction(tx, p, false))
+ {
+ return state.DoS(10, error("%s: attempt to submit block with staking transaction that is not staking", __func__), REJECT_INVALID, "bad-txns-staking");
+ }
+
int nLockTimeFlags = 0;
int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
? pindexPrev->GetMedianTimePast()
return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height");
}
}
-
return true;
}
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
- // Check for duplicate
+ // Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = NULL;
if (miSelf != mapBlockIndex.end())
{
// Block header is already known.
- if ( (pindex= miSelf->second) == 0 )
+ if ( (pindex = miSelf->second) == 0 )
miSelf->second = pindex = AddToBlockIndex(block);
if (ppindex)
*ppindex = pindex;
// fprintf(stderr,"accepthdr %s already known but no pindex\n",hash.ToString().c_str());
return true;
}
- if (!CheckBlockHeader(futureblockp,*ppindex!=0?(*ppindex)->nHeight:0,*ppindex, block, state,0))
+ if (!CheckBlockHeader(futureblockp,*ppindex!=0?(*ppindex)->GetHeight():0,*ppindex, block, state,0))
{
if ( *futureblockp == 0 )
{
AssertLockHeld(cs_main);
CBlockIndex *&pindex = *ppindex;
- if (!AcceptBlockHeader(futureblockp,block, state, &pindex))
+ if (!AcceptBlockHeader(futureblockp, block, state, &pindex))
{
if ( *futureblockp == 0 )
{
}
if ( pindex == 0 )
{
- LogPrintf("AcceptBlock null pindex error\n");
+ LogPrintf("AcceptBlock null pindex\n");
+ *futureblockp = true;
return false;
}
//fprintf(stderr,"acceptblockheader passed\n");
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
- bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
+ bool fHasMoreWork = (chainActive.Tip() ? pindex->chainPower > chainActive.Tip()->chainPower : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
- bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + BLOCK_DOWNLOAD_WINDOW)); //MIN_BLOCKS_TO_KEEP));
+ bool fTooFarAhead = (pindex->GetHeight() > int(chainActive.Height() + BLOCK_DOWNLOAD_WINDOW)); //MIN_BLOCKS_TO_KEEP));
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
// See method docstring for why this is always disabled
auto verifier = libzcash::ProofVerifier::Disabled();
- if ((!CheckBlock(futureblockp,pindex->nHeight,pindex,block, state, verifier,0)) || !ContextualCheckBlock(block, state, pindex->pprev))
+ if ((!CheckBlock(futureblockp,pindex->GetHeight(),pindex,block, state, verifier,0)) || !ContextualCheckBlock(block, state, pindex->pprev))
{
if ( *futureblockp == 0 )
{
}
}
- int nHeight = pindex->nHeight;
+ int nHeight = pindex->GetHeight();
// Write block to history file
try {
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
void komodo_currentheight_set(int32_t height);
-CBlockIndex *komodo_ensure(CBlock *pblock,uint256 hash)
+CBlockIndex *komodo_ensure(CBlock *pblock, uint256 hash)
{
CBlockIndex *pindex = 0;
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
if ( miSelf != mapBlockIndex.end() )
{
- if ( (pindex= miSelf->second) == 0 ) // create pindex so first Accept block doesnt fail
+        if ( (pindex = miSelf->second) == 0 ) // create pindex so the first AcceptBlock doesn't fail
{
miSelf->second = AddToBlockIndex(*pblock);
//fprintf(stderr,"Block header %s is already known, but without pindex -> ensured %p\n",hash.ToString().c_str(),miSelf->second);
return(pindex);
}
-CBlockIndex *oldkomodo_ensure(CBlock *pblock,uint256 hash)
+CBlockIndex *oldkomodo_ensure(CBlock *pblock, uint256 hash)
{
CBlockIndex *pindex=0,*previndex=0;
- if ( (pindex= mapBlockIndex[hash]) == 0 )
+ if ( (pindex = mapBlockIndex[hash]) == 0 )
{
pindex = new CBlockIndex();
if (!pindex)
bool checked; uint256 hash; int32_t futureblock=0;
auto verifier = libzcash::ProofVerifier::Disabled();
hash = pblock->GetHash();
- //fprintf(stderr,"ProcessBlock %d\n",(int32_t)chainActive.LastTip()->nHeight);
+ //fprintf(stderr,"ProcessBlock %d\n",(int32_t)chainActive.LastTip()->GetHeight());
{
LOCK(cs_main);
if ( chainActive.LastTip() != 0 )
- komodo_currentheight_set(chainActive.LastTip()->nHeight);
+ komodo_currentheight_set(chainActive.LastTip()->GetHeight());
checked = CheckBlock(&futureblock,height!=0?height:komodo_block2height(pblock),0,*pblock, state, verifier,0);
bool fRequested = MarkBlockAsReceived(hash);
fRequested |= fForceProcessing;
}
// Store to disk
CBlockIndex *pindex = NULL;
- if ( 1 )
- {
- // without the komodo_ensure call, it is quite possible to get a non-error but null pindex returned from AcceptBlockHeader. In a 2 node network, it will be a long time before that block is reprocessed. Even though restarting makes it rescan, it seems much better to keep the nodes in sync
- komodo_ensure(pblock,hash);
- }
+
bool ret = AcceptBlock(&futureblock,*pblock, state, &pindex, fRequested, dbp);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
if (futureblock == 0 && !ActivateBestChain(state, pblock))
return error("%s: ActivateBestChain failed", __func__);
- //fprintf(stderr,"finished ProcessBlock %d\n",(int32_t)chainActive.LastTip()->nHeight);
+ //fprintf(stderr,"finished ProcessBlock %d\n",(int32_t)chainActive.LastTip()->GetHeight());
+
+    // When we succeed here, prune stale cheat candidates from the cheat list; by this depth they should
+    // have been used or become useless. NOTE(review): the guard tests (height - 250) but the prune depth
+    // passed below is (height - 200) — confirm which depth is intended.
+ if ((height - 250) > 1)
+ cheatList.Prune(height - 200);
return true;
}
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
- indexDummy.nHeight = pindexPrev->nHeight + 1;
+ indexDummy.SetHeight(pindexPrev->GetHeight() + 1);
// JoinSplit proofs are verified in ConnectBlock
auto verifier = libzcash::ProofVerifier::Disabled();
// NOTE: CheckBlockHeader is called by CheckBlock
return false;
}
int32_t futureblock;
- if (!CheckBlock(&futureblock,indexDummy.nHeight,0,block, state, verifier, fCheckPOW, fCheckMerkleRoot))
+ if (!CheckBlock(&futureblock,indexDummy.GetHeight(),0,block, state, verifier, fCheckPOW, fCheckMerkleRoot))
{
//fprintf(stderr,"TestBlockValidity failure B checkPOW.%d\n",fCheckPOW);
return false;
{
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
CBlockIndex* pindex = it->second;
- if (pindex->nFile == fileNumber) {
+ if (pindex && pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
pindex->nStatus &= ~BLOCK_HAVE_UNDO;
pindex->nFile = 0;
if (chainActive.Tip() == NULL || nPruneTarget == 0) {
return;
}
- if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
+ if (chainActive.Tip()->GetHeight() <= Params().PruneAfterHeight()) {
return;
}
- unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
+ unsigned int nLastBlockWeCanPrune = chainActive.Tip()->GetHeight() - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files
// So we should leave a buffer under our target to account for another allocation
// Return existing
BlockMap::iterator mi = mapBlockIndex.find(hash);
- if (mi != mapBlockIndex.end())
+ if (mi != mapBlockIndex.end() && mi->second != NULL)
return (*mi).second;
// Create new
LogPrintf("%s: loaded guts\n", __func__);
boost::this_thread::interruption_point();
- // Calculate nChainWork
+ // Calculate chainPower
vector<pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
- vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
- //komodo_pindex_init(pindex,(int32_t)pindex->nHeight);
+ vSortedByHeight.push_back(make_pair(pindex->GetHeight(), pindex));
+ //komodo_pindex_init(pindex,(int32_t)pindex->GetHeight());
}
//fprintf(stderr,"load blockindexDB paired %u\n",(uint32_t)time(NULL));
sort(vSortedByHeight.begin(), vSortedByHeight.end());
BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
{
CBlockIndex* pindex = item.second;
- pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
+ pindex->chainPower = (pindex->pprev ? CChainPower(pindex) + pindex->pprev->chainPower : CChainPower(pindex)) + GetBlockProof(*pindex);
// We can link the chain of blocks for which we've received transactions at some point.
// Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
} else {
pindex->nChainSproutValue = boost::none;
}
+ if (pindex->pprev->nChainSaplingValue) {
+ pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue;
+ } else {
+ pindex->nChainSaplingValue = boost::none;
+ }
} else {
pindex->nChainTx = 0;
pindex->nChainSproutValue = boost::none;
+ pindex->nChainSaplingValue = boost::none;
mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
}
} else {
pindex->nChainTx = pindex->nTx;
pindex->nChainSproutValue = pindex->nSproutValue;
+ pindex->nChainSaplingValue = pindex->nSaplingValue;
}
}
// Construct in-memory chain of branch IDs.
}
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
setBlockIndexCandidates.insert(pindex);
- if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
+ if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->chainPower > pindexBestInvalid->chainPower))
pindexBestInvalid = pindex;
if (pindex->pprev)
pindex->BuildSkip();
if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
pindexBestHeader = pindex;
- //komodo_pindex_init(pindex,(int32_t)pindex->nHeight);
+ //komodo_pindex_init(pindex,(int32_t)pindex->GetHeight());
}
//fprintf(stderr,"load blockindexDB chained %u\n",(uint32_t)time(NULL));
if (pindex->nStatus & BLOCK_HAVE_DATA) {
setBlkDataFiles.insert(pindex->nFile);
}
- //komodo_pindex_init(pindex,(int32_t)pindex->nHeight);
+ //komodo_pindex_init(pindex,(int32_t)pindex->GetHeight());
}
//fprintf(stderr,"load blockindexDB %u\n",(uint32_t)time(NULL));
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
CBlockIndex* pindex = item.second;
// - This relationship will always be true even if pprev has multiple
- // children, because hashAnchor is technically a property of pprev,
+ // children, because hashSproutAnchor is technically a property of pprev,
// not its children.
// - This will miss chain tips; we handle the best tip below, and other
// tips will be handled by ConnectTip during a re-org.
if (pindex->pprev) {
- pindex->pprev->hashAnchorEnd = pindex->hashAnchor;
+ pindex->pprev->hashFinalSproutRoot = pindex->hashSproutAnchor;
}
- //komodo_pindex_init(pindex,(int32_t)pindex->nHeight);
+ //komodo_pindex_init(pindex,(int32_t)pindex->GetHeight());
}
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
return true;
+
chainActive.SetTip(it->second);
- // Set hashAnchorEnd for the end of best chain
- it->second->hashAnchorEnd = pcoinsTip->GetBestAnchor();
-
+
+ // Set hashFinalSproutRoot for the end of best chain
+ it->second->hashFinalSproutRoot = pcoinsTip->GetBestAnchor(SPROUT);
+
PruneBlockIndexCandidates();
double progress;
for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
- uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
- if (pindex->nHeight < chainActive.Height()-nCheckDepth)
+ uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
+ if (pindex->GetHeight() < chainActive.Height()-nCheckDepth)
break;
CBlock block;
// check level 0: read from disk
if (!ReadBlockFromDisk(block, pindex,0))
- return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString());
// check level 1: verify block validity
int32_t futureblock;
- if (nCheckLevel >= 1 && !CheckBlock(&futureblock,pindex->nHeight,pindex,block, state, verifier,0) )
- return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ if (nCheckLevel >= 1 && !CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, verifier,0) )
+ return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString());
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (!pos.IsNull()) {
if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
- return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString());
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
bool fClean = true;
if (!DisconnectBlock(block, state, pindex, coins, &fClean))
- return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString());
pindexState = pindex->pprev;
if (!fClean) {
nGoodTransactions = 0;
}
//fprintf(stderr,"end VerifyDB %u\n",(uint32_t)time(NULL));
if (pindexFailure)
- return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
+ return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->GetHeight() + 1, nGoodTransactions);
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
- uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
+ uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * 50))));
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex,0))
- return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString());
if (!ConnectBlock(block, state, pindex, coins,false, true))
- return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString());
}
}
- LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);
+ LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->GetHeight(), nGoodTransactions);
return true;
}
-bool RewindBlockIndex(const CChainParams& params)
+bool RewindBlockIndex(const CChainParams& params, bool& clearWitnessCaches)
{
LOCK(cs_main);
auto sufficientlyValidated = [¶ms](const CBlockIndex* pindex) {
auto consensus = params.GetConsensus();
bool fFlagSet = pindex->nStatus & BLOCK_ACTIVATES_UPGRADE;
- bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->nHeight, consensus);
+ bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->GetHeight(), consensus);
return fFlagSet == fFlagExpected &&
pindex->nCachedBranchId &&
- *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->nHeight, consensus);
+ *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->GetHeight(), consensus);
};
int nHeight = 1;
// nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
auto rewindLength = chainActive.Height() - nHeight;
- if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH) {
+ if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH)
+ {
auto pindexOldTip = chainActive.Tip();
auto pindexRewind = chainActive[nHeight - 1];
auto msg = strprintf(_(
), rewindLength, MAX_REORG_LENGTH) + "\n\n" +
_("Rewind details") + ":\n" +
"- " + strprintf(_("Current tip: %s, height %d"),
- pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight) + "\n" +
+ pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight()) + "\n" +
"- " + strprintf(_("Rewinding to: %s, height %d"),
- pindexRewind->phashBlock->GetHex(), pindexRewind->nHeight) + "\n\n" +
+ pindexRewind->phashBlock->GetHex(), pindexRewind->GetHeight()) + "\n\n" +
_("Please help, human!");
LogPrintf("*** %s\n", msg);
uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
break;
}
if (!DisconnectTip(state, true)) {
- return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
+ return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->GetHeight());
}
// Occasionally flush state to disk.
if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
}
// Reduce validity flag and have-data flags.
+
+ // Collect blocks to be removed (blocks in mapBlockIndex must be at least BLOCK_VALID_TREE).
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
+ std::vector<const CBlockIndex*> vBlocks;
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
CBlockIndex* pindexIter = it->second;
// this block or some successor doesn't HAVE_DATA, so we were unable to
// rewind all the way. Blocks remaining on chainActive at this point
// must not have their validity reduced.
- if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
+ if (pindexIter && !sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
// Reduce validity
pindexIter->nStatus =
std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
pindexIter->nChainTx = 0;
pindexIter->nSproutValue = boost::none;
pindexIter->nChainSproutValue = boost::none;
+ pindexIter->nSaplingValue = 0;
+ pindexIter->nChainSaplingValue = boost::none;
pindexIter->nSequenceId = 0;
+
// Make sure it gets written
+ /* corresponds to commented out block below as an alternative to setDirtyBlockIndex
+ vBlocks.push_back(pindexIter);
+ */
setDirtyBlockIndex.insert(pindexIter);
if (pindexIter == pindexBestInvalid)
{
}
}
+ /*
+ // Set pindexBestHeader to the current chain tip
+ // (since we are about to delete the block it is pointing to)
+ pindexBestHeader = chainActive.Tip();
+
+ // Erase block indices on-disk
+ if (!pblocktree->EraseBatchSync(vBlocks)) {
+ return AbortNode(state, "Failed to erase from block index database");
+ }
+
+ // Erase block indices in-memory
+ for (auto pindex : vBlocks) {
+ auto ret = mapBlockIndex.find(*pindex->phashBlock);
+ if (ret != mapBlockIndex.end()) {
+ mapBlockIndex.erase(ret);
+ delete pindex;
+ }
+ }
+ */
+
PruneBlockIndexCandidates();
CheckBlockIndex();
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
-
// Check whether we're already initialized
if (chainActive.Genesis() != NULL)
+ {
return true;
-
+ }
// Use the provided setting for -txindex in the new database
fTxIndex = GetBoolArg("-txindex", true);
pblocktree->WriteFlag("txindex", fTxIndex);
// Use the provided setting for -timestampindex in the new database
fTimestampIndex = GetBoolArg("-timestampindex", DEFAULT_TIMESTAMPINDEX);
pblocktree->WriteFlag("timestampindex", fTimestampIndex);
-
+
fSpentIndex = GetBoolArg("-spentindex", DEFAULT_SPENTINDEX);
pblocktree->WriteFlag("spentindex", fSpentIndex);
+ fprintf(stderr,"fAddressIndex.%d/%d fSpentIndex.%d/%d\n",fAddressIndex,DEFAULT_ADDRESSINDEX,fSpentIndex,DEFAULT_SPENTINDEX);
LogPrintf("Initializing databases...\n");
// Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
nLoaded++;
if (state.IsError())
break;
- } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
- LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
+ } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->GetHeight() % 1000 == 0) {
+ LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->GetHeight());
}
// Recursively process earlier encountered successors of this block
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
- if (ReadBlockFromDisk(mapBlockIndex[hash]!=0?mapBlockIndex[hash]->nHeight:0,block, it->second,1))
+ if (ReadBlockFromDisk(mapBlockIndex.count(hash)!=0?mapBlockIndex[hash]->GetHeight():0,block, it->second,1))
{
LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
head.ToString());
// All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
- assert(pindex->nHeight == nHeight); // nHeight must be consistent.
- assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
- assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
+ assert(pindex->GetHeight() == nHeight); // nHeight must be consistent.
+ assert(pindex->pprev == NULL || pindex->chainPower >= pindex->pprev->chainPower); // For every block except the genesis block, the chainwork must be larger than the parent's.
+ assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->GetHeight() < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
-
-
-
-
+
+ //printf("netmsg: %s\n", strCommand.c_str());
+
if (strCommand == "version")
{
// Each connection can only send one version message
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
- vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
- if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
+ int nVersion; // use temporary for version, don't set version number until validated as connected
+ vRecv >> nVersion >> pfrom->nServices >> nTime >> addrMe;
+ if (nVersion == 10300)
+ nVersion = 300;
+
+ if (nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->fDisconnect = true;
return false;
}
-
- // When Overwinter is active, reject incoming connections from non-Overwinter nodes
+
+ // Reject incoming connections from nodes that don't know about the current epoch
const Consensus::Params& params = Params().GetConsensus();
- if (NetworkUpgradeActive(GetHeight(), params, Consensus::UPGRADE_OVERWINTER)
- && pfrom->nVersion < params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion)
+ auto currentEpoch = CurrentEpoch(GetHeight(), params);
+ if (nVersion < params.vUpgrades[currentEpoch].nProtocolVersion)
{
- LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
+ LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion);
pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
- strprintf("Version must be %d or greater",
- params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
+ strprintf("Version must be %d or greater",
+ params.vUpgrades[currentEpoch].nProtocolVersion));
pfrom->fDisconnect = true;
return false;
}
- if (pfrom->nVersion == 10300)
- pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
- vRecv >> LIMITED_STRING(pfrom->strSubVer, 256);
+ vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
}
if (!vRecv.empty())
pfrom->fDisconnect = true;
return true;
}
+
+ pfrom->nVersion = nVersion;
pfrom->addrLocal = addrMe;
if (pfrom->fInbound && addrMe.IsRoutable())
// Disconnect existing peer connection when:
// 1. The version message has been received
- // 2. Overwinter is active
- // 3. Peer version is pre-Overwinter
- else if (NetworkUpgradeActive(GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_OVERWINTER)
- && (pfrom->nVersion < chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion))
+ // 2. Peer version is below the minimum version for the current epoch
+ else if (pfrom->nVersion < chainparams.GetConsensus().vUpgrades[
+ CurrentEpoch(GetHeight(), chainparams.GetConsensus())].nProtocolVersion)
{
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
- strprintf("Version must be %d or greater",
- chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
+ strprintf("Version must be %d or greater",
+ chainparams.GetConsensus().vUpgrades[
+ CurrentEpoch(GetHeight(), chainparams.GetConsensus())].nProtocolVersion));
pfrom->fDisconnect = true;
return false;
}
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
}
- LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
+ LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->GetHeight(), inv.hash.ToString(), pfrom->id);
}
}
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
- LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
+ LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->GetHeight() : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
- LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrint("net", " getblocks stopping at %d %s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
- LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
+ LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
if (pindex)
pindex = chainActive.Next(pindex);
}
-
- // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
- vector<CBlock> vHeaders;
+
+ // we must use CNetworkBlockHeader, as CBlockHeader won't include the 0x00 nTx count at the end for compatibility
+ vector<CNetworkBlockHeader> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
- LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
- //if ( pfrom->lasthdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pfrom->lasthdrsreq != (int32_t)(pindex ? pindex->nHeight : -1) )// no need to ever suppress this
+ LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->GetHeight() : -1), hashStop.ToString(), pfrom->id);
+ //if ( pfrom->lasthdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pfrom->lasthdrsreq != (int32_t)(pindex ? pindex->GetHeight() : -1) )// no need to ever suppress this
{
- pfrom->lasthdrsreq = (int32_t)(pindex ? pindex->nHeight : -1);
+ pfrom->lasthdrsreq = (int32_t)(pindex ? pindex->GetHeight() : -1);
for (; pindex; pindex = chainActive.Next(pindex))
{
+ CBlockHeader h = pindex->GetBlockHeader();
+ //printf("size.%i, solution size.%i\n", (int)sizeof(h), (int)h.nSolution.size());
+ //printf("hash.%s prevhash.%s nonce.%s\n", h.GetHash().ToString().c_str(), h.hashPrevBlock.ToString().c_str(), h.nNonce.ToString().c_str());
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
{
static uint32_t counter;
if ( counter++ < 3 )
- fprintf(stderr,"you can ignore redundant getheaders from peer.%d %d prev.%d\n",(int32_t)pfrom->id,(int32_t)(pindex ? pindex->nHeight : -1),pfrom->lasthdrsreq);
+ fprintf(stderr,"you can ignore redundant getheaders from peer.%d %d prev.%d\n",(int32_t)pfrom->id,(int32_t)(pindex ? pindex->GetHeight() : -1),pfrom->lasthdrsreq);
}*/
}
pfrom->setAskFor.erase(inv.hash);
mapAlreadyAskedFor.erase(inv);
-
+
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
{
mempool.check(pcoinsTip);
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
- // TODO: currently, prohibit joinsplits from entering mapOrphans
- else if (fMissingInputs && tx.vjoinsplit.size() == 0)
+ // TODO: currently, prohibit joinsplits and shielded spends/outputs from entering mapOrphans
+ else if (fMissingInputs &&
+ tx.vjoinsplit.empty() &&
+ tx.vShieldedSpend.empty() &&
+ tx.vShieldedOutput.empty())
{
+ // valid stake transactions end up in the orphan tx bin
AddOrphanTx(tx, pfrom->GetId());
-
+
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
Misbehaving(pfrom->GetId(), nDoS);
}
}
-
-
+
else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
CBlockIndex *pindexLast = NULL;
BOOST_FOREACH(const CBlockHeader& header, headers) {
+ //printf("size.%i, solution size.%i\n", (int)sizeof(header), (int)header.nSolution.size());
+ //printf("hash.%s prevhash.%s nonce.%s\n", header.GetHash().ToString().c_str(), header.hashPrevBlock.ToString().c_str(), header.nNonce.ToString().c_str());
+
CValidationState state;
if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
Misbehaving(pfrom->GetId(), 20);
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
- if ( pfrom->sendhdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pindexLast->nHeight != pfrom->sendhdrsreq )
+ if ( pfrom->sendhdrsreq >= chainActive.Height()-MAX_HEADERS_RESULTS || pindexLast->GetHeight() != pfrom->sendhdrsreq )
{
- pfrom->sendhdrsreq = (int32_t)pindexLast->nHeight;
- LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
+ pfrom->sendhdrsreq = (int32_t)pindexLast->GetHeight();
+ LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->GetHeight(), pfrom->id, pfrom->nStartingHeight);
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
}
}
vector<CInv> vInv;
BOOST_FOREACH(uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
- CTransaction tx;
- bool fInMemPool = mempool.lookup(hash, tx);
- if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
- if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(tx)) ||
- (!pfrom->pfilter))
- vInv.push_back(inv);
+ if (pfrom->pfilter) {
+ CTransaction tx;
+ bool fInMemPool = mempool.lookup(hash, tx);
+ if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
+ if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
+ }
+ vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pfrom->PushMessage("inv", vInv);
vInv.clear();
}
}
}
-
-
+
+ else if (!(nLocalServices & NODE_BLOOM) &&
+ (strCommand == "filterload" ||
+ strCommand == "filteradd"))
+ {
+ if (pfrom->nVersion >= NO_BLOOM_VERSION) {
+ Misbehaving(pfrom->GetId(), 100);
+ return false;
+ } else if (GetBoolArg("-enforcenodebloom", false)) {
+ pfrom->fDisconnect = true;
+ return false;
+ }
+ }
+
+
else if (strCommand == "filterload")
{
CBloomFilter filter;
else if (strCommand == "filterclear")
{
LOCK(pfrom->cs_filter);
- delete pfrom->pfilter;
- pfrom->pfilter = new CBloomFilter();
+ if (nLocalServices & NODE_BLOOM) {
+ delete pfrom->pfilter;
+ pfrom->pfilter = new CBloomFilter();
+ }
pfrom->fRelayTxes = true;
}
state.fSyncStarted = true;
nSyncStarted++;
CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
- LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
+ LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->GetHeight(), pto->id, pto->nStartingHeight);
pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256());
}
}
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
- pindex->nHeight, pto->id);
+ pindex->GetHeight(), pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
-class CMainCleanup
+static class CMainCleanup
{
public:
CMainCleanup() {}
bool isOverwintered = NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_OVERWINTER);
if (isOverwintered) {
mtx.fOverwintered = true;
- mtx.nVersionGroupId = OVERWINTER_VERSION_GROUP_ID;
- mtx.nVersion = 3;
- // Expiry height is not set. Only fields required for a parser to treat as a valid Overwinter V3 tx.
-
- // TODO: In future, when moving from Overwinter to Sapling, it will be useful
- // to set the expiry height to: min(activation_height - 1, default_expiry_height)
+ mtx.nExpiryHeight = nHeight + expiryDelta;
+
+ if (NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_SAPLING)) {
+ mtx.nVersionGroupId = SAPLING_VERSION_GROUP_ID;
+ mtx.nVersion = SAPLING_TX_VERSION;
+ } else {
+ mtx.nVersionGroupId = OVERWINTER_VERSION_GROUP_ID;
+ mtx.nVersion = OVERWINTER_TX_VERSION;
+ mtx.nExpiryHeight = std::min(
+ mtx.nExpiryHeight,
+ static_cast<uint32_t>(consensusParams.vUpgrades[Consensus::UPGRADE_SAPLING].nActivationHeight - 1));
+ }
}
return mtx;
}