doc: fix typos
As found by lint-spelling.py using codespell 2.2.6.
parent 0387ca0774
commit 43de4d3630
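For context on the commit message above: lint-spelling.py drives codespell over the repository's tracked files. Below is a minimal, hypothetical sketch of that kind of lint wrapper; the file globs and the ignore-words path are assumptions for illustration, not a copy of the actual script.

#!/usr/bin/env python3
# Illustrative sketch (assumed paths): run codespell over tracked text files
# and report misspellings, honouring a list of intentionally ignored words.
import subprocess
import sys

IGNORE_WORDS_FILE = "test/lint/spelling.ignore-words.txt"  # assumed location

def main():
    # Let git enumerate the files so build artifacts are not scanned.
    files = subprocess.check_output(
        ["git", "ls-files", "--", "*.md", "*.py", "*.cpp", "*.h"],
        text=True,
    ).splitlines()
    # --ignore-words points codespell at words that must not be reported.
    result = subprocess.run(
        ["codespell", "--check-filenames",
         f"--ignore-words={IGNORE_WORDS_FILE}", *files]
    )
    if result.returncode != 0:
        print("^ Fix the typos above, or add false positives to", IGNORE_WORDS_FILE)
    sys.exit(result.returncode)

if __name__ == "__main__":
    main()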
@@ -52,5 +52,5 @@ in order.
### Cache

In order to avoid rebuilding all dependencies for each build, the binaries are
-cached and re-used when possible. Changes in the dependency-generator will
+cached and reused when possible. Changes in the dependency-generator will
trigger cache-invalidation and rebuilds as necessary.
@@ -28,7 +28,7 @@ etc), and as well as a hash of the same data for each recursive dependency. If
any portion of a package's build recipe changes, it will be rebuilt as well as
any other package that depends on it. If any of the main makefiles (Makefile,
funcs.mk, etc) are changed, all packages will be rebuilt. After building, the
-results are cached into a tarball that can be re-used and distributed.
+results are cached into a tarball that can be reused and distributed.

### Package build results are (relatively) deterministic.

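As a side note to the depends documentation above, the caching scheme can be pictured as content-addressed keys: each package's key hashes its own recipe together with the keys of its dependencies, so editing one recipe invalidates that package and everything built on top of it. The sketch below is illustrative only; the package names and recipe format are made up and do not come from depends/.

import hashlib

# Toy dependency graph: package -> (recipe text, direct dependencies).
PACKAGES = {
    "zlib":   ("version=1.3 sha256=abc", []),
    "qt":     ("version=5.15 sha256=def", ["zlib"]),
    "wallet": ("version=1.0 sha256=123", ["qt", "zlib"]),
}

def cache_key(name: str) -> str:
    """Hash of a package's recipe and, recursively, of all its dependencies."""
    recipe, deps = PACKAGES[name]
    h = hashlib.sha256(recipe.encode())
    for dep in sorted(deps):
        h.update(cache_key(dep).encode())  # any change below propagates upward
    return h.hexdigest()

# Editing zlib's recipe changes cache_key("zlib"), which changes the keys of
# qt and wallet too, so all three tarballs are rebuilt; editing only wallet's
# recipe leaves the zlib and qt cache entries untouched.
print({name: cache_key(name)[:16] for name in PACKAGES})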
@@ -172,7 +172,7 @@ public:
/**
* Returns an information-location pair for all addresses in the selected addrman table.
* If an address appears multiple times in the new table, an information-location pair
-* is returned for each occurence. Addresses only ever appear once in the tried table.
+* is returned for each occurrence. Addresses only ever appear once in the tried table.
*
* @param[in] from_tried Selects which table to return entries from.
*
@@ -2447,7 +2447,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
// of transactions relevant to them, without having to download the
// entire memory pool.
// Also, other nodes can use these messages to automatically request a
-// transaction from some other peer that annnounced it, and stop
+// transaction from some other peer that announced it, and stop
// waiting for us to respond.
// In normal operation, we often send NOTFOUND messages for parents of
// transactions that we relay; if a peer is missing a parent, they may
@@ -3592,7 +3592,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}

-// Log succesful connections unconditionally for outbound, but not for inbound as those
+// Log successful connections unconditionally for outbound, but not for inbound as those
// can be triggered by an attacker at high rate.
if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) {
const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
@@ -98,7 +98,7 @@ static constexpr unsigned int MANDATORY_SCRIPT_VERIFY_FLAGS{SCRIPT_VERIFY_P2SH |
* Standard script verification flags that standard transactions will comply
* with. However we do not ban/disconnect nodes that forward txs violating
* the additional (non-mandatory) rules here, to improve forwards and
-* backwards compatability.
+* backwards compatibility.
*/
static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS{MANDATORY_SCRIPT_VERIFY_FLAGS |
SCRIPT_VERIFY_STRICTENC |
@@ -219,7 +219,7 @@ public:

// If a status update is needed (blocks came in since last check),
// try to update the status of this transaction from the wallet.
-// Otherwise, simply re-use the cached status.
+// Otherwise, simply reuse the cached status.
interfaces::WalletTxStatus wtx;
int numBlocks;
int64_t block_time;
@@ -409,7 +409,7 @@ static inline JSONRPCRequest transformNamedArguments(const JSONRPCRequest& in, c
}
// Process expected parameters. If any parameters were left unspecified in
// the request before a parameter that was specified, null values need to be
-// inserted at the unspecifed parameter positions, and the "hole" variable
+// inserted at the unspecified parameter positions, and the "hole" variable
// below tracks the number of null values that need to be inserted.
// The "initial_hole_size" variable stores the size of the initial hole,
// i.e. how many initial positional arguments were left unspecified. This is
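To make the "hole" bookkeeping described in the comment above concrete, here is a small illustrative sketch (not the actual C++ of transformNamedArguments): named arguments are laid out positionally, and null placeholders are emitted only for unspecified parameters that precede a specified one, while trailing unspecified parameters are dropped. The parameter names in the example are invented.

def to_positional(named_args: dict, expected_params: list):
    """Convert {name: value} into a positional list, padding holes with None."""
    out = []
    hole = 0  # unspecified params seen since the last specified one
    for name in expected_params:
        if name in named_args:
            # Fill the hole with nulls so this value lands in the right slot.
            out.extend([None] * hole)
            hole = 0
            out.append(named_args[name])
        else:
            hole += 1  # may be a trailing omission; emit nothing yet
    return out  # trailing holes are never emitted

# Example: "dummy" is unspecified before the specified "options", so one None
# is inserted for it; the trailing unspecified "verbose" is omitted entirely.
print(to_positional({"txid": "ab" * 32, "options": {"fee_rate": 10}},
                    ["txid", "dummy", "options", "verbose"]))
# -> [<64-char txid>, None, {'fee_rate': 10}]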
@@ -1089,7 +1089,7 @@ UniValue RPCResult::MatchesType(const UniValue& result) const
if (UniValue::VARR == result.getType()) {
UniValue errors(UniValue::VOBJ);
for (size_t i{0}; i < result.get_array().size(); ++i) {
-// If there are more results than documented, re-use the last doc_inner.
+// If there are more results than documented, reuse the last doc_inner.
const RPCResult& doc_inner{m_inner.at(std::min(m_inner.size() - 1, i))};
UniValue match{doc_inner.MatchesType(result.get_array()[i])};
if (!match.isTrue()) errors.pushKV(strprintf("%d", i), match);
@@ -407,7 +407,7 @@ public:
* RPC method implementations. The helper internally checks whether the
* user-passed argument isNull() and parses (from JSON) and returns the
* user-passed argument, or the default value derived from the RPCArg
-* documention, or a falsy value if no default was given.
+* documentation, or a falsy value if no default was given.
*
* Use Arg<Type>(i) to get the argument or its default value. Otherwise,
* use MaybeArg<Type>(i) to get the optional argument or a falsy value.
@@ -158,7 +158,7 @@ FuzzedDataProvider::ConsumeRandomLengthString(size_t max_length) {
// picking its contents.
std::string result;

-// Reserve the anticipated capaticity to prevent several reallocations.
+// Reserve the anticipated capacity to prevent several reallocations.
result.reserve(std::min(max_length, remaining_bytes_));
for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
@@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
}
SignatureData empty;
BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL, empty));
-// Re-use same signature for other inputs
+// Reuse same signature for other inputs
// (they don't have to be valid for this test)
for (unsigned int j = 1; j < tx.vin.size(); j++)
tx.vin[j].scriptSig = tx.vin[0].scriptSig;
@@ -1112,7 +1112,7 @@ BOOST_AUTO_TEST_CASE(script_CHECKMULTISIG23)
BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_OK, ScriptErrorString(err));

keys.clear();
-keys.push_back(key2); keys.push_back(key2); // Can't re-use sig
+keys.push_back(key2); keys.push_back(key2); // Can't reuse sig
CScript badsig1 = sign_multisig(scriptPubKey23, keys, CTransaction(txTo23));
BOOST_CHECK(!VerifyScript(badsig1, scriptPubKey23, nullptr, gFlags, MutableTransactionSignatureChecker(&txTo23, 0, txFrom23.vout[0].nValue, MissingDataBehavior::ASSERT_FAIL), &err));
BOOST_CHECK_MESSAGE(err == SCRIPT_ERR_EVAL_FALSE, ScriptErrorString(err));
@@ -424,7 +424,7 @@ BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
uint32_t chain_all_vbits{0};
for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++i) {
const auto dep = static_cast<Consensus::DeploymentPos>(i);
-// Check that no bits are re-used (within the same chain). This is
+// Check that no bits are reused (within the same chain). This is
// disallowed because the transition to FAILED (on timeout) does
// not take precedence over STARTED/LOCKED_IN. So all softforks on
// the same bit might overlap, even when non-overlapping start-end
@@ -251,7 +251,7 @@ std::map<std::string,std::string> ParseTorReplyMapping(const std::string &s)
/**
* Unescape value. Per https://spec.torproject.org/control-spec section 2.1.1:
*
-* For future-proofing, controller implementors MAY use the following
+* For future-proofing, controller implementers MAY use the following
* rules to be compatible with buggy Tor implementations and with
* future ones that implement the spec as intended:
*
@@ -477,7 +477,7 @@ public:
void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** After reorg, filter the entries that would no longer be valid in the next block, and update
* the entries' cached LockPoints if needed. The mempool does not have any knowledge of
-* consensus rules. It just appplies the callable function and removes the ones for which it
+* consensus rules. It just applies the callable function and removes the ones for which it
* returns true.
* @param[in] filter_final_and_mature Predicate that checks the relevant validation rules
* and updates an entry's LockPoints.
@@ -170,7 +170,7 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
return Result::INVALID_PARAMETER;
}

-// We are going to modify coin control later, copy to re-use
+// We are going to modify coin control later, copy to reuse
CCoinControl new_coin_control(coin_control);

LOCK(wallet.cs_wallet);
@@ -1257,7 +1257,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
}

// Before we return success, we assume any change key will be used to prevent
-// accidental re-use.
+// accidental reuse.
reservedest.KeepDestination();

wallet.WalletLogPrintf("Fee Calculation: Fee:%d Bytes:%u Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
@@ -1299,7 +1299,7 @@ util::Result<CreatedTransactionResult> CreateTransaction(
CCoinControl tmp_cc = coin_control;
tmp_cc.m_avoid_partial_spends = true;

-// Re-use the change destination from the first creation attempt to avoid skipping BIP44 indexes
+// Reuse the change destination from the first creation attempt to avoid skipping BIP44 indexes
const int ungrouped_change_pos = txr_ungrouped.change_pos;
if (ungrouped_change_pos != -1) {
ExtractDestination(txr_ungrouped.tx->vout[ungrouped_change_pos].scriptPubKey, tmp_cc.destChange);
@@ -178,7 +178,7 @@ static const std::map<std::string,WalletFlags> WALLET_FLAG_MAP{
* Instantiating a ReserveDestination does not reserve an address. To do so,
* GetReservedDestination() needs to be called on the object. Once an address has been
* reserved, call KeepDestination() on the ReserveDestination object to make sure it is not
-* returned. Call ReturnDestination() to return the address so it can be re-used (for
+* returned. Call ReturnDestination() to return the address so it can be reused (for
* example, if the address was used in a new transaction
* and that transaction was not completed and needed to be aborted).
*
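The reserve/keep/return lifecycle documented above can be sketched generically; the toy class below models the described behaviour in Python for illustration and is not the wallet's ReserveDestination implementation.

class ReserveDestinationSketch:
    """Toy model of the reserve/keep/return lifecycle described above."""

    def __init__(self, pool):
        self._pool = pool          # unused addresses, oldest first
        self._reserved = None

    def get_reserved_destination(self):
        if self._reserved is None:
            self._reserved = self._pool.pop(0)   # take, but do not discard yet
        return self._reserved

    def keep_destination(self):
        self._reserved = None                    # consumed: never handed out again

    def return_destination(self):
        if self._reserved is not None:
            self._pool.insert(0, self._reserved) # back to the pool for reuse
            self._reserved = None

# Usage: reserve an address for a draft transaction; keep it on success,
# return it if the transaction is abandoned.
r = ReserveDestinationSketch(["addr1", "addr2"])
addr = r.get_reserved_destination()
r.return_destination()   # aborted: "addr1" can be reused later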
@@ -75,7 +75,7 @@ class AssumeutxoTest(BitcoinTestFramework):
with self.nodes[1].assert_debug_log([log_msg]):
assert_raises_rpc_error(-32603, f"Unable to load UTXO snapshot{rpc_details}", self.nodes[1].loadtxoutset, bad_snapshot_path)

-self.log.info(" - snapshot file refering to a block that is not in the assumeutxo parameters")
+self.log.info(" - snapshot file referring to a block that is not in the assumeutxo parameters")
prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1)
bogus_block_hash = "0" * 64  # Represents any unknown block hash
for bad_block_hash in [bogus_block_hash, prev_block_hash]:
@@ -112,7 +112,7 @@ class AssumeutxoTest(BitcoinTestFramework):
def test_invalid_chainstate_scenarios(self):
self.log.info("Test different scenarios of invalid snapshot chainstate in datadir")

-self.log.info(" - snapshot chainstate refering to a block that is not in the assumeutxo parameters")
+self.log.info(" - snapshot chainstate referring to a block that is not in the assumeutxo parameters")
self.stop_node(0)
chainstate_snapshot_path = self.nodes[0].chain_path / "chainstate_snapshot"
chainstate_snapshot_path.mkdir()
@@ -29,7 +29,7 @@ def bip158_basic_element_hash(script_pub_key, N, block_hash):


def bip158_relevant_scriptpubkeys(node, block_hash):
-""" Determines the basic filter relvant scriptPubKeys as defined in BIP158:
+""" Determines the basic filter relevant scriptPubKeys as defined in BIP158:

'A basic filter MUST contain exactly the following items for each transaction in a block:
- The previous output script (the script being spent) for each input, except for
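For readers unfamiliar with BIP158, the docstring above refers to the items the basic filter commits to: the previous output script of every non-coinbase input, plus every output script except OP_RETURN outputs. A hedged sketch using generic node RPC calls, not the helper from this test file, might look like:

def basic_filter_scriptpubkeys(node, block_hash):
    """Collect the scriptPubKeys BIP158's basic filter commits to for a block.

    Illustrative only; assumes the node can look up funding transactions
    (e.g. it was started with -txindex).
    """
    spks = set()
    block = node.getblock(block_hash, 2)  # verbosity 2: fully decoded transactions
    for tx in block["tx"]:
        # The previous output script for each input, except coinbase inputs.
        for vin in tx["vin"]:
            if "coinbase" in vin:
                continue
            prev_tx = node.getrawtransaction(vin["txid"], True)
            spks.add(prev_tx["vout"][vin["vout"]]["scriptPubKey"]["hex"])
        # The script of each output, aside from OP_RETURN (0x6a) outputs;
        # skip empty scripts defensively.
        for vout in tx["vout"]:
            spk = vout["scriptPubKey"]["hex"]
            if spk and not spk.startswith("6a"):
                spks.add(spk)
    return spks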
@@ -257,7 +257,7 @@ class AvoidReuseTest(BitcoinTestFramework):

if not self.options.descriptors:
# For the second send, we transmute it to a related single-key address
-# to make sure it's also detected as re-use
+# to make sure it's also detected as reuse
fund_spk = address_to_scriptpubkey(fundaddr).hex()
fund_decoded = self.nodes[0].decodescript(fund_spk)
if second_addr_type == "p2sh-segwit":
@@ -6,6 +6,7 @@ bu
cachable
clen
crypted
debbugs
fo
fpr
hights