-
Notifications
You must be signed in to change notification settings - Fork 38.7k
mempool/validation: mempool ancestor/descendant limits for packages #21800
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
f95bbf5
97dd1c7
f551841
c6e016a
3cd663a
f8253d6
313c09f
2b6b26e
accf3d5
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -151,33 +151,17 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes | |
| } | ||
| } | ||
|
|
||
| bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */) const | ||
| bool CTxMemPool::CalculateAncestorsAndCheckLimits(size_t entry_size, | ||
| size_t entry_count, | ||
| setEntries& setAncestors, | ||
| CTxMemPoolEntry::Parents& staged_ancestors, | ||
| uint64_t limitAncestorCount, | ||
| uint64_t limitAncestorSize, | ||
| uint64_t limitDescendantCount, | ||
| uint64_t limitDescendantSize, | ||
| std::string &errString) const | ||
| { | ||
| CTxMemPoolEntry::Parents staged_ancestors; | ||
| const CTransaction &tx = entry.GetTx(); | ||
|
|
||
| if (fSearchForParents) { | ||
| // Get parents of this transaction that are in the mempool | ||
| // GetMemPoolParents() is only valid for entries in the mempool, so we | ||
| // iterate mapTx to find parents. | ||
| for (unsigned int i = 0; i < tx.vin.size(); i++) { | ||
| std::optional<txiter> piter = GetIter(tx.vin[i].prevout.hash); | ||
| if (piter) { | ||
| staged_ancestors.insert(**piter); | ||
| if (staged_ancestors.size() + 1 > limitAncestorCount) { | ||
| errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); | ||
| return false; | ||
| } | ||
| } | ||
| } | ||
| } else { | ||
| // If we're not searching for parents, we require this to be an | ||
| // entry in the mempool already. | ||
| txiter it = mapTx.iterator_to(entry); | ||
| staged_ancestors = it->GetMemPoolParentsConst(); | ||
| } | ||
|
|
||
| size_t totalSizeWithAncestors = entry.GetTxSize(); | ||
| size_t totalSizeWithAncestors = entry_size; | ||
|
|
||
| while (!staged_ancestors.empty()) { | ||
| const CTxMemPoolEntry& stage = staged_ancestors.begin()->get(); | ||
|
|
@@ -187,10 +171,10 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr | |
| staged_ancestors.erase(stage); | ||
| totalSizeWithAncestors += stageit->GetTxSize(); | ||
|
|
||
| if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { | ||
| if (stageit->GetSizeWithDescendants() + entry_size > limitDescendantSize) { | ||
| errString = strprintf("exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantSize); | ||
| return false; | ||
| } else if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { | ||
| } else if (stageit->GetCountWithDescendants() + entry_count > limitDescendantCount) { | ||
| errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantCount); | ||
glozow marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| return false; | ||
| } else if (totalSizeWithAncestors > limitAncestorSize) { | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think we have topologies of in-mempool transactions and packages bypassing this limit. Let's say you have in-mempool, independent Tx_A and Tx_B where their virtual sizes are 30 KvB each. In In Further, I don't think it's rejected by the check against Note, you need to replace Tx by chain of transactions because of
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hm, I don't think this case bypasses the algorithm. I've implemented this test in the latest push (see There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks for writing the test, I noticed where my reasoning was flawed!
We init |
||
|
|
@@ -206,7 +190,7 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr | |
| if (setAncestors.count(parent_it) == 0) { | ||
| staged_ancestors.insert(parent); | ||
| } | ||
| if (staged_ancestors.size() + setAncestors.size() + 1 > limitAncestorCount) { | ||
| if (staged_ancestors.size() + setAncestors.size() + entry_count > limitAncestorCount) { | ||
| errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); | ||
| return false; | ||
| } | ||
|
|
@@ -216,6 +200,80 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr | |
| return true; | ||
| } | ||
|
|
||
| bool CTxMemPool::CheckPackageLimits(const Package& package, | ||
| uint64_t limitAncestorCount, | ||
| uint64_t limitAncestorSize, | ||
| uint64_t limitDescendantCount, | ||
| uint64_t limitDescendantSize, | ||
| std::string &errString) const | ||
| { | ||
| CTxMemPoolEntry::Parents staged_ancestors; | ||
| size_t total_size = 0; | ||
| for (const auto& tx : package) { | ||
| total_size += GetVirtualTransactionSize(*tx); | ||
| for (const auto& input : tx->vin) { | ||
| std::optional<txiter> piter = GetIter(input.prevout.hash); | ||
| if (piter) { | ||
| staged_ancestors.insert(**piter); | ||
| if (staged_ancestors.size() + package.size() > limitAncestorCount) { | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In the future, if packages are allowed to replace in-mempool transactions do we have concerns of the same transaction accounted twice falsifying this check ? Let's say you have in-mempool { Tx_A } and in-package { Tx_A, Tx_B } where Tx_A is parent of Tx_B. Within this configuration, Tx_A is going to be counted twice, firstly as a If this reasoning holds, one solution to be future-proof could be to proceed to the evaluation against
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yep, this is absolutely a consideration for replacements and duplicates in packages in the future. I agree, we'll want to de-duplicate first (CMPA and CheckPackageLimits wouldn't be called for transactions already in the mempool, since PreChecks looks for
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. (I will note this down for the future - I think we're on the same page about this, and it's not yet applicable for this PR) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Exactly, this is another limit case to be aware of. And even trickier one like an ancestor of a second-stage package member being replaced by a first-stage package member and thus failing the whole package acceptance. Yes, not yet applicable for this PR, good to not forget about it!
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. i think a simpler API would be to have staged always contain all the entries themselves? is there a reason not to (I think the epoch algorithm is relatively lightweight for this). |
||
| errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); | ||
| return false; | ||
| } | ||
| } | ||
jnewbery marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| } | ||
| } | ||
| // When multiple transactions are passed in, the ancestors and descendants of all transactions | ||
| // considered together must be within limits even if they are not interdependent. This may be | ||
| // stricter than the limits for each individual transaction. | ||
| setEntries setAncestors; | ||
| const auto ret = CalculateAncestorsAndCheckLimits(total_size, package.size(), | ||
| setAncestors, staged_ancestors, | ||
| limitAncestorCount, limitAncestorSize, | ||
| limitDescendantCount, limitDescendantSize, errString); | ||
| // It's possible to overestimate the ancestor/descendant totals. | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Now that this errString is only used for the
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In fact, I don't think this string ever gets used (in logging or returned to users). Should |
||
| if (!ret) errString.insert(0, "possibly "); | ||
glozow marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| return ret; | ||
| } | ||
|
|
||
| bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, | ||
| setEntries &setAncestors, | ||
| uint64_t limitAncestorCount, | ||
| uint64_t limitAncestorSize, | ||
| uint64_t limitDescendantCount, | ||
| uint64_t limitDescendantSize, | ||
| std::string &errString, | ||
| bool fSearchForParents /* = true */) const | ||
| { | ||
| CTxMemPoolEntry::Parents staged_ancestors; | ||
| const CTransaction &tx = entry.GetTx(); | ||
|
|
||
| if (fSearchForParents) { | ||
| // Get parents of this transaction that are in the mempool | ||
| // GetMemPoolParents() is only valid for entries in the mempool, so we | ||
| // iterate mapTx to find parents. | ||
| for (unsigned int i = 0; i < tx.vin.size(); i++) { | ||
| std::optional<txiter> piter = GetIter(tx.vin[i].prevout.hash); | ||
| if (piter) { | ||
| staged_ancestors.insert(**piter); | ||
| if (staged_ancestors.size() + 1 > limitAncestorCount) { | ||
| errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount); | ||
| return false; | ||
| } | ||
| } | ||
| } | ||
| } else { | ||
| // If we're not searching for parents, we require this to already be an | ||
| // entry in the mempool and use the entry's cached parents. | ||
| txiter it = mapTx.iterator_to(entry); | ||
| staged_ancestors = it->GetMemPoolParentsConst(); | ||
| } | ||
|
|
||
| return CalculateAncestorsAndCheckLimits(entry.GetTxSize(), /* entry_count */ 1, | ||
| setAncestors, staged_ancestors, | ||
| limitAncestorCount, limitAncestorSize, | ||
| limitDescendantCount, limitDescendantSize, errString); | ||
| } | ||
|
|
||
| void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) | ||
| { | ||
| CTxMemPoolEntry::Parents parents = it->GetMemPoolParents(); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -1079,6 +1079,19 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: | |
| m_viewmempool.PackageAddTransaction(ws.m_ptx); | ||
| } | ||
|
|
||
| // Apply package mempool ancestor/descendant limits. Skip if there is only one transaction, | ||
| // because it's unnecessary. Also, CPFP carve out can increase the limit for individual | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Note, I still wonder if we need to make CPFP carve-out composable with package acceptance in the future. Otherwise a counterparty can build a branch of descendants from an output A on a multi-party unconfirmed ancestor to block package acceptance on an output B. I don't think this is required for current LN safety, as least as long as implementation backends don't try to chain their commitment+CPFP transactions. But it might be useful for other second-layers applications (e.g Lightning Pool's batch execution+commitment txn).
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can we use packages to just remove the carve out? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ideally yes, carve-out don't scale for multi-party (n > 2) fee-bumping. Though extending the carve-out to safely chain CPFPs was argued by Matt on the LDK side (see here https://lightningdevkit.slack.com/archives/CTBLT3CAU/p1625786787422100). Personally, I think this approach is a bit doomed as we can assume first-stage of the CPFP-chain can always be replaced by a valid claim of a counterparty, thus downgrading the feerate of your other broadcasted commitments and I prefer the concurrent domino fee-bumping approach even if it's more complexity swallowed by the LN backend. Further, I think there is another multi-party contract issue we have to solve. Namely, the first-stage state transaction can be symmetric but the second-stage states asymmetric-though-composable. E.g a CTV tree with a root transaction with branch A/branch B spending 2 isolated outputs. Alice broadcast root+branch A and Bob broadcast root+branch B, thus accidentally or maliciously evicting Alice's branch. I think we would like the mempool logic to aggregate those in-flight packages by conserving the highest-feerate subgraphs and that would require some form of carve-out to avoid package limits interfering ? |
||
| // transactions, but this exemption is not extended to packages in CheckPackageLimits(). | ||
| std::string err_string; | ||
| if (txns.size() > 1 && | ||
| !m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, | ||
| m_limit_descendant_size, err_string)) { | ||
| // All transactions must have individually passed mempool ancestor and descendant limits | ||
| // inside of PreChecks(), so this is separate from an individual transaction error. | ||
| package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string); | ||
| return PackageMempoolAcceptResult(package_state, std::move(results)); | ||
| } | ||
|
|
||
| for (Workspace& ws : workspaces) { | ||
| PrecomputedTransactionData txdata; | ||
| if (!PolicyScriptChecks(args, ws, txdata)) { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think this check doesn't have new coverage in `rpc_packages.py`? I got a success for the following diff:
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This error should be caught in the mempool_tests unit test?
It's true, though, that I didn't write tests for the size limits, just did count limits. I can add size limit tests.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Added a functional test for size limits —
`test_desc_size_limits()` will fail if you apply this diff. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It should, but I don't get an error for any
`mempool_*` unit tests. Wouldn't be surprised if there is a coverage hole predating this PR. Thanks for adding one; there is at least one behavior change introduced with this PR that needs coverage:
`entry_count` can be > 1.