Merge pull request #6949 from stejbac/change-burning-man-capping-algorithm

Change burning man capping algorithm
This commit is contained in:
Alejandro García 2023-12-20 04:59:40 +00:00 committed by GitHub
commit 237c7054be
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 452 additions and 38 deletions

View File

@ -42,12 +42,16 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@ -73,10 +77,10 @@ public class BurningManService {
private static final double GENESIS_OUTPUT_AMOUNT_FACTOR = 0.1;
// The number of cycles we go back for the decay function used for compensation request amounts.
private static final int NUM_CYCLES_COMP_REQUEST_DECAY = 24;
static final int NUM_CYCLES_COMP_REQUEST_DECAY = 24;
// The number of cycles we go back for the decay function used for burned amounts.
private static final int NUM_CYCLES_BURN_AMOUNT_DECAY = 12;
static final int NUM_CYCLES_BURN_AMOUNT_DECAY = 12;
// Factor for boosting the issuance share (issuance is compensation requests + genesis output).
// This will be used for increasing the allowed burn amount. The factor gives more flexibility
@ -111,7 +115,11 @@ public class BurningManService {
///////////////////////////////////////////////////////////////////////////////////////////
Map<String, BurningManCandidate> getBurningManCandidatesByName(int chainHeight) {
Map<String, BurningManCandidate> burningManCandidatesByName = new HashMap<>();
return getBurningManCandidatesByName(chainHeight, !DelayedPayoutTxReceiverService.isProposal412Activated());
}
Map<String, BurningManCandidate> getBurningManCandidatesByName(int chainHeight, boolean limitCappingRounds) {
Map<String, BurningManCandidate> burningManCandidatesByName = new TreeMap<>();
Map<P2PDataStorage.ByteArray, Set<TxOutput>> proofOfBurnOpReturnTxOutputByHash = getProofOfBurnOpReturnTxOutputByHash(chainHeight);
// Add contributors who made a compensation request
@ -120,8 +128,7 @@ public class BurningManService {
.forEach(issuance -> {
getCompensationProposalsForIssuance(issuance).forEach(compensationProposal -> {
String name = compensationProposal.getName();
burningManCandidatesByName.putIfAbsent(name, new BurningManCandidate());
BurningManCandidate candidate = burningManCandidatesByName.get(name);
BurningManCandidate candidate = burningManCandidatesByName.computeIfAbsent(name, n -> new BurningManCandidate());
// Issuance
Optional<String> customAddress = compensationProposal.getBurningManReceiverAddress();
@ -187,25 +194,58 @@ public class BurningManService {
.sum();
burningManCandidates.forEach(candidate -> candidate.calculateShares(totalDecayedCompensationAmounts, totalDecayedBurnAmounts));
int numRoundsWithCapsApplied = imposeCaps(burningManCandidates, limitCappingRounds);
double sumAllCappedBurnAmountShares = burningManCandidates.stream()
.filter(candidate -> candidate.getBurnAmountShare() >= candidate.getMaxBoostedCompensationShare())
.filter(candidate -> candidate.getRoundCapped().isPresent())
.mapToDouble(BurningManCandidate::getMaxBoostedCompensationShare)
.sum();
double sumAllNonCappedBurnAmountShares = burningManCandidates.stream()
.filter(candidate -> candidate.getBurnAmountShare() < candidate.getMaxBoostedCompensationShare())
.filter(candidate -> candidate.getRoundCapped().isEmpty())
.mapToDouble(BurningManCandidate::getBurnAmountShare)
.sum();
burningManCandidates.forEach(candidate -> candidate.calculateCappedAndAdjustedShares(sumAllCappedBurnAmountShares, sumAllNonCappedBurnAmountShares));
burningManCandidates.forEach(candidate -> candidate.calculateCappedAndAdjustedShares(
sumAllCappedBurnAmountShares, sumAllNonCappedBurnAmountShares, numRoundsWithCapsApplied));
return burningManCandidatesByName;
}
/**
 * Applies burn-share caps to the candidates in descending order of their burn-cap
 * ratio (burnAmountShare / maxBoostedCompensationShare) and returns the number of
 * capping rounds applied. Each time a candidate's ratio drops below the current
 * threshold, a new capping round begins and the threshold is lowered to the
 * remaining burn-to-cap ratio, which scales up subsequent candidates' adjusted
 * shares so that the excess of already-capped candidates is redistributed.
 *
 * NOTE(review): consensus-critical — output must be well defined to the nearest
 * floating point ULP (see the DPT-verification comments in the tests), so the
 * exact statement order here must not be changed.
 *
 * @param burningManCandidates candidates to (possibly) cap; mutated via imposeCap
 * @param limitCappingRounds   if true (pre Proposal 412 activation), stop as soon
 *                             as a candidate's ratio falls below 1.0, limiting the
 *                             number of capping rounds (legacy behavior)
 * @return the number of capping rounds that were applied
 */
private static int imposeCaps(Collection<BurningManCandidate> burningManCandidates, boolean limitCappingRounds) {
    List<BurningManCandidate> candidatesInDescendingBurnCapRatio = new ArrayList<>(burningManCandidates);
    candidatesInDescendingBurnCapRatio.sort(Comparator.comparing(BurningManCandidate::getBurnCapRatio).reversed());
    double thresholdBurnCapRatio = 1.0;
    double remainingBurnShare = 1.0;
    double remainingCapShare = 1.0;
    int cappingRound = 0;
    for (BurningManCandidate candidate : candidatesInDescendingBurnCapRatio) {
        double invScaleFactor = remainingBurnShare / remainingCapShare;
        double burnCapRatio = candidate.getBurnCapRatio();
        // Stop once no cap share remains, the candidate has no burn, or its ratio is
        // too low to be capped in a further round (or below 1.0 when rounds are
        // limited). The increment counts the round in which the break occurs.
        if (remainingCapShare <= 0.0 || burnCapRatio <= 0.0 || burnCapRatio < invScaleFactor ||
                limitCappingRounds && burnCapRatio < 1.0) {
            cappingRound++;
            break;
        }
        // Ratio dropped below the threshold of the current round: start a new round
        // with a lowered threshold equal to the remaining burn-to-cap ratio.
        if (burnCapRatio < thresholdBurnCapRatio) {
            thresholdBurnCapRatio = invScaleFactor;
            cappingRound++;
        }
        candidate.imposeCap(cappingRound, candidate.getBurnAmountShare() / thresholdBurnCapRatio);
        remainingBurnShare -= candidate.getBurnAmountShare();
        remainingCapShare -= candidate.getMaxBoostedCompensationShare();
    }
    return cappingRound;
}
// Returns the legacy burning man's BTC address, read from the DAO
// RECIPIENT_BTC_ADDRESS parameter as of the given chain height.
String getLegacyBurningManAddress(int chainHeight) {
    String address = daoStateService.getParamValue(Param.RECIPIENT_BTC_ADDRESS, chainHeight);
    return address;
}
Set<BurningManCandidate> getActiveBurningManCandidates(int chainHeight) {
return getBurningManCandidatesByName(chainHeight).values().stream()
return getActiveBurningManCandidates(chainHeight, !DelayedPayoutTxReceiverService.isProposal412Activated());
}
Set<BurningManCandidate> getActiveBurningManCandidates(int chainHeight, boolean limitCappingRounds) {
return getBurningManCandidatesByName(chainHeight, limitCappingRounds).values().stream()
.filter(burningManCandidate -> burningManCandidate.getCappedBurnAmountShare() > 0)
.filter(candidate -> candidate.getReceiverAddress().isPresent())
.collect(Collectors.toSet());

View File

@ -56,11 +56,18 @@ public class DelayedPayoutTxReceiverService implements DaoStateListener {
// requests change address.
// See: https://github.com/bisq-network/bisq/issues/6699
public static final Date BUGFIX_6699_ACTIVATION_DATE = Utilities.getUTCDate(2023, GregorianCalendar.JULY, 24);
// See: https://github.com/bisq-network/proposals/issues/412
public static final Date PROPOSAL_412_ACTIVATION_DATE = Utilities.getUTCDate(2024, GregorianCalendar.JANUARY, 1);
// Whether the fix for issue 6699 is active, i.e. the current wall-clock time is
// past BUGFIX_6699_ACTIVATION_DATE. See: https://github.com/bisq-network/bisq/issues/6699
public static boolean isBugfix6699Activated() {
    return new Date().after(BUGFIX_6699_ACTIVATION_DATE);
}
// Whether the Proposal 412 burning-man capping algorithm change is active, i.e. the
// current wall-clock time is past PROPOSAL_412_ACTIVATION_DATE.
// See: https://github.com/bisq-network/proposals/issues/412
@SuppressWarnings("BooleanMethodIsAlwaysInverted")
public static boolean isProposal412Activated() {
    return new Date().after(PROPOSAL_412_ACTIVATION_DATE);
}
// We don't allow to get further back than 767950 (the block height from Dec. 18th 2022).
static final int MIN_SNAPSHOT_HEIGHT = Config.baseCurrencyNetwork().isRegtest() ? 0 : 767950;

View File

@ -28,6 +28,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
import java.util.stream.Collectors;
@ -68,6 +69,7 @@ public class BurningManCandidate {
// The burnAmountShare adjusted in case there are cappedBurnAmountShare.
// We redistribute the over-burned amounts to the group of not capped candidates.
protected double adjustedBurnAmountShare;
private OptionalInt roundCapped = OptionalInt.empty();
public BurningManCandidate() {
}
@ -142,11 +144,19 @@ public class BurningManCandidate {
burnAmountShare = totalDecayedBurnAmounts > 0 ? accumulatedDecayedBurnAmount / totalDecayedBurnAmounts : 0;
}
/**
 * Records that this candidate was capped in the given capping round and stores the
 * (scaled-up) adjusted burn amount share computed by the capping algorithm.
 *
 * @param cappingRound            the round of the capping algorithm in which this
 *                                candidate was capped
 * @param adjustedBurnAmountShare the candidate's burn share scaled up by the
 *                                capping round's threshold (presentation only)
 */
public void imposeCap(int cappingRound, double adjustedBurnAmountShare) {
    roundCapped = OptionalInt.of(cappingRound);
    // NOTE: The adjusted burn share set here will not affect the final capped burn share, only
    // the presentation service, so we need not worry about rounding errors affecting consensus.
    this.adjustedBurnAmountShare = adjustedBurnAmountShare;
}
public void calculateCappedAndAdjustedShares(double sumAllCappedBurnAmountShares,
double sumAllNonCappedBurnAmountShares) {
double sumAllNonCappedBurnAmountShares,
int numAppliedCappingRounds) {
double maxBoostedCompensationShare = getMaxBoostedCompensationShare();
adjustedBurnAmountShare = burnAmountShare;
if (burnAmountShare < maxBoostedCompensationShare) {
if (roundCapped.isEmpty()) {
adjustedBurnAmountShare = burnAmountShare;
if (sumAllCappedBurnAmountShares == 0) {
// If no one is capped we do not need to do any adjustment
cappedBurnAmountShare = burnAmountShare;
@ -165,7 +175,11 @@ public class BurningManCandidate {
} else {
// We exceeded the cap by the adjustment. This will lead to the legacy BM getting the
// difference of the adjusted amount and the maxBoostedCompensationShare.
// NOTE: When the number of capping rounds are unlimited (that is post- Proposal 412
// activation), we should only get to this branch as a result of floating point rounding
// errors. In that case, the extra amount the LBM gets is negligible.
cappedBurnAmountShare = maxBoostedCompensationShare;
roundCapped = OptionalInt.of(roundCapped.orElse(numAppliedCappingRounds));
}
}
}
@ -174,6 +188,12 @@ public class BurningManCandidate {
}
}
/**
 * Ratio of this candidate's burn amount share to its maximum boosted compensation
 * share, or zero for a candidate with no burn.
 *
 * NOTE: This is less than 1.0 precisely when burnAmountShare is less than
 * maxBoostedCompensationShare, in spite of any floating point rounding errors,
 * since 1.0 is proportionately at least as close to the previous double as any two
 * consecutive nonzero doubles on the number line.
 */
public double getBurnCapRatio() {
    if (burnAmountShare > 0.0) {
        return burnAmountShare / getMaxBoostedCompensationShare();
    }
    return 0.0;
}
public double getMaxBoostedCompensationShare() {
return Math.min(BurningManService.MAX_BURN_SHARE, compensationShare * BurningManService.ISSUANCE_BOOST_FACTOR);
@ -194,6 +214,7 @@ public class BurningManCandidate {
",\r\n burnAmountShare=" + burnAmountShare +
",\r\n cappedBurnAmountShare=" + cappedBurnAmountShare +
",\r\n adjustedBurnAmountShare=" + adjustedBurnAmountShare +
",\r\n roundCapped=" + roundCapped +
"\r\n}";
}
}

View File

@ -48,9 +48,15 @@ public final class LegacyBurningMan extends BurningManCandidate {
// do nothing
}
@Override
public void imposeCap(int cappingRound, double adjustedBurnAmountShare) {
    // do nothing - the legacy burning man is never capped by the capping algorithm
    // (presumably it absorbs whatever share is left over; see BurningManService)
}
@Override
public void calculateCappedAndAdjustedShares(double sumAllCappedBurnAmountShares,
double sumAllNonCappedBurnAmountShares) {
double sumAllNonCappedBurnAmountShares,
int numAppliedCappingRounds) {
// do nothing
}

View File

@ -17,10 +17,55 @@
package bisq.core.dao.burningman;
import bisq.core.dao.CyclesInDaoStateService;
import bisq.core.dao.burningman.model.BurningManCandidate;
import bisq.core.dao.governance.proofofburn.ProofOfBurnConsensus;
import bisq.core.dao.governance.proposal.ProposalService;
import bisq.core.dao.governance.proposal.storage.appendonly.ProposalPayload;
import bisq.core.dao.state.DaoStateService;
import bisq.core.dao.state.model.blockchain.Tx;
import bisq.core.dao.state.model.governance.CompensationProposal;
import bisq.core.dao.state.model.governance.Issuance;
import bisq.core.dao.state.model.governance.IssuanceType;
import bisq.common.util.Tuple2;
import protobuf.BaseTx;
import protobuf.BaseTxOutput;
import protobuf.TxOutput;
import protobuf.TxOutputType;
import protobuf.TxType;
import com.google.protobuf.ByteString;
import org.bitcoinj.core.Coin;
import javafx.collections.FXCollections;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoSession;
import org.mockito.stubbing.Answer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.when;
public class BurningManServiceTest {
@Test
@ -43,6 +88,285 @@ public class BurningManServiceTest {
assertEquals(7, BurningManService.getDecayedAmount(amount, 120, 400, 100));
assertEquals(3, BurningManService.getDecayedAmount(amount, 120, 410, 110));
assertEquals(40, BurningManService.getDecayedAmount(amount, 220, 400, 100));
}
@Nested
public class BurnShareTest {
private MockitoSession mockitoSession;
@Mock
private DaoStateService daoStateService;
@Mock
private CyclesInDaoStateService cyclesInDaoStateService;
@Mock
private ProposalService proposalService;
@InjectMocks
private BurningManService burningManService;
@BeforeEach
public void setUp() {
    // Start a Mockito session for the @Mock/@InjectMocks fields and stub the
    // cycle look-back heights used by the decay functions at chain height 800000:
    // burned amounts decay from 750000, compensation requests from 700000.
    mockitoSession = Mockito.mockitoSession().initMocks(this).startMocking();
    when(cyclesInDaoStateService.getChainHeightOfPastCycle(800000, BurningManService.NUM_CYCLES_BURN_AMOUNT_DECAY))
            .thenReturn(750000);
    when(cyclesInDaoStateService.getChainHeightOfPastCycle(800000, BurningManService.NUM_CYCLES_COMP_REQUEST_DECAY))
            .thenReturn(700000);
}
@AfterEach
public void tearDown() {
    // End the Mockito session started in setUp.
    mockitoSession.finishMocking();
}
// Stubs the DAO state service so the given txs form the complete proof-of-burn set:
// each tx's first output is returned as an op-return output, and getTx(id) resolves
// any of the given tx ids (empty otherwise).
private void addProofOfBurnTxs(Tx... txs) {
    var txsById = Arrays.stream(txs)
            .collect(Collectors.toMap(Tx::getId, tx -> tx));
    when(daoStateService.getProofOfBurnOpReturnTxOutputs())
            .thenReturn(Arrays.stream(txs).map(tx -> tx.getTxOutputs().get(0)).collect(Collectors.toSet()));
    when(daoStateService.getTx(Mockito.anyString()))
            .thenAnswer((Answer<Optional<Tx>>) inv -> Optional.ofNullable(txsById.get(inv.getArgument(0, String.class))));
}
// Stubs the DAO state and proposal services with the given (issuance, payload)
// pairs as the complete set of compensation issuances and proposal payloads.
private void addCompensationIssuanceAndPayloads(Collection<Tuple2<Issuance, ProposalPayload>> tuples) {
    when(daoStateService.getIssuanceSetForType(IssuanceType.COMPENSATION))
            .thenReturn(tuples.stream().map(t -> t.first).collect(Collectors.toSet()));
    when(proposalService.getProposalPayloads())
            .thenReturn(tuples.stream().map(t -> t.second).collect(Collectors.toCollection(FXCollections::observableArrayList)));
}
// Varargs convenience overload of the Collection-based stubbing helper above.
@SafeVarargs
private void addCompensationIssuanceAndPayloads(Tuple2<Issuance, ProposalPayload>... tuples) {
    addCompensationIssuanceAndPayloads(Arrays.asList(tuples));
}
// Candidates whose compensation requests or proof-of-burn txs fall outside the decay
// windows (comp requests before 700000, burns before 750000) contribute nothing to the
// corresponding shares; a candidate with no burns at all ("dave") gets a zero share.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_inactiveAndExpiredCandidates(boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(
            compensationIssuanceAndPayload("alice", "0000", 760000, 10000),
            compensationIssuanceAndPayload("bob", "0001", 690000, 20000), // expired
            compensationIssuanceAndPayload("carol", "0002", 770000, 20000),
            compensationIssuanceAndPayload("dave", "0003", 770000, 20000) // inactive
    );
    addProofOfBurnTxs(
            proofOfBurnTx("alice", "1000", 780000, 400000),
            proofOfBurnTx("bob", "1001", 790000, 300000),
            proofOfBurnTx("carol", "1002", 740000, 300000) // expired
    );
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    assertEquals(0.11, candidateMap.get("alice").getMaxBoostedCompensationShare());
    assertEquals(0.0, candidateMap.get("bob").getMaxBoostedCompensationShare());
    assertEquals(0.11, candidateMap.get("carol").getMaxBoostedCompensationShare());
    assertEquals(0.11, candidateMap.get("dave").getMaxBoostedCompensationShare());
    assertEquals(0.5, candidateMap.get("alice").getBurnAmountShare());
    assertEquals(0.5, candidateMap.get("bob").getBurnAmountShare());
    assertEquals(0.0, candidateMap.get("carol").getBurnAmountShare());
    assertEquals(0.0, candidateMap.get("dave").getBurnAmountShare());
    assertEquals(0.5, candidateMap.get("alice").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.5, candidateMap.get("bob").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.0, candidateMap.get("carol").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.0, candidateMap.get("dave").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.11, candidateMap.get("alice").getCappedBurnAmountShare());
    assertEquals(0.0, candidateMap.get("bob").getCappedBurnAmountShare());
    assertEquals(0.0, candidateMap.get("carol").getCappedBurnAmountShare());
    assertEquals(0.0, candidateMap.get("dave").getCappedBurnAmountShare());
    // "bob" is capped at round 0 despite a zero cap (his comp request expired).
    assertEquals(0, candidateMap.get("alice").getRoundCapped().orElse(-1));
    assertEquals(0, candidateMap.get("bob").getRoundCapped().orElse(-1));
    assertEquals(-1, candidateMap.get("carol").getRoundCapped().orElse(-1));
    assertEquals(-1, candidateMap.get("dave").getRoundCapped().orElse(-1));
}
// Two candidates, both burning more than their 11% cap allows: both are capped in the
// first round (round 0), regardless of whether capping rounds are limited.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_capsSumToLessThanUnity_allCapped_oneCappingRoundNeeded(boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(
            compensationIssuanceAndPayload("alice", "0000", 760000, 10000),
            compensationIssuanceAndPayload("bob", "0001", 770000, 20000)
    );
    addProofOfBurnTxs(
            proofOfBurnTx("alice", "1000", 780000, 400000),
            proofOfBurnTx("bob", "1001", 790000, 300000)
    );
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    assertEquals(0.5, candidateMap.get("alice").getBurnAmountShare());
    assertEquals(0.5, candidateMap.get("bob").getBurnAmountShare());
    assertEquals(0.5, candidateMap.get("alice").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.5, candidateMap.get("bob").getAdjustedBurnAmountShare(), 1e-10);
    assertEquals(0.11, candidateMap.get("alice").getCappedBurnAmountShare());
    assertEquals(0.11, candidateMap.get("bob").getCappedBurnAmountShare());
    assertEquals(0, candidateMap.get("alice").getRoundCapped().orElse(-1));
    assertEquals(0, candidateMap.get("bob").getRoundCapped().orElse(-1));
}
// Ten identical candidates, each with a 10% burn share under an 11% cap: no one is
// capped and all shares pass through unchanged.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_capsSumToMoreThanUnity_noneCapped_oneCappingRoundNeeded(boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(IntStream.range(0, 10).mapToObj(i ->
            compensationIssuanceAndPayload("alice" + i, "000" + i, 710000, 100000)
    ).collect(Collectors.toList()));
    addProofOfBurnTxs(IntStream.range(0, 10).mapToObj(i ->
            proofOfBurnTx("alice" + i, "100" + i, 760000, 400000)
    ).toArray(Tx[]::new));
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    assertAll(IntStream.range(0, 10).mapToObj(i -> () -> {
        var candidate = candidateMap.get("alice" + i);
        assertEquals(0.11, candidate.getMaxBoostedCompensationShare());
        assertEquals(0.1, candidate.getBurnAmountShare());
        assertEquals(0.1, candidate.getAdjustedBurnAmountShare(), 1e-10);
        assertEquals(0.1, candidate.getCappedBurnAmountShare());
        assertEquals(-1, candidate.getRoundCapped().orElse(-1));
    }));
}
// Six big burners hit the 11% cap in round 0; the remaining four get the redistributed
// excess. Two capping rounds suffice, so old and new algorithms agree here.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_capsSumToMoreThanUnity_someCapped_twoCappingRoundsNeeded(boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(IntStream.range(0, 10).mapToObj(i ->
            compensationIssuanceAndPayload("alice" + i, "000" + i, 710000, 100000)
    ).collect(Collectors.toList()));
    addProofOfBurnTxs(IntStream.range(0, 10).mapToObj(i ->
            proofOfBurnTx("alice" + i, "100" + i, 760000, i < 6 ? 400000 : 200000)
    ).toArray(Tx[]::new));
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    // Note the expected rounding error below. To prevent DPT verification failures, the
    // capping algorithm output must be well defined to the nearest floating point ULP.
    assertAll(IntStream.range(0, 10).mapToObj(i -> () -> {
        var candidate = candidateMap.get("alice" + i);
        assertEquals(0.11, candidate.getMaxBoostedCompensationShare());
        assertEquals(i < 6 ? 0.125 : 0.0625, candidate.getBurnAmountShare());
        assertEquals(i < 6 ? 0.125 : 0.085, candidate.getAdjustedBurnAmountShare(), 1e-10);
        assertEquals(i < 6 ? 0.11 : 0.08499999999999999, candidate.getCappedBurnAmountShare());
        assertEquals(i < 6 ? 0 : -1, candidate.getRoundCapped().orElse(-1));
    }));
    // Only two capping rounds were required to achieve a burn share total of 100%, so
    // nothing goes to the LBM in this case.
    double burnShareTotal = candidateMap.values().stream().mapToDouble(BurningManCandidate::getCappedBurnAmountShare).sum();
    assertEquals(1.0, burnShareTotal);
}
// Scenario where a third capping round is needed: the last two candidates have a lower
// (7%) cap, so redistribution caps them again in round 1. The legacy (limited) algorithm
// stops early and leaks 3% to the legacy burning man.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_capsSumToMoreThanUnity_someCapped_threeCappingRoundsNeeded(boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(IntStream.range(0, 10).mapToObj(i ->
            compensationIssuanceAndPayload("alice" + i, "000" + i, 710000, i < 8 ? 123250 : 7000)
    ).collect(Collectors.toList()));
    addProofOfBurnTxs(IntStream.range(0, 10).mapToObj(i ->
            proofOfBurnTx("alice" + i, "100" + i, 760000, i < 6 ? 400000 : 200000)
    ).toArray(Tx[]::new));
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    // Note the expected rounding errors below. To prevent DPT verification failures, the
    // capping algorithm output must be well defined to the nearest floating point ULP.
    assertAll(IntStream.range(0, 10).mapToObj(i -> () -> {
        var candidate = candidateMap.get("alice" + i);
        assertEquals(i < 8 ? 0.11 : 0.07, candidate.getMaxBoostedCompensationShare());
        assertEquals(i < 6 ? 0.125 : 0.0625, candidate.getBurnAmountShare());
        if (limitCappingRounds) {
            assertEquals(i < 6 ? 0.125 : 0.085, candidate.getAdjustedBurnAmountShare(), 1e-10);
            assertEquals(i < 6 ? 0.11 : i < 8 ? 0.08499999999999999 : 0.07, candidate.getCappedBurnAmountShare());
        } else {
            assertEquals(i < 6 ? 0.125 : i < 8 ? 0.1 : 0.085, candidate.getAdjustedBurnAmountShare(), 1e-10);
            assertEquals(i < 6 ? 0.11 : i < 8 ? 0.09999999999999998 : 0.07, candidate.getCappedBurnAmountShare());
        }
        assertEquals(i < 6 ? 0 : i < 8 ? -1 : 1, candidate.getRoundCapped().orElse(-1));
    }));
    // Three capping rounds are required to achieve a burn share total of 100%, but our
    // algorithm only applies two when `limitCappingRounds` is true (that is, prior to
    // the activation of the capping algorithm change), so 3% ends up going to the LBM in
    // that case, instead of being distributed between `alice6` & `alice7`. The caps sum
    // to more than 100%, however, so we could have avoided giving him any.
    double capTotal = candidateMap.values().stream().mapToDouble(BurningManCandidate::getMaxBoostedCompensationShare).sum();
    double burnShareTotal = candidateMap.values().stream().mapToDouble(BurningManCandidate::getCappedBurnAmountShare).sum();
    assertEquals(1.02, capTotal);
    assertEquals(limitCappingRounds ? 0.97 : 1.0, burnShareTotal);
}
// Extreme scenario where every candidate should end up capped, requiring four capping
// rounds. With unlimited rounds the burn share total reaches the maximum possible 99.6%
// (the cap sum); with the legacy two-round limit, 3% leaks to the legacy burning man.
@ValueSource(booleans = {true, false})
@ParameterizedTest(name = "[{index}] limitCappingRounds={0}")
public void testGetBurningManCandidatesByName_capsSumToLessThanUnity_allShouldBeCapped_fourCappingRoundsNeeded(
        boolean limitCappingRounds) {
    addCompensationIssuanceAndPayloads(IntStream.range(0, 10).mapToObj(i ->
            compensationIssuanceAndPayload("alice" + i, "000" + i, 710000,
                    i < 6 ? 483200 : i == 6 ? 31800 : i == 7 ? 27000 : 21000)
    ).collect(Collectors.toList()));
    addProofOfBurnTxs(IntStream.range(0, 10).mapToObj(i ->
            proofOfBurnTx("alice" + i, "100" + i, 760000, i < 6 ? 400000 : 200000)
    ).toArray(Tx[]::new));
    var candidateMap = burningManService.getBurningManCandidatesByName(800000, limitCappingRounds);
    // Note the expected rounding error below. To prevent DPT verification failures, the
    // capping algorithm output must be well defined to the nearest floating point ULP.
    assertAll(IntStream.range(0, 10).mapToObj(i -> () -> {
        var candidate = candidateMap.get("alice" + i);
        assertEquals(i < 6 ? 0.11 : i == 6 ? 0.106 : i == 7 ? 0.09 : 0.07, candidate.getMaxBoostedCompensationShare());
        assertEquals(i < 6 ? 0.125 : 0.0625, candidate.getBurnAmountShare());
        if (limitCappingRounds) {
            assertEquals(i < 6 ? 0.125 : 0.085, candidate.getAdjustedBurnAmountShare(), 1e-10);
            assertEquals(i < 6 ? 0.11 : i < 8 ? 0.08499999999999999 : 0.07, candidate.getCappedBurnAmountShare());
            assertEquals(i < 6 ? 0 : i < 8 ? -1 : 1, candidate.getRoundCapped().orElse(-1));
        } else {
            assertEquals(i < 6 ? 0.125 : i == 6 ? 0.11 : i == 7 ? 0.1 : 0.085, candidate.getAdjustedBurnAmountShare(), 1e-10);
            assertEquals(candidate.getMaxBoostedCompensationShare(), candidate.getCappedBurnAmountShare());
            assertEquals(i < 6 ? 0 : i == 6 ? 3 : i == 7 ? 2 : 1, candidate.getRoundCapped().orElse(-1));
        }
    }));
    // Four capping rounds are required to achieve a maximum possible burn share total of
    // 99.6%, with all the contributors being capped. But our algorithm only applies two
    // rounds when `limitCappingRounds` is true (that is, prior to the activation of the
    // capping algorithm change), so 3% ends up going to the LBM in that case, instead of
    // the minimum possible amount of 0.4% (100% less the cap sum). Contributors `alice6`
    // & `alice7` therefore receive less than they could have done.
    double capTotal = candidateMap.values().stream().mapToDouble(BurningManCandidate::getMaxBoostedCompensationShare).sum();
    double burnShareTotal = candidateMap.values().stream().mapToDouble(BurningManCandidate::getCappedBurnAmountShare).sum();
    assertEquals(0.996, capTotal);
    assertEquals(limitCappingRounds ? 0.97 : capTotal, burnShareTotal);
}
}
/**
 * Returns a cut-down issuance and compensation proposal payload tuple for mocking.
 *
 * @param name        the contributor (burning man candidate) name on the proposal
 * @param txId        the compensation request tx id linking issuance and proposal
 * @param chainHeight the block height of the issuance (governs decay)
 * @param amount      the issued/requested BSQ amount
 */
private static Tuple2<Issuance, ProposalPayload> compensationIssuanceAndPayload(String name,
                                                                                String txId,
                                                                                int chainHeight,
                                                                                long amount) {
    var issuance = new Issuance(txId, chainHeight, amount, null, IssuanceType.COMPENSATION);
    var extraDataMap = Map.of(CompensationProposal.BURNING_MAN_RECEIVER_ADDRESS, "receiverAddress");
    var proposal = new CompensationProposal(name, "link", Coin.valueOf(amount), "bsqAddress", extraDataMap);
    return new Tuple2<>(issuance, new ProposalPayload(proposal.cloneProposalAndAddTxId(txId)));
}
// Returns a cut-down proof-of-burn tx for mocking. The single output carries the
// op-return data derived from the candidate name's proof-of-burn hash, so the
// service can map the burn back to the candidate.
// FIXME: Going via a protobuf object is a bit of a hack.
private static Tx proofOfBurnTx(String candidateName, String txId, int blockHeight, long burntBsq) {
    byte[] opReturnData = ProofOfBurnConsensus.getOpReturnData(ProofOfBurnConsensus.getHash(candidateName.getBytes(UTF_8)));
    var txOutput = BaseTxOutput.newBuilder()
            .setTxOutput(TxOutput.newBuilder()
                    .setTxOutputType(TxOutputType.PROOF_OF_BURN_OP_RETURN_OUTPUT))
            .setOpReturnData(ByteString.copyFrom(opReturnData))
            .setTxId(txId)
            .setBlockHeight(blockHeight)
            .build();
    return Tx.fromProto(BaseTx.newBuilder()
            .setId(txId)
            .setTx(protobuf.Tx.newBuilder()
                    .addTxOutputs(txOutput)
                    .setTxType(TxType.PROOF_OF_BURN)
                    .setBurntBsq(burntBsq))
            .build());
}
}

View File

@ -36,8 +36,11 @@ import java.util.Arrays;
import java.util.Collections;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.Mockito;
import org.mockito.MockitoSession;
import org.mockito.quality.Strictness;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@ -48,20 +51,22 @@ import static org.mockito.Mockito.*;
* Tests of the P2PDataStorage::onRemoved callback behavior to ensure that the proper number of signal events occur.
*/
public class ProposalServiceP2PDataStorageListenerTest {
private ProposalService proposalService;
private MockitoSession mockitoSession;
private ProposalService proposalService;
@Mock
private PeriodService periodService;
@Mock
private DaoStateService daoStateService;
@Mock
private ListChangeListener<Proposal> tempProposalListener;
@BeforeEach
public void setUp() {
MockitoAnnotations.initMocks(this);
mockitoSession = Mockito.mockitoSession()
.initMocks(this)
.strictness(Strictness.LENIENT) // the two stubs below are not used in every test
.startMocking();
this.proposalService = new ProposalService(
mock(P2PService.class),
@ -78,6 +83,11 @@ public class ProposalServiceP2PDataStorageListenerTest {
when(this.daoStateService.isParseBlockChainComplete()).thenReturn(false);
}
@AfterEach
public void tearDown() {
    // End the Mockito session started in setUp.
    mockitoSession.finishMocking();
}
private static ProtectedStorageEntry buildProtectedStorageEntry() {
ProtectedStorageEntry protectedStorageEntry = mock(ProtectedStorageEntry.class);
TempProposalPayload tempProposalPayload = mock(TempProposalPayload.class);

View File

@ -45,8 +45,11 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.mockito.Mockito;
import org.mockito.MockitoSession;
import org.mockito.quality.Strictness;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@ -60,6 +63,7 @@ import static org.mockito.Mockito.withSettings;
public class P2PDataStorageBuildGetDataResponseTest {
abstract static class P2PDataStorageBuildGetDataResponseTestBase {
private MockitoSession mockitoSession;
// GIVEN null & non-null supportedCapabilities
private TestState testState;
@ -72,7 +76,10 @@ public class P2PDataStorageBuildGetDataResponseTest {
@BeforeEach
public void setUp() {
MockitoAnnotations.initMocks(this);
mockitoSession = Mockito.mockitoSession()
.initMocks(this)
.strictness(Strictness.LENIENT) // there are unused stubs in TestState & elsewhere
.startMocking();
this.testState = new TestState();
this.localNodeAddress = new NodeAddress("localhost", 8080);
@ -82,6 +89,11 @@ public class P2PDataStorageBuildGetDataResponseTest {
Capabilities.app.addAll(Capability.MEDIATION);
}
@AfterEach
public void tearDown() {
    // End the Mockito session started in setUp.
    mockitoSession.finishMocking();
}
static class RequiredCapabilitiesPNPStub extends PersistableNetworkPayloadStub
implements CapabilityRequiringPayload {
Capabilities capabilities;

View File

@ -34,8 +34,6 @@ import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import org.mockito.MockitoAnnotations;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@ -50,7 +48,6 @@ public class P2PDataStorageProcessGetDataResponse {
@BeforeEach
public void setUp() {
MockitoAnnotations.initMocks(this);
this.testState = new TestState();
this.peerNodeAddress = new NodeAddress("peer", 8080);
@ -111,7 +108,7 @@ public class P2PDataStorageProcessGetDataResponse {
// XXXBUGXXX: We signal listeners w/ non ProcessOncePersistableNetworkPayloads
@Test
public void processGetDataResponse_newPNPUpdatesState() {
PersistableNetworkPayload persistableNetworkPayload = new PersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload persistableNetworkPayload = new PersistableNetworkPayloadStub(new byte[]{1});
GetDataResponse getDataResponse = buildGetDataResponse(persistableNetworkPayload);
@ -137,7 +134,7 @@ public class P2PDataStorageProcessGetDataResponse {
// TESTCASE: GetDataResponse w/ existing PNP changes no state
@Test
public void processGetDataResponse_duplicatePNPDoesNothing() {
PersistableNetworkPayload persistableNetworkPayload = new PersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload persistableNetworkPayload = new PersistableNetworkPayloadStub(new byte[]{1});
this.testState.mockedStorage.addPersistableNetworkPayload(persistableNetworkPayload,
this.peerNodeAddress, false);
@ -152,7 +149,7 @@ public class P2PDataStorageProcessGetDataResponse {
// TESTCASE: GetDataResponse w/ missing PNP is added with no broadcast or listener signal (ProcessOncePersistableNetworkPayload)
@Test
public void processGetDataResponse_newPNPUpdatesState_LazyProcessed() {
PersistableNetworkPayload persistableNetworkPayload = new LazyPersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload persistableNetworkPayload = new LazyPersistableNetworkPayloadStub(new byte[]{1});
GetDataResponse getDataResponse = buildGetDataResponse(persistableNetworkPayload);
@ -165,7 +162,7 @@ public class P2PDataStorageProcessGetDataResponse {
// TESTCASE: GetDataResponse w/ existing PNP changes no state (ProcessOncePersistableNetworkPayload)
@Test
public void processGetDataResponse_duplicatePNPDoesNothing_LazyProcessed() {
PersistableNetworkPayload persistableNetworkPayload = new LazyPersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload persistableNetworkPayload = new LazyPersistableNetworkPayloadStub(new byte[]{1});
this.testState.mockedStorage.addPersistableNetworkPayload(persistableNetworkPayload,
this.peerNodeAddress, false);
@ -180,7 +177,7 @@ public class P2PDataStorageProcessGetDataResponse {
// TESTCASE: Second call to processGetDataResponse adds PNP for non-ProcessOncePersistableNetworkPayloads
@Test
public void processGetDataResponse_secondProcessNewPNPUpdatesState() {
PersistableNetworkPayload addFromFirstProcess = new PersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload addFromFirstProcess = new PersistableNetworkPayloadStub(new byte[]{1});
GetDataResponse getDataResponse = buildGetDataResponse(addFromFirstProcess);
TestState.SavedTestState beforeState = this.testState.saveTestState(addFromFirstProcess);
@ -188,7 +185,7 @@ public class P2PDataStorageProcessGetDataResponse {
this.testState.verifyPersistableAdd(
beforeState, addFromFirstProcess, true, true, false);
PersistableNetworkPayload addFromSecondProcess = new PersistableNetworkPayloadStub(new byte[] { 2 });
PersistableNetworkPayload addFromSecondProcess = new PersistableNetworkPayloadStub(new byte[]{2});
getDataResponse = buildGetDataResponse(addFromSecondProcess);
beforeState = this.testState.saveTestState(addFromSecondProcess);
this.testState.mockedStorage.processGetDataResponse(getDataResponse, this.peerNodeAddress);
@ -199,7 +196,7 @@ public class P2PDataStorageProcessGetDataResponse {
// TESTCASE: Second call to processGetDataResponse does not add any PNP (LazyProcessed)
@Test
public void processGetDataResponse_secondProcessNoPNPUpdates_LazyProcessed() {
PersistableNetworkPayload addFromFirstProcess = new LazyPersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload addFromFirstProcess = new LazyPersistableNetworkPayloadStub(new byte[]{1});
GetDataResponse getDataResponse = buildGetDataResponse(addFromFirstProcess);
TestState.SavedTestState beforeState = this.testState.saveTestState(addFromFirstProcess);
@ -207,7 +204,7 @@ public class P2PDataStorageProcessGetDataResponse {
this.testState.verifyPersistableAdd(
beforeState, addFromFirstProcess, true, false, false);
PersistableNetworkPayload addFromSecondProcess = new LazyPersistableNetworkPayloadStub(new byte[] { 2 });
PersistableNetworkPayload addFromSecondProcess = new LazyPersistableNetworkPayloadStub(new byte[]{2});
getDataResponse = buildGetDataResponse(addFromSecondProcess);
beforeState = this.testState.saveTestState(addFromSecondProcess);
this.testState.mockedStorage.processGetDataResponse(getDataResponse, this.peerNodeAddress);

View File

@ -35,8 +35,6 @@ import java.security.NoSuchAlgorithmException;
import java.util.Set;
import org.mockito.MockitoAnnotations;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@ -53,7 +51,6 @@ public class P2PDataStorageRequestDataTest {
@BeforeEach
public void setUp() {
MockitoAnnotations.initMocks(this);
this.testState = new TestState();
this.localNodeAddress = new NodeAddress("localhost", 8080);
@ -119,8 +116,8 @@ public class P2PDataStorageRequestDataTest {
// correct GetDataRequestMessage with both sets of keys.
@Test
public void buildPreliminaryGetDataRequest_FilledP2PDataStore() throws NoSuchAlgorithmException {
PersistableNetworkPayload toAdd1 = new PersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload toAdd2 = new PersistableNetworkPayloadStub(new byte[] { 2 });
PersistableNetworkPayload toAdd1 = new PersistableNetworkPayloadStub(new byte[]{1});
PersistableNetworkPayload toAdd2 = new PersistableNetworkPayloadStub(new byte[]{2});
ProtectedStorageEntry toAdd3 = getProtectedStorageEntryForAdd();
ProtectedStorageEntry toAdd4 = getProtectedStorageEntryForAdd();
@ -147,8 +144,8 @@ public class P2PDataStorageRequestDataTest {
// correct GetDataRequestMessage with both sets of keys.
@Test
public void requestData_FilledP2PDataStore_GetUpdatedDataRequest() throws NoSuchAlgorithmException {
PersistableNetworkPayload toAdd1 = new PersistableNetworkPayloadStub(new byte[] { 1 });
PersistableNetworkPayload toAdd2 = new PersistableNetworkPayloadStub(new byte[] { 2 });
PersistableNetworkPayload toAdd1 = new PersistableNetworkPayloadStub(new byte[]{1});
PersistableNetworkPayload toAdd2 = new PersistableNetworkPayloadStub(new byte[]{2});
ProtectedStorageEntry toAdd3 = getProtectedStorageEntryForAdd();
ProtectedStorageEntry toAdd4 = getProtectedStorageEntryForAdd();