SPVBlockStore: support 32 byte chain work

Existing V1 files are automatically migrated to V2 format.

Includes a test for migration from V1 to V2 format. This requires
`getRingCursor()` to be changed from private to package-private.
This commit is contained in:
Andreas Schildbach 2024-06-28 17:54:23 +02:00
parent effe62b649
commit fcb36eb06b
2 changed files with 131 additions and 40 deletions

View File

@ -60,8 +60,10 @@ public class SPVBlockStore implements BlockStore {
public static final int DEFAULT_CAPACITY = 10000;
@Deprecated
public static final String HEADER_MAGIC = "SPVB";
// Magic header.
private static final byte[] MAGIC_HEADER = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
// Magic header for the V1 format.
static final byte[] HEADER_MAGIC_V1 = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
// Magic header for the V2 format.
static final byte[] HEADER_MAGIC_V2 = "SPV2".getBytes(StandardCharsets.US_ASCII);
protected volatile MappedByteBuffer buffer;
protected final NetworkParameters params;
@ -95,6 +97,7 @@ public class SPVBlockStore implements BlockStore {
// Used to stop other applications/processes from opening the store.
protected FileLock fileLock = null;
protected RandomAccessFile randomAccessFile = null;
private final FileChannel channel;
private int fileLength;
/**
@ -119,11 +122,13 @@ public class SPVBlockStore implements BlockStore {
Objects.requireNonNull(file);
this.params = Objects.requireNonNull(params);
checkArgument(capacity > 0);
try {
boolean exists = file.exists();
// Set up the backing file, empty if it doesn't exist.
randomAccessFile = new RandomAccessFile(file, "rw");
FileChannel channel = randomAccessFile.getChannel();
channel = randomAccessFile.getChannel();
// Lock the file.
fileLock = channel.tryLock();
@ -132,43 +137,59 @@ public class SPVBlockStore implements BlockStore {
// Ensure expected file size, grow if desired.
fileLength = getFileSize(capacity);
if (!exists) {
log.info("Creating new SPV block chain file " + file);
randomAccessFile.setLength(fileLength);
byte[] currentHeader = new byte[4];
if (exists) {
log.info("Using existing SPV block chain file: " + file);
// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, randomAccessFile.length());
buffer.get(currentHeader);
} else {
log.info("Creating new SPV block chain file: " + file);
randomAccessFile.setLength(fileLength);
// Map it into memory read/write. See above comment.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
initNewStore(params.getGenesisBlock());
}
// Maybe migrate V1 to V2 format.
if (Arrays.equals(HEADER_MAGIC_V1, currentHeader)) {
log.info("Migrating SPV block chain file from V1 to V2 format: " + file);
migrateV1toV2();
}
// Maybe grow.
if (exists) {
final long currentLength = randomAccessFile.length();
if (currentLength != fileLength) {
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE != 0)
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE_V2 != 0) {
throw new BlockStoreException(
"File size on disk indicates this is not a block store: " + currentLength);
else if (!grow)
"File size on disk indicates this is not a V2 block store: " + currentLength);
} else if (!grow) {
throw new BlockStoreException("File size on disk does not match expected size: " + currentLength
+ " vs " + fileLength);
else if (fileLength < randomAccessFile.length())
} else if (fileLength < randomAccessFile.length()) {
throw new BlockStoreException(
"Shrinking is unsupported: " + currentLength + " vs " + fileLength);
else
} else {
randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
}
}
}
// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
// Check or initialize the header bytes to ensure we don't try to open some random file.
if (exists) {
byte[] currentHeader = new byte[4];
((Buffer) buffer).rewind();
buffer.get(currentHeader);
if (!Arrays.equals(currentHeader, MAGIC_HEADER))
throw new BlockStoreException("Magic header expected, got: " + ByteUtils.formatHex(currentHeader));
} else {
initNewStore(params.getGenesisBlock());
}
// Check the header bytes to ensure we don't try to open some random file.
byte[] header = new byte[4];
((Buffer) buffer).rewind();
buffer.get(currentHeader);
if (!Arrays.equals(currentHeader, HEADER_MAGIC_V2))
throw new BlockStoreException("Magic header V2 expected: " + new String(currentHeader,
StandardCharsets.US_ASCII));
} catch (Exception e) {
try {
if (randomAccessFile != null) randomAccessFile.close();
@ -181,7 +202,7 @@ public class SPVBlockStore implements BlockStore {
private void initNewStore(Block genesisBlock) throws Exception {
((Buffer) buffer).rewind();
buffer.put(MAGIC_HEADER);
buffer.put(HEADER_MAGIC_V2);
// Insert the genesis block.
lock.lock();
try {
@ -194,9 +215,43 @@ public class SPVBlockStore implements BlockStore {
setChainHead(storedGenesis);
}
private void migrateV1toV2() throws BlockStoreException, IOException {
// Upgrades the store file in place from the V1 record layout (12-byte chain work)
// to the V2 layout (32-byte chain work). Invoked once, before normal use, when the
// V1 magic header is detected.
long currentLength = randomAccessFile.length();
long currentBlocksLength = currentLength - FILE_PROLOGUE_BYTES;
if (currentBlocksLength % RECORD_SIZE_V1 != 0)
throw new BlockStoreException(
"File size on disk indicates this is not a V1 block store: " + currentLength);
int currentCapacity = (int) (currentBlocksLength / RECORD_SIZE_V1);
// Grow the file to the V2 size up front; fileLength was computed from RECORD_SIZE_V2.
randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
// migrate magic header
((Buffer) buffer).rewind();
buffer.put(HEADER_MAGIC_V2);
// migrate headers
final byte[] zeroPadding = new byte[20]; // 32 (V2 work) - 12 (V1 work)
// Walk records last-to-first so widening never clobbers unread data: record i's
// V2 span ends exactly where record (i + 1)'s V2 span begins, and lies entirely
// above the V1 spans of all records with index < i, which are still unread.
for (int i = currentCapacity - 1; i >= 0; i--) {
byte[] record = new byte[RECORD_SIZE_V1];
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V1);
buffer.get(record);
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V2);
buffer.put(record, 0, 32); // hash
// NOTE(review): left-padding with zeros assumes the 12-byte V1 chain work is
// serialized big-endian — confirm against StoredBlock.serializeCompact.
buffer.put(zeroPadding);
buffer.put(record, 32, RECORD_SIZE_V1 - 32); // work, height, block header
}
// migrate cursor
// Rescale the ring cursor (a byte offset just past the last-written record) from
// V1 record width to V2 record width.
int cursorRecord = (getRingCursor() - FILE_PROLOGUE_BYTES) / RECORD_SIZE_V1;
setRingCursor(FILE_PROLOGUE_BYTES + cursorRecord * RECORD_SIZE_V2);
}
/** Returns the size in bytes of the file that is used to store the chain with the current parameters. */
public static int getFileSize(int capacity) {
return RECORD_SIZE * capacity + FILE_PROLOGUE_BYTES /* extra kilobyte for stuff */;
return RECORD_SIZE_V2 * capacity + FILE_PROLOGUE_BYTES /* extra kilobyte for stuff */;
}
@Override
@ -215,7 +270,7 @@ public class SPVBlockStore implements BlockStore {
Sha256Hash hash = block.getHeader().getHash();
notFoundCache.remove(hash);
buffer.put(hash.getBytes());
block.serializeCompact(buffer);
block.serializeCompactV2(buffer);
setRingCursor(buffer.position());
blockCache.put(hash, block);
} finally { lock.unlock(); }
@ -242,17 +297,17 @@ public class SPVBlockStore implements BlockStore {
final byte[] targetHashBytes = hash.getBytes();
byte[] scratch = new byte[32];
do {
cursor -= RECORD_SIZE;
cursor -= RECORD_SIZE_V2;
if (cursor < FILE_PROLOGUE_BYTES) {
// We hit the start, so wrap around.
cursor = fileLength - RECORD_SIZE;
cursor = fileLength - RECORD_SIZE_V2;
}
// Cursor is now at the start of the next record to check, so read the hash and compare it.
((Buffer) buffer).position(cursor);
buffer.get(scratch);
if (Arrays.equals(scratch, targetHashBytes)) {
// Found the target.
StoredBlock storedBlock = StoredBlock.deserializeCompact(buffer);
StoredBlock storedBlock = StoredBlock.deserializeCompactV2(buffer);
blockCache.put(hash, storedBlock);
return storedBlock;
}
@ -315,22 +370,24 @@ public class SPVBlockStore implements BlockStore {
}
}
protected static final int RECORD_SIZE = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;
static final int RECORD_SIZE_V1 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;
static final int RECORD_SIZE_V2 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE_V2;
// File format:
// 4 header bytes = "SPVB"
// V2 file format, V1 in parenthesis:
// 4 magic header bytes = "SPV2" ("SPVB" for V1)
// 4 cursor bytes, which indicate the offset from the first kb where the next block header should be written.
// 32 bytes for the hash of the chain head
//
// For each header (128 bytes)
// For each header:
// 32 bytes hash of the header
// 12 bytes of chain work
// 32 bytes of chain work (12 bytes for V1)
// 4 bytes of height
// 80 bytes of block header data
protected static final int FILE_PROLOGUE_BYTES = 1024;
/** Returns the offset from the file start where the latest block should be written (end of prev block). */
private int getRingCursor() {
int getRingCursor() {
int c = buffer.getInt(4);
checkState(c >= FILE_PROLOGUE_BYTES, () ->
"integer overflow");

View File

@ -36,7 +36,11 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.RandomAccessFile;
import java.math.BigInteger;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.time.Duration;
import java.time.Instant;
import java.util.Collections;
@ -190,4 +194,34 @@ public class SPVBlockStoreTest {
assertTrue(deleted);
}
}
@Test
public void migrateV1toV2() throws Exception {
// Hand-crafts a minimal V1-format store file containing just the genesis block,
// then verifies that opening it with SPVBlockStore migrates it to V2 in place:
// the chain head survives and the ring cursor is rescaled to the V2 record width.
// create V1 format
RandomAccessFile raf = new RandomAccessFile(blockStoreFile, "rw");
FileChannel channel = raf.getChannel();
ByteBuffer buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0,
SPVBlockStore.FILE_PROLOGUE_BYTES + SPVBlockStore.RECORD_SIZE_V1 * 3);
buffer.put(SPVBlockStore.HEADER_MAGIC_V1); // header magic
Block genesisBlock = TESTNET.getGenesisBlock();
StoredBlock storedGenesisBlock = new StoredBlock(genesisBlock.cloneAsHeader(), genesisBlock.getWork(), 0);
Sha256Hash genesisHash = storedGenesisBlock.getHeader().getHash();
((Buffer) buffer).position(SPVBlockStore.FILE_PROLOGUE_BYTES);
buffer.put(genesisHash.getBytes());
storedGenesisBlock.serializeCompact(buffer); // V1 (12-byte work) record body
buffer.putInt(4, buffer.position()); // ring cursor
((Buffer) buffer).position(8);
buffer.put(genesisHash.getBytes()); // chain head
raf.close();
// migrate to V2 format
// Opening the store detects the V1 magic header and performs the migration.
SPVBlockStore store = new SPVBlockStore(TESTNET, blockStoreFile);
// check block is the same
assertEquals(genesisHash, store.getChainHead().getHeader().getHash());
// check ring cursor
// Exactly one record was written, so the cursor sits one V2 record past the prologue.
assertEquals(SPVBlockStore.FILE_PROLOGUE_BYTES + SPVBlockStore.RECORD_SIZE_V2 * 1,
store.getRingCursor());
store.close();
}
}