Cleanups, change log levels

chimp1984 2021-10-31 19:42:23 +01:00
parent 3487932f6e
commit 62b3e51b22
No known key found for this signature in database
GPG key ID: 9801B4EC591F90E3
12 changed files with 17 additions and 2005 deletions

View file

@@ -63,7 +63,6 @@ configure(subprojects) {
kotlinVersion = '1.3.41'
knowmXchangeVersion = '4.4.2'
langVersion = '3.11'
leveldbVersion = '1.2'
logbackVersion = '1.1.11'
loggingVersion = '1.2'
lombokVersion = '1.18.12'
@@ -265,10 +264,6 @@ configure(project(':common')) {
exclude(module: 'okhttp')
exclude(module: 'okio')
}
implementation("io.github.pcmind:leveldb:$leveldbVersion") {
exclude(module: 'guava')
}
runtimeOnly("io.grpc:grpc-netty-shaded:$grpcVersion") {
exclude(module: 'guava')
exclude(module: 'animal-sniffer-annotations')
@@ -303,7 +298,6 @@ configure(project(':p2p')) {
implementation("org.apache.httpcomponents:httpclient:$httpclientVersion") {
exclude(module: 'commons-codec')
}
compile "org.fxmisc.easybind:easybind:$easybindVersion"
compileOnly "org.projectlombok:lombok:$lombokVersion"
annotationProcessor "org.projectlombok:lombok:$lombokVersion"

View file

@@ -1,78 +0,0 @@
package bisq.common.persistence.db;
public interface DataStore {
public enum Storage {
IN_MEMORY,
PERSISTED
}
public static final byte INACTIVE_RECORD = 0;
public static final byte ACTIVE_RECORD = 1;
public static final byte EMPTY_RECORD_TYPE = -1;
public static final byte OBJ_RECORD_TYPE = 1;
public static final byte TEXT_RECORD_TYPE = 2;
public static final byte LONG_RECORD_TYPE = 3;
public static final byte INT_RECORD_TYPE = 4;
public static final byte DOUBLE_RECORD_TYPE = 5;
public static final byte FLOAT_RECORD_TYPE = 6;
public static final byte SHORT_RECORD_TYPE = 7;
public static final byte CHAR_RECORD_TYPE = 8;
public static final byte BYTEARRAY_RECORD_TYPE = 9;
// Get Journal stats
public long getRecordCount();
public long getEmptyCount();
public String getName();
public String getFolder();
public long getFilesize();
public boolean putInteger(String key, Integer val);
public Integer getInteger(String key);
public boolean putShort(String key, Short val);
public Short getShort(String key);
public boolean putLong(String key, Long val);
public Long getLong(String key);
public boolean putFloat(String key, Float val);
public Float getFloat(String key);
public boolean putDouble(String key, Double val);
public Double getDouble(String key);
public boolean putString(String key, String val);
public String getString(String key);
public boolean putObject(String key, Object msg);
public Object getObject(String key); // key is the object ID
boolean putBytes(String key, byte[] bytes);
byte[] getBytes(String key);
public boolean putChar(String key, char val);
public char getChar(String key);
public boolean remove(String key); // ID is the object hash
public Object iterateStart();
public Object iterateNext();
public void delete();
}
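
For context, a minimal sketch of how this removed key-value API was driven through the NoHeapDB facade (also deleted later in this commit). The store name and the 50 MB / 5 MB sizes are illustrative only, not values used by Bisq:

import bisq.common.persistence.db.DataStore;
import bisq.common.persistence.db.NoHeapDB;

public class DataStoreUsageSketch {
    public static void main(String[] args) throws Exception {
        NoHeapDB db = new NoHeapDB();
        // 50 MB data journal, 5 MB index journal (illustrative sizes)
        db.createStore("DemoStore", DataStore.Storage.IN_MEMORY, 50, 5);
        DataStore store = db.getStore("DemoStore");
        store.putString("key1", "value1");
        System.out.println(store.getString("key1")); // prints "value1"
        store.remove("key1");
    }
}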

View file

@@ -1,418 +0,0 @@
package bisq.common.persistence.db;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.io.File;
import java.io.RandomAccessFile;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* @author ebruno
*/
public class FixedHash implements HashBase {
public enum Storage {
IN_MEMORY,
PERSISTED
}
protected static final Logger logger = Logger.getLogger("FixedHash");
protected boolean debugLogging = false;
protected static int LOAD_THRESHOLD = 70;
protected static final int PAGE_SIZE = 1024 * 1024;
protected static final int SIZE_FACTOR = 2;
protected static final int DEFAULT_INDEX_JOURNAL_SIZE = SIZE_FACTOR * PAGE_SIZE;
protected static final int KEY_SIZE = 0;
//
// 1 (byte) key length
// 4 (int) key hashcode
// 0 (bytes) key size
// 8 (long) record location
//
protected static final int INDEX_ENTRY_SIZE_BYTES =
1 + Integer.BYTES + KEY_SIZE + Long.BYTES;
protected long sizeInBytes = DEFAULT_INDEX_JOURNAL_SIZE;
protected int previousOffset = 0; // the last record inserted into the index
protected int bucketsFree = 0;
protected int bucketsUsed = 0;
protected int totalBuckets = 0;
protected int collisions = 0;
protected String journalPath = "";
protected boolean inMemory = true;
protected RandomAccessFile indexFile = null;
protected FileChannel indexChannel = null;
protected ByteBuffer indexBuffer = null;
protected byte keyLength = 16;
protected long indexCurrentEnd = 0;
protected int indexRecordReadCount = 1;
// Used when iterating through the index
protected int iterateNext = 0;
///////////////////////////////////////////////////////////////////////////
public FixedHash(String journalPath, boolean inMemory, boolean reuseExisting) {
this(DEFAULT_INDEX_JOURNAL_SIZE, journalPath, inMemory, reuseExisting);
}
public FixedHash(int size, String journalPath, boolean inMemory, boolean reuseExisting) {
boolean success = false;
sizeInBytes = size;
this.inMemory = inMemory;
this.journalPath = journalPath;
if (inMemory) {
success = createIndexJournalBB();
} else {
success = createIndexJournalMBB(reuseExisting);
}
if (success) {
totalBuckets = (int) (sizeInBytes / INDEX_ENTRY_SIZE_BYTES);
bucketsFree = (int) (sizeInBytes / INDEX_ENTRY_SIZE_BYTES);
bucketsUsed = 0;
}
}
protected boolean createIndexJournalBB() {
try {
indexBuffer = ByteBuffer.allocateDirect((int) sizeInBytes);
indexCurrentEnd = indexBuffer.position();
return true;
} catch (Exception e) {
logger.log(Level.SEVERE, "Exception", e);
}
return false;
}
protected boolean createIndexJournalMBB(boolean reuseExisting) {
try {
journalPath += "Index";
// If the journal file already exists, rename it unless we're
// supposed to reuse the existing file and its contents
boolean fileExists = false;
try {
File file = new File(journalPath);
fileExists = file.exists();
if (fileExists && !reuseExisting) {
File newFile = new File(journalPath + "_prev");
logger.info("Moving journal " + journalPath + " to " + newFile.getName());
file.renameTo(newFile);
}
} catch (Exception e) {
}
indexFile = new RandomAccessFile(journalPath, "rw");
if (fileExists && reuseExisting) {
// Existing file, so use its existing length
sizeInBytes = indexFile.length();
} else {
// New file, set its length
indexFile.setLength(sizeInBytes);
}
indexChannel = indexFile.getChannel();
indexBuffer = indexChannel.map(FileChannel.MapMode.READ_WRITE, 0, sizeInBytes);
indexCurrentEnd = indexBuffer.position();
return true;
} catch (Exception e) {
logger.log(Level.SEVERE, "Exception", e);
}
return false;
}
@Override
public void reset() {
try {
indexBuffer.clear();
indexBuffer.limit(0);
if (inMemory) {
indexBuffer = ByteBuffer.allocateDirect(0);
} else {
indexChannel.truncate(0);
indexChannel.close();
indexFile.close();
File f = new File(journalPath);
f.delete();
}
} catch (Exception e) {
e.printStackTrace();
}
}
protected int getHashBucket(int hash) {
return _getHashBucket(hash);
}
protected int getHashBucket(String key) {
int hash = key.hashCode();
return _getHashBucket(hash);
}
protected int _getHashBucket(int hash) {
hash = hash ^ (hash >>> 16);
int buckets = (int) (sizeInBytes / INDEX_ENTRY_SIZE_BYTES);
int bucket = Math.max(1, Math.abs(hash) % (buckets - 1));
return bucket * INDEX_ENTRY_SIZE_BYTES;
}
protected boolean enlargeIndex() {
try {
// Hold a reference to the original buffer to copy its contents
ByteBuffer oldBuffer = indexBuffer;
if (inMemory) {
logger.log(Level.INFO, "Expanding in-memory index...");
sizeInBytes += (PAGE_SIZE * SIZE_FACTOR);
createIndexJournalBB();
} else {
logger.log(Level.INFO, "Expanding persisted index...");
((MappedByteBuffer) indexBuffer).force();
indexFile.setLength(sizeInBytes + (PAGE_SIZE * SIZE_FACTOR));
indexChannel = indexFile.getChannel();
sizeInBytes = indexChannel.size();
indexBuffer = indexChannel.map(
FileChannel.MapMode.READ_WRITE, 0, sizeInBytes);
}
// Re-hash the index
//
collisions = 0;
bucketsUsed = 0;
oldBuffer.position(INDEX_ENTRY_SIZE_BYTES);
int buckets = (oldBuffer.capacity() / INDEX_ENTRY_SIZE_BYTES);
for (int i = 1; i <= buckets; i++) {
byte occupied = oldBuffer.get();
if (occupied > 0) {
int keyHash = oldBuffer.getInt();
byte[] fixedKeyBytes = null;
if (KEY_SIZE > 0) {
fixedKeyBytes = new byte[KEY_SIZE];
oldBuffer.get(fixedKeyBytes);
}
Long location = oldBuffer.getLong();
putInternal(fixedKeyBytes, keyHash, occupied, location);
} else {
// Bucket unoccupied, move to the next one
oldBuffer.position(i * INDEX_ENTRY_SIZE_BYTES);
}
}
totalBuckets = (int) (sizeInBytes / INDEX_ENTRY_SIZE_BYTES);
bucketsFree = totalBuckets - bucketsUsed;
logger.log(Level.INFO, "Done!");
return true;
} catch (Exception e) {
e.printStackTrace();
}
return false;
}
protected int findBucket(Integer hashcode, int offset, boolean mustFind) {
boolean found = false;
byte occupied = 1;
while (occupied > 0 && !found) {
int keyHash = indexBuffer.getInt();
if (keyHash == hashcode) {
if (KEY_SIZE > 0) {
indexBuffer.position(
offset + 1 + Integer.BYTES + KEY_SIZE);
}
found = true;
break;
} else {
// Check for rollover past the end of the table
offset += INDEX_ENTRY_SIZE_BYTES;
if (offset >= (sizeInBytes - INDEX_ENTRY_SIZE_BYTES)) {
// Wrap to the beginning, skipping the first slot
// since it's reserved for the first record pointer
offset = INDEX_ENTRY_SIZE_BYTES;
}
// Skip to the next bucket
indexBuffer.position(offset);
occupied = indexBuffer.get();
}
}
// Return -1 if the key was required (mustFind) but not found in the index
if (!found && mustFind) {
return -1;
}
return offset;
}
protected boolean putInternal(byte[] fixedKeyBytes, Integer hashcode,
byte keyLength, Long value) {
int offset = getHashBucket(hashcode);
indexBuffer.position(offset);
indexBuffer.mark();
byte occupied = indexBuffer.get();
if (occupied == 0) {
// found a free slot, go back to the beginning of it
indexBuffer.reset();
} else {
collisions++;
// When there's a collision, walk the table until a
// free slot is found
offset = findBucket(hashcode, offset, false);
// found a free slot, seek to it
indexBuffer.position(offset);
}
// Write the data
//
indexBuffer.put(keyLength);
indexBuffer.putInt(hashcode); // hashcode is faster for resolving collisions than comparing strings
if (KEY_SIZE > 0 && fixedKeyBytes != null && fixedKeyBytes.length > 0) {
// Make sure we copy *at most* KEY_SIZE bytes for the key
indexBuffer.put(fixedKeyBytes,
0, Math.min(KEY_SIZE, fixedKeyBytes.length));
}
indexBuffer.putLong(value); // indexed record location
bucketsUsed++;
return true;
}
@Override
public boolean put(String key, Long value) {
//
// Entry:
// 1 (byte) key length
// 4 (int) key hashcode
// 0 (bytes) key size
// 8 (long) record location
//
try {
// Check load to see if Index needs to be enlarged
//
if (getLoad() > LOAD_THRESHOLD) {
enlargeIndex();
}
byte keylen = (byte) key.length();
byte[] fixedKeyBytes = null;
if (KEY_SIZE > 0) {
fixedKeyBytes = new byte[KEY_SIZE];
System.arraycopy(key.getBytes(),
0, fixedKeyBytes,
0, Math.min(KEY_SIZE, keylen));
}
return putInternal(fixedKeyBytes, key.hashCode(), keylen, value);
} catch (Exception e) {
e.printStackTrace();
}
return false;
}
protected int getHashBucketOffset(String key) {
int offset = -1;
try {
offset = getHashBucket(key.hashCode());
indexBuffer.position(offset);
byte occupied = indexBuffer.get();
if (occupied > 0) {
offset = findBucket(key.hashCode(), offset, true);
}
} catch (Exception e) {
e.printStackTrace();
}
return offset;
}
@Override
public Long get(String key) {
int offset = getHashBucketOffset(key);
if (offset == -1) {
// key not found
return -1L;
}
// Return the location of the data record
return indexBuffer.getLong();
}
@Override
public void remove(String key) {
int offset = getHashBucketOffset(key);
if (offset == -1) {
// key not found
return;
}
offset = findBucket(key.hashCode(), offset, true);
if (offset != -1) {
// Simply zero out the occupied slot, but we need to rewind first
int currPos = indexBuffer.position();
currPos -= (Integer.BYTES + 1);
indexBuffer.position(currPos);
indexBuffer.put((byte) 0);
}
}
@Override
public int getCollisions() {
return collisions;
}
@Override
public void outputStats() {
System.out.println("Index " + journalPath + " Stats:");
System.out.println(" -size: " + size());
System.out.println(" -load: " + getLoad());
System.out.println(" -entries: " + entries());
System.out.println(" -capacity: " + capacity());
System.out.println(" -available: " + available());
System.out.println(" -collisions: " + getCollisions());
}
public long size() {
return sizeInBytes;
}
public int entries() {
return bucketsUsed;
}
public int capacity() {
return totalBuckets;
}
public int available() {
return capacity() - entries();
}
public int getLoad() {
int used = entries();
int capac = capacity();
float f = (float) used / (float) capac;
int load = (int) (f * 100);
return load; // percentage
}
}
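
The core technique above is open addressing over fixed-size slots in a ByteBuffer: each 13-byte entry holds a 1-byte key-length/occupied flag, the 4-byte key hashcode and an 8-byte record location (KEY_SIZE is 0, so no key bytes are stored), and collisions are resolved by linear probing. Below is a stripped-down, self-contained sketch of that scheme, with resizing, persistence and iteration left out; like the original, it compares hashcodes only, so distinct keys with equal hashCode() would clash:

import java.nio.ByteBuffer;

public class FixedHashSketch {
    // Slot layout mirrors FixedHash: 1-byte key length (doubles as the
    // occupied flag), 4-byte key hash, 8-byte record location = 13 bytes.
    static final int ENTRY_SIZE = 1 + Integer.BYTES + Long.BYTES;
    static final int BUCKETS = 1024; // fixed capacity; no enlargeIndex() here
    private final ByteBuffer index = ByteBuffer.allocate(BUCKETS * ENTRY_SIZE);

    private int bucketOffset(int hash) {
        hash = hash ^ (hash >>> 16); // spread high bits, as in _getHashBucket
        int bucket = Math.max(1, Math.abs(hash) % (BUCKETS - 1)); // slot 0 is reserved
        return bucket * ENTRY_SIZE;
    }

    // Assumes the table never fills up (the real class enlarges at 70% load).
    public void put(String key, long recordLocation) {
        int offset = bucketOffset(key.hashCode());
        // Linear probing: walk until we hit a free slot or a matching hash.
        while (index.get(offset) != 0 && index.getInt(offset + 1) != key.hashCode()) {
            offset += ENTRY_SIZE;
            if (offset >= index.capacity()) offset = ENTRY_SIZE; // wrap, skip slot 0
        }
        index.put(offset, (byte) Math.min(key.length(), Byte.MAX_VALUE));
        index.putInt(offset + 1, key.hashCode());
        index.putLong(offset + 1 + Integer.BYTES, recordLocation);
    }

    public long get(String key) {
        int offset = bucketOffset(key.hashCode());
        while (index.get(offset) != 0) {
            if (index.getInt(offset + 1) == key.hashCode()) {
                return index.getLong(offset + 1 + Integer.BYTES);
            }
            offset += ENTRY_SIZE;
            if (offset >= index.capacity()) offset = ENTRY_SIZE;
        }
        return -1L; // not found, matching FixedHash.get's -1 convention
    }

    public static void main(String[] args) {
        FixedHashSketch idx = new FixedHashSketch();
        idx.put("alice", 4096L);
        System.out.println(idx.get("alice")); // 4096
        System.out.println(idx.get("bob"));   // -1
    }
}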

View file

@@ -1,20 +0,0 @@
package bisq.common.persistence.db;
/**
* @author ebruno
*/
public interface HashBase {
public boolean put(String k, Long v);
public Long get(String k);
public void remove(String k);
public int getCollisions();
public int getLoad();
public void outputStats();
public void reset();
}

View file

@@ -1,380 +0,0 @@
package bisq.common.persistence.db;
import bisq.common.Timer;
import bisq.common.UserThread;
import bisq.common.util.Utilities;
import java.nio.charset.StandardCharsets;
import java.io.File;
import java.util.HashMap;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
/**
*
* @author ebruno
*/
@Slf4j
public class NoHeapDB {
protected final static int MEGABYTE = 1024 * 1024;
protected final static int DEFAULT_STORE_SIZE = MEGABYTE * 100;
private static Thread thread;
static long c;
static long ts;
static String key;
public static void main(String[] args) throws Exception {
NoHeapDB db = new NoHeapDB();
/* db.createStore(
"MyTestDataStore",
DataStore.Storage.IN_MEMORY, //or DataStore.Storage.PERSISTED
256); // in MB*/
db.createStore(
"MyTestDataStore",
DataStore.Storage.PERSISTED, //or DataStore.Storage.IN_MEMORY
50, 5); // in MB
DataStore store = db.getStore("MyTestDataStore");
String sep = "_";
String result = store.getString(2 + sep + 100);
log.error("result {}", result);
String name = "MyTestDataStore";
byte[] array = new byte[10000000];
new Random().nextBytes(array);
String random = new String(array, StandardCharsets.UTF_8);
log.error(Utilities.readableFileSize(array.length));
log.error(Utilities.readableFileSize(random.getBytes(StandardCharsets.UTF_8).length));
c = store.getRecordCount();
key = c + sep;
ts = System.currentTimeMillis();
String res1 = store.getString(key);
// log.error("read took {} ms. {}", System.currentTimeMillis() - ts, res1);
Timer timer = UserThread.runPeriodically(() -> {
ts = System.currentTimeMillis();
key = c + sep;
String val1 = random + c;
store.putString(key, val1);
//log.error("write took {} ms", System.currentTimeMillis() - ts);
ts = System.currentTimeMillis();
String res = store.getString(key);
// log.error("read took {} ms. res={}, val1={}, match {}", System.currentTimeMillis() - ts, res, val1, res.equals(val1));
// log.error("read took {} ms. match {}", System.currentTimeMillis() - ts, res.equals(val1));
c++;
log.error("getFilesize {} getRecordCount {}", Utilities.readableFileSize(store.getFilesize()), store.getRecordCount());
System.gc();
if (store.getFilesize() > 1800000000) {
log.error("too large");
System.exit(0);
}
// 400 000
/* long ts = System.currentTimeMillis();
int size = 10000000;
for (int i = 0; i < size; i++) {
String val = String.valueOf(c * i);
String key = c + sep + i;
store.putString(key, val); //400 000
// log.error("write key/val {}/{}", key, val);
}
log.error("write took {} ms", System.currentTimeMillis() - ts);
ts = System.currentTimeMillis();
for (int i = 0; i < size; i++) {
String key = c + sep + i;
String val = store.getString(key);
//log.error("read key/val {}/{}", key, val);
}
log.error("read took {} ms", System.currentTimeMillis() - ts);
c++;*/
}, 100, TimeUnit.MILLISECONDS);
thread = new Thread(() -> {
while (true) {
}
});
thread.start();
UserThread.runAfter(() -> {
timer.stop();
thread.interrupt();
}, 500);
}
HashMap<String, DataStore> stores = new HashMap<>();
String homeDirectory =
System.getProperty("user.home") +
File.separator + "JavaOffHeap";
public NoHeapDB() {
}
public NoHeapDB(String homeDirectory) {
this.homeDirectory = homeDirectory;
}
public boolean createStore(String name) throws Exception {
return createStore(name,
DataStore.Storage.IN_MEMORY,
100, 10);
}
public boolean createStore(String name,
DataStore.Storage storageType)
throws Exception {
return createStore(name,
storageType,
100, 10);
}
public boolean createStore(String name,
DataStore.Storage storageType,
int size,
int indexFileSize) throws Exception {
if (size > Integer.MAX_VALUE) {
throw new Exception("Database size exceeds " + Integer.MAX_VALUE);
}
NoHeapDBStore nohdb = new NoHeapDBStore(homeDirectory,
name,
storageType,
size * MEGABYTE,
indexFileSize * MEGABYTE,
true);
stores.put(name, nohdb);
return true;
}
public DataStore getStore(String storeName) {
return this.stores.get(storeName);
}
public boolean deleteStore(String storeName) {
DataStore store = this.stores.get(storeName);
if (store != null) {
// Delete the store here
store.delete();
if (stores.remove(storeName) != null) {
return true;
}
}
return false;
}
public boolean putString(String storeName, String key, String value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putString(key, value);
}
return false;
}
public boolean putInteger(String storeName, String key, Integer value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putInteger(key, value);
}
return false;
}
public boolean putShort(String storeName, String key, Short value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putShort(key, value);
}
return false;
}
public boolean putLong(String storeName, String key, Long value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putLong(key, value);
}
return false;
}
public boolean putFloat(String storeName, String key, Float value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putFloat(key, value);
}
return false;
}
public boolean putDouble(String storeName, String key, Double value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putDouble(key, value);
}
return false;
}
public boolean putChar(String storeName, String key, char value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putChar(key, value);
}
return false;
}
public boolean putObject(String storeName, String key, Object value) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.putObject(key, value);
}
return false;
}
public String getString(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getString(key);
}
return null;
}
public Integer getInteger(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getInteger(key);
}
return null;
}
public Short getShort(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getShort(key);
}
return null;
}
public Long getLong(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getLong(key);
}
return null;
}
public Float getFloat(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getFloat(key);
}
return null;
}
public Double getDouble(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getDouble(key);
}
return null;
}
public char getChar(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getChar(key);
}
return (char) 0;
}
public Object getObject(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.getObject(key);
}
return null;
}
public boolean remove(String storeName, String key) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.remove(key);
}
return false;
}
public Object iterateStart(String storeName) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.iterateStart();
}
return null;
}
public Object iterateNext(String storeName) {
DataStore store = this.stores.get(storeName);
if (store != null) {
return store.iterateNext();
}
return null;
}
public int getCollisions(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getCollisions();
}
public int getIndexLoad(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getIndexLoad();
}
public long getObjectRetrievalTime(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getObjectRetrievalTime();
}
public long getObjectStorageTime(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getObjectStorageTime();
}
public void outputStats(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
niop.outputStats();
}
public long getRecordCount(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getRecordCount();
}
public long getEmptyCount(String storeName) {
NoHeapDBStore niop = (NoHeapDBStore) stores.get(storeName);
return niop.getEmptyCount();
}
}
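
One detail worth flagging in the removed facade: createStore takes the size in megabytes as an int, so its size > Integer.MAX_VALUE check can never be true, while size * MEGABYTE silently overflows int for sizes above 2047 MB. A hedged sketch of a guard that would have caught this (names are mine, not Bisq's):

public class StoreSizeGuardSketch {
    static final int MEGABYTE = 1024 * 1024; // as defined in NoHeapDB above

    static void checkStoreSize(int sizeMb) throws Exception {
        long sizeBytes = (long) sizeMb * MEGABYTE; // multiply in long to avoid int overflow
        if (sizeBytes > Integer.MAX_VALUE) {
            throw new Exception("Database size exceeds " + Integer.MAX_VALUE);
        }
    }

    public static void main(String[] args) throws Exception {
        checkStoreSize(100);  // fine: 100 MB fits in an int-sized mapped buffer
        checkStoreSize(5000); // throws: 5000 MB exceeds Integer.MAX_VALUE bytes
    }
}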

View file

@@ -26,8 +26,8 @@ import lombok.extern.slf4j.Slf4j;
public class GcUtil {
@Setter
private static boolean DISABLE_GC_CALLS = false;
private static int TRIGGER_MEM = 1000;
private static int TRIGGER_MAX_MEM = 3000;
private static final int TRIGGER_MEM = 1000;
private static final int TRIGGER_MAX_MEM = 3000;
private static int totalInvocations;
private static long totalGCTime;

View file

@@ -143,7 +143,6 @@ public class DaoStateSnapshotService implements DaoSetupService, DaoStateListener
// This also comes with the improvement that the user does not need to load the past blocks back to the last
// snapshot height. Though it also comes with the small risk that, in case of a re-org that affected the DAO
// state, the user needs to do a resync.
//todo
long ts = System.currentTimeMillis();
// We do not keep a copy of the clone as we use it immediately for persistence.
GcUtil.maybeReleaseMemory();

View file

@@ -38,8 +38,6 @@ import javax.annotation.Nullable;
@Slf4j
public class BlocksPersistence {
// 10000->Writing 130014 blocks took 1658 msec
// 1000-> Writing 130014 blocks took 1685 msec Mapping blocks from DaoStateStore took 2250 ms
public static final int BUCKET_SIZE = 1000; // results in about 1 MB files and about 1 new file per week
private final File storageDir;
@@ -51,10 +49,6 @@ public class BlocksPersistence {
this.storageDir = storageDir;
this.fileName = fileName;
this.persistenceProtoResolver = persistenceProtoResolver;
/* if (!storageDir.exists()) {
storageDir.mkdir();
}*/
}
public void writeBlocks(List<BaseBlock> protobufBlocks) {
@@ -75,7 +69,6 @@
int first = bucketIndex * BUCKET_SIZE - BUCKET_SIZE + 1;
int last = bucketIndex * BUCKET_SIZE;
File storageFile = new File(storageDir, fileName + "_" + first + "-" + last);
// log.error("addAll height={} items={}", height, temp.stream().map(e -> e.getHeight() + ", ").collect(Collectors.toList()));
writeToDisk(storageFile, new BsqBlockStore(temp), null);
temp = new ArrayList<>();
}
@@ -85,11 +78,10 @@
int first = bucketIndex * BUCKET_SIZE - BUCKET_SIZE + 1;
int last = bucketIndex * BUCKET_SIZE;
File storageFile = new File(storageDir, fileName + "_" + first + "-" + last);
// log.error("items={}", temp.stream().map(e -> e.getHeight()).collect(Collectors.toList()));
writeToDisk(storageFile, new BsqBlockStore(temp), null);
}
log.error("Write {} blocks to disk took {} msec", protobufBlocks.size(), System.currentTimeMillis() - ts);
log.info("Write {} blocks to disk took {} msec", protobufBlocks.size(), System.currentTimeMillis() - ts);
}
public void removeBlocksDirectory() {
@@ -107,19 +99,15 @@
storageDir.mkdir();
}
// log.error("getBlocks {}-{}", from, to);
long ts = System.currentTimeMillis();
// from = Math.max(571747, from);
List<BaseBlock> buckets = new ArrayList<>();
int start = from / BUCKET_SIZE + 1;
int end = to / BUCKET_SIZE + 1;
for (int bucketIndex = start; bucketIndex <= end; bucketIndex++) {
List<BaseBlock> bucket = readBucket(bucketIndex);
// log.error("read bucketIndex {}, items={}", bucketIndex, bucket.stream().map(e -> e.getHeight() + ", ").collect(Collectors.toList()));
buckets.addAll(bucket);
}
log.error("Reading {} blocks took {} msec", buckets.size(), System.currentTimeMillis() - ts);
// System.exit(0);
log.info("Reading {} blocks took {} msec", buckets.size(), System.currentTimeMillis() - ts);
return buckets;
}
@@ -127,23 +115,17 @@
private List<BaseBlock> readBucket(int bucketIndex) {
int first = bucketIndex * BUCKET_SIZE - BUCKET_SIZE + 1;
int last = bucketIndex * BUCKET_SIZE;
/* int first = bucketIndex * BUCKET_SIZE + 1;
int last = first + BUCKET_SIZE - 1;*/
String child = fileName + "_" + first + "-" + last;
// log.error("getBlocksOfBucket {}", child);
File storageFile = new File(storageDir, child);
if (!storageFile.exists()) {
// log.error("storageFile not existing {}", storageFile.getName());
return new ArrayList<>();
}
try (FileInputStream fileInputStream = new FileInputStream(storageFile)) {
protobuf.PersistableEnvelope proto = protobuf.PersistableEnvelope.parseDelimitedFrom(fileInputStream);
BsqBlockStore bsqBlockStore = (BsqBlockStore) persistenceProtoResolver.fromProto(proto);
return bsqBlockStore.getBlocksAsProto();
} catch (Throwable t) {
log.error("Reading {} failed with {}.", fileName, t.getMessage());
log.info("Reading {} failed with {}.", fileName, t.getMessage());
return new ArrayList<>();
}
}
@@ -151,7 +133,6 @@
private void writeToDisk(File storageFile,
BsqBlockStore bsqBlockStore,
@Nullable Runnable completeHandler) {
long ts = System.currentTimeMillis();
File tempFile = null;
FileOutputStream fileOutputStream = null;
try {
@@ -196,7 +177,6 @@
e.printStackTrace();
log.error("Cannot close resources." + e.getMessage());
}
// log.info("Writing the serialized {} completed in {} msec", fileName, System.currentTimeMillis() - ts);
if (completeHandler != null) {
completeHandler.run();
}
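
To make the bucket arithmetic above concrete: with BUCKET_SIZE = 1000, bucket i spans heights i*1000-999 through i*1000 and is written to a file named fileName + "_" + first + "-" + last. A small worked sketch (the "BsqBlocks" prefix is illustrative):

public class BucketNameSketch {
    static final int BUCKET_SIZE = 1000; // as in BlocksPersistence above

    // Name of the bucket file containing the given block height. Uses ceiling
    // division; readBlocks' "from / BUCKET_SIZE + 1" matches it for heights
    // that are not exact multiples of the bucket size.
    static String bucketFileName(String fileName, int height) {
        int bucketIndex = (height + BUCKET_SIZE - 1) / BUCKET_SIZE;
        int first = bucketIndex * BUCKET_SIZE - BUCKET_SIZE + 1;
        int last = bucketIndex * BUCKET_SIZE;
        return fileName + "_" + first + "-" + last;
    }

    public static void main(String[] args) {
        // Height 571747 falls into bucket 572, which spans heights 571001-572000.
        System.out.println(bucketFileName("BsqBlocks", 571747)); // BsqBlocks_571001-572000
    }
}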

View file

@@ -46,6 +46,7 @@ import lombok.extern.slf4j.Slf4j;
@Singleton
public class BsqBlocksStorageService {
public final static String NAME = "BsqBlocks";
private final int genesisBlockHeight;
private final File storageDir;
private final BlocksPersistence blocksPersistence;
@@ -72,7 +73,7 @@
chainHeightOfPersistedBlocks = Math.max(chainHeightOfPersistedBlocks,
getHeightOfLastFullBucket(blocks));
}
log.error("Persist (serialize+write) {} blocks took {} ms",
log.info("Persist (serialize+write) {} blocks took {} ms",
blocks.size(),
System.currentTimeMillis() - ts);
}
@@ -83,18 +84,15 @@
List<BaseBlock> list = blocksPersistence.readBlocks(genesisBlockHeight, chainHeight);
list.stream().map(Block::fromProto)
.forEach(blocks::add);
log.error("Reading and deserializing {} blocks took {} ms", blocks.size(), System.currentTimeMillis() - ts);
log.info("Reading and deserializing {} blocks took {} ms", blocks.size(), System.currentTimeMillis() - ts);
if (!blocks.isEmpty()) {
chainHeightOfPersistedBlocks = getHeightOfLastFullBucket(blocks);
}
return blocks;
}
public LinkedList<Block> swapBlocks(List<protobuf.BaseBlock> protobufBlocks) {
public LinkedList<Block> migrateBlocks(List<protobuf.BaseBlock> protobufBlocks) {
long ts = System.currentTimeMillis();
log.error("We have {} blocks in the daoStateAsProto", protobufBlocks.size());
blocksPersistence.writeBlocks(protobufBlocks);
LinkedList<Block> blocks = new LinkedList<>();
protobufBlocks.forEach(protobufBlock -> blocks.add(Block.fromProto(protobufBlock)));
@@ -102,7 +100,7 @@
chainHeightOfPersistedBlocks = getHeightOfLastFullBucket(blocks);
}
log.error("Mapping blocks (write+deserialization) from DaoStateStore took {} ms", System.currentTimeMillis() - ts);
log.info("Migrating blocks (write+deserialization) from DaoStateStore took {} ms", System.currentTimeMillis() - ts);
return blocks;
}
@@ -138,18 +136,15 @@
File destinationFile = new File(storageDir, fileName);
FileUtils.copyFile(resourceFile, destinationFile);
}
log.error("Copying {} resource files took {} ms", fileNames.length, System.currentTimeMillis() - ts);
log.info("Copying {} resource files took {} ms", fileNames.length, System.currentTimeMillis() - ts);
} catch (Throwable e) {
e.printStackTrace();
}
}
// todo
private int getHeightOfLastFullBucket(List<Block> blocks) {
int i = blocks.get(blocks.size() - 1).getHeight() / BlocksPersistence.BUCKET_SIZE;
int i1 = i * BlocksPersistence.BUCKET_SIZE;
// log.error("getHeightOfLastFullBucket {}", i * BlocksPersistence.BUCKET_SIZE);
return i1;
int bucketIndex = blocks.get(blocks.size() - 1).getHeight() / BlocksPersistence.BUCKET_SIZE;
return bucketIndex * BlocksPersistence.BUCKET_SIZE;
}
public void removeBlocksDirectory() {
@@ -163,11 +158,5 @@
if (!storageDir.exists()) {
storageDir.mkdir();
}
/* List<protobuf.BaseBlock> blocks = new ArrayList<>();
// height, long time, String hash, String previousBlockHash
for (int i = genesisBlockHeight; i <= chainHeightOfPersistedBlocks; i++) {
blocks.add(new Block(i, 0, "", "").toProtoMessage());
}
blocksPersistence.addAll(blocks);*/
}
}
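
The refactored getHeightOfLastFullBucket above is plain floor-rounding to the bucket boundary; a worked example with an illustrative height:

public class LastFullBucketSketch {
    public static void main(String[] args) {
        int bucketSize = 1000;  // BlocksPersistence.BUCKET_SIZE
        int height = 571747;    // illustrative height of the newest persisted block
        int bucketIndex = height / bucketSize;        // 571 (integer division floors)
        System.out.println(bucketIndex * bucketSize); // 571000: end of the last full bucket
    }
}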

View file

@@ -102,7 +102,7 @@ public class DaoStateStorageService extends StoreService<DaoStateStore> {
persistenceManager.persistNow(() -> {
// After we have written to disk we remove the daoStateAsProto in the store to avoid that it stays in
// memory there until the next persist call.
log.error("Persist daoState took {} ms", System.currentTimeMillis() - ts);
log.info("Persist daoState took {} ms", System.currentTimeMillis() - ts);
store.releaseMemory();
GcUtil.maybeReleaseMemory();
UserThread.execute(completeHandler);
@@ -116,7 +116,6 @@
Thread.currentThread().setName("copyBsqBlocksFromResources");
bsqBlocksStorageService.copyFromResources(postFix);
// We read daoState and blocks and keep them in fields in the store.
super.readFromResources(postFix, () -> {
// We get mapped back to the user thread, so we need to create a new thread again as we don't want to
// execute on the user thread.
@@ -128,7 +127,7 @@
if (daoStateAsProto.getBlocksList().isEmpty()) {
list = bsqBlocksStorageService.readBlocks(daoStateAsProto.getChainHeight());
} else {
list = bsqBlocksStorageService.swapBlocks(daoStateAsProto.getBlocksList());
list = bsqBlocksStorageService.migrateBlocks(daoStateAsProto.getBlocksList());
}
blocks.clear();
blocks.addAll(list);
@@ -144,11 +143,10 @@
if (daoStateAsProto != null) {
long ts = System.currentTimeMillis();
DaoState daoState = DaoState.fromProto(daoStateAsProto, blocks);
log.error("Deserializing DaoState with {} blocks took {} ms",
log.info("Deserializing DaoState with {} blocks took {} ms",
daoState.getBlocks().size(), System.currentTimeMillis() - ts);
return daoState;
}
return new DaoState();
}
@@ -189,7 +187,7 @@
}
private void removeAndBackupDaoConsensusFiles(File storageDir, String backupDirName) throws IOException {
// We delete all DAO consensus data. Some will be rebuilt from resources.
// We delete all DAO related data. Some will be rebuilt from resources.
long currentTime = System.currentTimeMillis();
String newFileName = "BlindVoteStore_" + currentTime;
FileUtil.removeAndBackupFile(storageDir, new File(storageDir, "BlindVoteStore"), newFileName, backupDirName);

View file

@@ -77,7 +77,6 @@ public class DaoStateStore implements PersistableEnvelope {
return new DaoStateStore(proto.getDaoState(), daoStateHashList);
}
public void releaseMemory() {
daoStateAsProto = null;
daoStateHashChain = null;