Merge btcdb repo into database directory.

Dave Collins 2015-01-27 13:32:36 -06:00
commit 8d7780e0ab
24 changed files with 5267 additions and 0 deletions

67
database/README.md Normal file

@ -0,0 +1,67 @@
database
========
[![Build Status](http://img.shields.io/travis/btcsuite/btcd.svg)](https://travis-ci.org/btcsuite/btcd)
[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
Package database provides a database interface for the bitcoin block chain and
transactions.
## Documentation
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcd/database)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the GoDoc site
[here](http://godoc.org/github.com/btcsuite/btcd/database).
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/btcsuite/btcd/database
## Installation
```bash
$ go get github.com/btcsuite/btcd/database
```
## Examples
* [CreateDB Example](http://godoc.org/github.com/btcsuite/btcd/database#example-CreateDB)
  Demonstrates creating a new database and inserting the genesis block into it.
* [NewestSha Example](http://godoc.org/github.com/btcsuite/btcd/database#example-Db--NewestSha)
  Demonstrates querying the database for the most recent best block height and
  hash (a condensed sketch of this flow follows below).
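For orientation, here is a condensed sketch of the flow the two examples above demonstrate. It uses the in-memory memdb driver so nothing touches disk; a real application would typically import a persistent driver such as the leveldb-backed one instead.

```Go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/memdb"
	"github.com/btcsuite/btcnet"
	"github.com/btcsuite/btcutil"
)

func main() {
	// Create a memory-backed database so nothing is written to disk.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Insert the main network genesis block.
	genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
	height, err := db.InsertBlock(genesis)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("New height:", height)

	// Query the most recent block back out.
	sha, tipHeight, err := db.NewestSha()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Newest hash:", sha, "height:", tipHeight)
}
```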
## TODO
- Increase test coverage to 100%
## GPG Verification Key
All official release tags are signed by Conformal so users can ensure the code
has not been tampered with and is coming from Conformal. To verify the
signature perform the following:
- Download the public key from the Conformal website at
https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
- Import the public key into your GPG keyring:
```bash
gpg --import GIT-GPG-KEY-conformal.txt
```
- Verify the release tag with the following command where `TAG_NAME` is a
placeholder for the specific tag:
```bash
git tag -v TAG_NAME
```
## License
Package database is licensed under the [copyfree](http://copyfree.org) ISC
License.

221
database/common_test.go Normal file

@ -0,0 +1,221 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"compress/bzip2"
"encoding/binary"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/btcsuite/btcd/database"
_ "github.com/btcsuite/btcd/database/ldb"
_ "github.com/btcsuite/btcd/database/memdb"
"github.com/btcsuite/btcnet"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
)
var (
// network is the expected bitcoin network in the test block data.
network = btcwire.MainNet
// savedBlocks is used to store blocks loaded from the blockDataFile
// so multiple invocations of loadBlocks from the various test functions
// do not have to reload them from disk.
savedBlocks []*btcutil.Block
// blockDataFile is the path to a file containing the first 256 blocks
// of the block chain.
blockDataFile = filepath.Join("testdata", "blocks1-256.bz2")
)
var zeroHash = btcwire.ShaHash{}
// testDbRoot is the root directory used to create all test databases.
const testDbRoot = "testdbs"
// fileExists returns whether or not the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// openDB is used to open an existing database based on the database type and
// name.
func openDB(dbType, dbName string) (database.Db, error) {
// Handle memdb specially since it has no files on disk.
if dbType == "memdb" {
db, err := database.OpenDB(dbType)
if err != nil {
return nil, fmt.Errorf("error opening db: %v", err)
}
return db, nil
}
dbPath := filepath.Join(testDbRoot, dbName)
db, err := database.OpenDB(dbType, dbPath)
if err != nil {
return nil, fmt.Errorf("error opening db: %v", err)
}
return db, nil
}
// createDB creates a new db instance and returns a teardown function the caller
// should invoke when done testing to clean up. The close flag indicates
// whether or not the teardown function should sync and close the database
// during teardown.
func createDB(dbType, dbName string, close bool) (database.Db, func(), error) {
// Handle memory database specially since it doesn't need the disk
// specific handling.
if dbType == "memdb" {
db, err := database.CreateDB(dbType)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown := func() {
if close {
db.Close()
}
}
return db, teardown, nil
}
// Create the root directory for test databases.
if !fileExists(testDbRoot) {
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
err := fmt.Errorf("unable to create test db "+
"root: %v", err)
return nil, nil, err
}
}
// Create a new database to store the accepted blocks into.
dbPath := filepath.Join(testDbRoot, dbName)
_ = os.RemoveAll(dbPath)
db, err := database.CreateDB(dbType, dbPath)
if err != nil {
return nil, nil, fmt.Errorf("error creating db: %v", err)
}
// Setup a teardown function for cleaning up. This function is
// returned to the caller to be invoked when it is done testing.
teardown := func() {
dbVersionPath := filepath.Join(testDbRoot, dbName+".ver")
if close {
db.Sync()
db.Close()
}
os.RemoveAll(dbPath)
os.Remove(dbVersionPath)
os.RemoveAll(testDbRoot)
}
return db, teardown, nil
}
// setupDB is used to create a new db instance with the genesis block already
// inserted. In addition to the new db instance, it returns a teardown function
// the caller should invoke when done testing to clean up.
func setupDB(dbType, dbName string) (database.Db, func(), error) {
db, teardown, err := createDB(dbType, dbName, true)
if err != nil {
return nil, nil, err
}
// Insert the main network genesis block. This is part of the initial
// database setup.
genesisBlock := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
_, err = db.InsertBlock(genesisBlock)
if err != nil {
teardown()
err := fmt.Errorf("failed to insert genesis block: %v", err)
return nil, nil, err
}
return db, teardown, nil
}
// loadBlocks loads the blocks contained in the testdata directory and returns
// a slice of them.
func loadBlocks(t *testing.T) ([]*btcutil.Block, error) {
if len(savedBlocks) != 0 {
return savedBlocks, nil
}
var dr io.Reader
fi, err := os.Open(blockDataFile)
if err != nil {
t.Errorf("failed to open file %v, err %v", blockDataFile, err)
return nil, err
}
if strings.HasSuffix(blockDataFile, ".bz2") {
z := bzip2.NewReader(fi)
dr = z
} else {
dr = fi
}
defer func() {
if err := fi.Close(); err != nil {
t.Errorf("failed to close file %v %v", blockDataFile, err)
}
}()
// Set the first block as the genesis block.
blocks := make([]*btcutil.Block, 0, 256)
genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
blocks = append(blocks, genesis)
for height := int64(1); err == nil; height++ {
var rintbuf uint32
err := binary.Read(dr, binary.LittleEndian, &rintbuf)
if err == io.EOF {
// hit end of file at expected offset: no warning
height--
err = nil
break
}
if err != nil {
t.Errorf("failed to load network type, err %v", err)
break
}
if rintbuf != uint32(network) {
t.Errorf("Block doesn't match network: %v expects %v",
rintbuf, network)
break
}
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
blocklen := rintbuf
rbytes := make([]byte, blocklen)
// read the full serialized block (io.ReadFull avoids short reads)
if _, err = io.ReadFull(dr, rbytes); err != nil {
t.Errorf("failed to read block %v, err %v", height, err)
break
}
block, err := btcutil.NewBlockFromBytes(rbytes)
if err != nil {
t.Errorf("failed to parse block %v", height)
return nil, err
}
blocks = append(blocks, block)
}
savedBlocks = blocks
return blocks, nil
}

10
database/cov_report.sh Normal file

@ -0,0 +1,10 @@
#!/bin/sh
# This script uses go tool cover to generate a test coverage report.
go test -coverprofile=cov.out && go tool cover -func=cov.out && rm -f cov.out
echo "============================================================"
(cd ldb && go test -coverprofile=cov.out && go tool cover -func=cov.out && \
rm -f cov.out)
echo "============================================================"
(cd memdb && go test -coverprofile=cov.out && go tool cover -func=cov.out && \
rm -f cov.out)

221
database/db.go Normal file

@ -0,0 +1,221 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"errors"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"golang.org/x/crypto/ripemd160"
)
// Errors that the various database functions may return.
var (
ErrAddrIndexDoesNotExist = errors.New("address index hasn't been built up yet")
ErrUnsupportedAddressType = errors.New("address type is not supported " +
"by the address-index")
ErrPrevShaMissing = errors.New("previous sha missing from database")
ErrTxShaMissing = errors.New("requested transaction does not exist")
ErrBlockShaMissing = errors.New("requested block does not exist")
ErrDuplicateSha = errors.New("duplicate insert attempted")
ErrDbDoesNotExist = errors.New("non-existent database")
ErrDbUnknownType = errors.New("non-existent database type")
ErrNotImplemented = errors.New("method has not yet been implemented")
)
// AllShas is a special value that can be used as the final sha when requesting
// a range of shas by height to request them all.
const AllShas = int64(^uint64(0) >> 1)
// Db defines a generic interface that is used to request and insert data into
// the bitcoin block chain. This interface is intended to be agnostic to actual
// mechanism used for backend data storage. The AddDBDriver function can be
// used to add a new backend data storage method.
type Db interface {
// Close cleanly shuts down the database and syncs all data.
Close() (err error)
// DropAfterBlockBySha will remove any blocks from the database after
// the given block. It terminates any existing transaction and performs
// its operations in an atomic transaction which is committed before
// the function returns.
DropAfterBlockBySha(*btcwire.ShaHash) (err error)
// ExistsSha returns whether or not the given block hash is present in
// the database.
ExistsSha(sha *btcwire.ShaHash) (exists bool, err error)
// FetchBlockBySha returns a btcutil Block. The implementation may
// cache the underlying data if desired.
FetchBlockBySha(sha *btcwire.ShaHash) (blk *btcutil.Block, err error)
// FetchBlockHeightBySha returns the block height for the given hash.
FetchBlockHeightBySha(sha *btcwire.ShaHash) (height int64, err error)
// FetchBlockHeaderBySha returns a btcwire.BlockHeader for the given
// sha. The implementation may cache the underlying data if desired.
FetchBlockHeaderBySha(sha *btcwire.ShaHash) (bh *btcwire.BlockHeader, err error)
// FetchBlockShaByHeight returns a block hash based on its height in the
// block chain.
FetchBlockShaByHeight(height int64) (sha *btcwire.ShaHash, err error)
// FetchHeightRange looks up a range of blocks by the start and ending
// heights. Fetch is inclusive of the start height and exclusive of the
// ending height. To fetch all hashes from the start height until no
// more are present, use the special id `AllShas'.
FetchHeightRange(startHeight, endHeight int64) (rshalist []btcwire.ShaHash, err error)
// ExistsTxSha returns whether or not the given tx hash is present in
// the database
ExistsTxSha(sha *btcwire.ShaHash) (exists bool, err error)
// FetchTxBySha returns some data for the given transaction hash. The
// implementation may cache the underlying data if desired.
FetchTxBySha(txsha *btcwire.ShaHash) ([]*TxListReply, error)
// FetchTxByShaList returns a TxListReply given an array of transaction
// hashes. The implementation may cache the underlying data if desired.
// This differs from FetchUnSpentTxByShaList in that it will return
// the most recent known Tx, whether or not it is fully spent.
//
// NOTE: This function does not return an error directly since it MUST
// return at least one TxListReply instance for each requested
// transaction. Each TxListReply instance then contains an Err field
// which can be used to detect errors.
FetchTxByShaList(txShaList []*btcwire.ShaHash) []*TxListReply
// FetchUnSpentTxByShaList returns a TxListReply given an array of
// transaction hashes. The implementation may cache the underlying
// data if desired. Fully spent transactions will normally not
// be returned in this operation.
//
// NOTE: This function does not return an error directly since it MUST
// return at least one TxListReply instance for each requested
// transaction. Each TxListReply instance then contains an Err field
// which can be used to detect errors.
FetchUnSpentTxByShaList(txShaList []*btcwire.ShaHash) []*TxListReply
// InsertBlock inserts raw block and transaction data from a block
// into the database. The first block inserted into the database
// will be treated as the genesis block. Every subsequent block insert
// requires the referenced parent block to already exist.
InsertBlock(block *btcutil.Block) (height int64, err error)
// NewestSha returns the hash and block height of the most recent (end)
// block of the block chain. It will return the zero hash, -1 for
// the block height, and no error (nil) if there are not any blocks in
// the database yet.
NewestSha() (sha *btcwire.ShaHash, height int64, err error)
// FetchAddrIndexTip returns the hash and block height of the most recent
// block which has had its address index populated. It will return
// ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the
// addrindex hasn't yet been built up.
FetchAddrIndexTip() (sha *btcwire.ShaHash, height int64, err error)
// UpdateAddrIndexForBlock updates the stored addrindex with passed
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the current tip of the
// addr index. These two operations are performed in an atomic
// transaction which is committed before the function returns.
// Addresses are indexed by the raw bytes of their base58 decoded
// hash160.
UpdateAddrIndexForBlock(blkSha *btcwire.ShaHash, height int64,
addrIndex BlockAddrIndex) error
// FetchTxsForAddr looks up and returns all transactions which either
// spend a previously created output of the passed address, or create
// a new output locked to the passed address. The `limit` parameter
// should be the max number of transactions to be returned.
// Additionally, if the caller wishes to skip forward in the results
// some amount, the `skip` parameter represents how many results to skip.
// NOTE: Values for both `skip` and `limit` MUST be positive.
FetchTxsForAddr(addr btcutil.Address, skip int, limit int) ([]*TxListReply, error)
// DeleteAddrIndex deletes the entire addrindex stored within the DB.
DeleteAddrIndex() error
// RollbackClose discards the recent database changes to the previously
// saved data at last Sync and closes the database.
RollbackClose() (err error)
// Sync verifies that the database is coherent on disk and no
// outstanding transactions are in flight.
Sync() (err error)
}
// DriverDB defines a structure for backend drivers to use when registering
// themselves as a backend which implements the Db interface.
type DriverDB struct {
DbType string
CreateDB func(args ...interface{}) (pbdb Db, err error)
OpenDB func(args ...interface{}) (pbdb Db, err error)
}
// TxListReply is used to return individual transaction information when
// data about multiple transactions is requested in a single call.
type TxListReply struct {
Sha *btcwire.ShaHash
Tx *btcwire.MsgTx
BlkSha *btcwire.ShaHash
Height int64
TxSpent []bool
Err error
}
// AddrIndexKeySize is the number of bytes used by keys into the BlockAddrIndex.
const AddrIndexKeySize = ripemd160.Size
// BlockAddrIndex represents the indexing structure for addresses.
// It maps a hash160 to a list of transaction locations within a block that
// either pays to or spends from the passed UTXO for the hash160.
type BlockAddrIndex map[[AddrIndexKeySize]byte][]*btcwire.TxLoc
// driverList holds all of the registered database backends.
var driverList []DriverDB
// AddDBDriver adds a back end database driver to available interfaces.
func AddDBDriver(instance DriverDB) {
// TODO(drahn) Does this really need to check for duplicate names ?
for _, drv := range driverList {
// TODO(drahn) should duplicates be an error?
if drv.DbType == instance.DbType {
return
}
}
driverList = append(driverList, instance)
}
// CreateDB initializes and opens a database.
func CreateDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
for _, drv := range driverList {
if drv.DbType == dbtype {
return drv.CreateDB(args...)
}
}
return nil, ErrDbUnknownType
}
// OpenDB opens an existing database.
func OpenDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
for _, drv := range driverList {
if drv.DbType == dbtype {
return drv.OpenDB(args...)
}
}
return nil, ErrDbUnknownType
}
// SupportedDBs returns a slice of strings that represent the database drivers
// that have been registered and are therefore supported.
func SupportedDBs() []string {
var supportedDBs []string
for _, drv := range driverList {
supportedDBs = append(supportedDBs, drv.DbType)
}
return supportedDBs
}
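A note on how backends plug into the DriverDB/AddDBDriver mechanism defined above: a driver package registers itself from an init function, as the memdb and ldb packages in this commit do. The sketch below uses hypothetical names (exampledb, createDB, openDB) purely for illustration.

```Go
package exampledb // hypothetical backend package name

import "github.com/btcsuite/btcd/database"

// createDB and openDB stand in for the backend's real constructors, which
// would return a type implementing database.Db.
func createDB(args ...interface{}) (database.Db, error) {
	return nil, database.ErrNotImplemented
}

func openDB(args ...interface{}) (database.Db, error) {
	return nil, database.ErrNotImplemented
}

func init() {
	// Registering makes this backend selectable by name via
	// database.CreateDB("exampledb", ...) and database.OpenDB("exampledb", ...).
	database.AddDBDriver(database.DriverDB{
		DbType:   "exampledb",
		CreateDB: createDB,
		OpenDB:   openDB,
	})
}
```

Applications then pull the driver in with a blank import, exactly as example_test.go does for memdb.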

179
database/db_test.go Normal file

@ -0,0 +1,179 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"fmt"
"testing"
"github.com/btcsuite/btcd/database"
)
var (
// ignoreDbTypes are types which should be ignored when running tests
// that iterate all supported DB types. This allows some tests to add
// bogus drivers for testing purposes while still allowing other tests
// to easily iterate all supported drivers.
ignoreDbTypes = map[string]bool{"createopenfail": true}
)
// testNewestShaEmpty ensures that NewestSha returns the values expected by
// the interface contract.
func testNewestShaEmpty(t *testing.T, db database.Db) {
sha, height, err := db.NewestSha()
if err != nil {
t.Errorf("NewestSha error %v", err)
}
if !sha.IsEqual(&zeroHash) {
t.Errorf("NewestSha wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Errorf("NewestSha wrong height got: %d, want %d", height, -1)
}
}
// TestEmptyDB tests that empty databases are handled properly.
func TestEmptyDB(t *testing.T) {
for _, dbType := range database.SupportedDBs() {
// Ensure NewestSha returns expected values for a newly created
// db.
db, teardown, err := createDB(dbType, "emptydb", false)
if err != nil {
t.Errorf("Failed to create test database %v", err)
return
}
testNewestShaEmpty(t, db)
// Ensure NewestSha still returns expected values for an empty
// database after reopen.
db.Close()
db, err = openDB(dbType, "emptydb")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
testNewestShaEmpty(t, db)
db.Close()
// Clean up the old db.
teardown()
}
}
// TestAddDuplicateDriver ensures that adding a duplicate driver does not
// overwrite an existing one.
func TestAddDuplicateDriver(t *testing.T) {
supportedDBs := database.SupportedDBs()
if len(supportedDBs) == 0 {
t.Errorf("TestAddDuplicateDriver: No backends to test")
return
}
dbType := supportedDBs[0]
// bogusCreateDB is a function which acts as a bogus create and open
// driver function and intentionally returns a failure that can be
// detected if the interface allows a duplicate driver to overwrite an
// existing one.
bogusCreateDB := func(args ...interface{}) (database.Db, error) {
return nil, fmt.Errorf("duplicate driver allowed for database "+
"type [%v]", dbType)
}
// Create a driver that tries to replace an existing one. Set its
// create and open functions to a function that causes a test failure if
// they are invoked.
driver := database.DriverDB{
DbType: dbType,
CreateDB: bogusCreateDB,
OpenDB: bogusCreateDB,
}
database.AddDBDriver(driver)
// Ensure creating a database of the type that we tried to replace
// doesn't fail (if it does, it indicates the driver was erroneously
// replaced).
_, teardown, err := createDB(dbType, "dupdrivertest", true)
if err != nil {
t.Errorf("TestAddDuplicateDriver: %v", err)
return
}
teardown()
}
// TestCreateOpenFail ensures that errors which occur while opening or closing
// a database are handled properly.
func TestCreateOpenFail(t *testing.T) {
// bogusCreateDB is a function which acts as a bogus create and open
// driver function that intentionally returns a failure which can be
// detected.
dbType := "createopenfail"
openError := fmt.Errorf("failed to create or open database for "+
"database type [%v]", dbType)
bogusCreateDB := func(args ...interface{}) (database.Db, error) {
return nil, openError
}
// Create and add driver that intentionally fails when created or opened
// to ensure errors on database open and create are handled properly.
driver := database.DriverDB{
DbType: dbType,
CreateDB: bogusCreateDB,
OpenDB: bogusCreateDB,
}
database.AddDBDriver(driver)
// Ensure creating a database with the new type fails with the expected
// error.
_, err := database.CreateDB(dbType, "createfailtest")
if err != openError {
t.Errorf("TestCreateOpenFail: expected error not received - "+
"got: %v, want %v", err, openError)
return
}
// Ensure opening a database with the new type fails with the expected
// error.
_, err = database.OpenDB(dbType, "openfailtest")
if err != openError {
t.Errorf("TestCreateOpenFail: expected error not received - "+
"got: %v, want %v", err, openError)
return
}
}
// TestCreateOpenUnsupported ensures that attempting to create or open an
// unsupported database type is handled properly.
func TestCreateOpenUnsupported(t *testing.T) {
// Ensure creating a database with an unsupported type fails with the
// expected error.
dbType := "unsupported"
_, err := database.CreateDB(dbType, "unsupportedcreatetest")
if err != database.ErrDbUnknownType {
t.Errorf("TestCreateOpenUnsupported: expected error not "+
"received - got: %v, want %v", err, database.ErrDbUnknownType)
return
}
// Ensure opening a database with the new type fails with the expected
// error.
_, err = database.OpenDB(dbType, "unsupportedopentest")
if err != database.ErrDbUnknownType {
t.Errorf("TestCreateOpenUnsupported: expected error not "+
"received - got: %v, want %v", err, database.ErrDbUnknownType)
return
}
}
// TestInterface performs tests for the various interfaces of the database
// package which require state in the database for each supported database
// type (that is, those loaded in common_test.go).
func TestInterface(t *testing.T) {
for _, dbType := range database.SupportedDBs() {
if _, exists := ignoreDbTypes[dbType]; !exists {
testInterface(t, dbType)
}
}
}

31
database/doc.go Normal file

@ -0,0 +1,31 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package database provides a database interface for the Bitcoin block chain.
As of July 2014, there are over 309,000 blocks in the Bitcoin block chain and
over 42 million transactions (which turns out to be over 21GB of data).
This package provides a database layer to store and retrieve this data in a
fairly simple and efficient manner. The use of this should not require specific
knowledge of the database backend.
Basic Design
The basic design of this package is to provide two classes of items in a
database; blocks and transactions (tx) where the block number increases
monotonically. Each transaction belongs to a single block although a block can
have a variable number of transactions. Along with these two items, several
convenience functions for dealing with the database are provided as well as
functions to query specific items that may be present in a block or tx.
Usage
At the highest level, the use of this package just requires that you import it,
setup a database, insert some data into it, and optionally, query the data back.
The first block inserted into the database will be treated as the genesis block.
Every subsequent block insert requires the referenced parent block to already
exist.
*/
package database
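To make the insert-then-query flow described in the Usage section above concrete, here is a hedged sketch of reading data back through the Db interface. It leans on the AllShas sentinel and the TxListReply error contract documented in db.go; the package and function names are illustrative only, and the database is assumed to be already populated.

```Go
package dbexample // illustrative only, not part of this commit

import (
	"fmt"

	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcwire"
)

// queryBack sketches reading data back out of an already-populated database.
func queryBack(db database.Db, txShas []*btcwire.ShaHash) error {
	// Newest (end) block of the chain.
	sha, height, err := db.NewestSha()
	if err != nil {
		return err
	}
	fmt.Println("tip:", sha, "height:", height)

	// FetchHeightRange is inclusive of the start height; the AllShas
	// sentinel asks for hashes until no more are present.
	hashes, err := db.FetchHeightRange(0, database.AllShas)
	if err != nil {
		return err
	}
	for i := range hashes {
		blk, err := db.FetchBlockBySha(&hashes[i])
		if err != nil {
			return err
		}
		fmt.Println("height", i, "txns", len(blk.Transactions()))
	}

	// Per the interface contract, FetchTxByShaList reports errors through
	// the Err field of each reply rather than a function-level error.
	for _, reply := range db.FetchTxByShaList(txShas) {
		if reply.Err != nil {
			fmt.Println("tx", reply.Sha, "error:", reply.Err)
			continue
		}
		fmt.Println("tx", reply.Sha, "at height", reply.Height)
	}
	return nil
}
```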

94
database/example_test.go Normal file

@ -0,0 +1,94 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"fmt"
"github.com/btcsuite/btcd/database"
_ "github.com/btcsuite/btcd/database/memdb"
"github.com/btcsuite/btcnet"
"github.com/btcsuite/btcutil"
)
// This example demonstrates creating a new database and inserting the genesis
// block into it.
func ExampleCreateDB() {
// Notice in these example imports that the memdb driver is loaded.
// Ordinarily this would be whatever driver(s) your application
// requires.
// import (
// "github.com/btcsuite/btcd/database"
// _ "github.com/btcsuite/btcd/database/memdb"
// )
// Create a database and schedule it to be closed on exit. This example
// uses a memory-only database to avoid needing to write anything to
// the disk. Typically, you would specify a persistent database driver
// such as "leveldb" and give it a database name as the second
// parameter.
db, err := database.CreateDB("memdb")
if err != nil {
fmt.Println(err)
return
}
defer db.Close()
// Insert the main network genesis block.
genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
newHeight, err := db.InsertBlock(genesis)
if err != nil {
fmt.Println(err)
return
}
fmt.Println("New height:", newHeight)
// Output:
// New height: 0
}
// exampleLoadDB is used in the example to elide the setup code.
func exampleLoadDB() (database.Db, error) {
db, err := database.CreateDB("memdb")
if err != nil {
return nil, err
}
// Insert the main network genesis block.
genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
_, err = db.InsertBlock(genesis)
if err != nil {
return nil, err
}
return db, err
}
// This example demonstrates querying the database for the most recent best
// block height and hash.
func ExampleDb_newestSha() {
// Load a database for the purposes of this example and schedule it to
// be closed on exit. See the CreateDB example for more details on what
// this step is doing.
db, err := exampleLoadDB()
if err != nil {
fmt.Println(err)
return
}
defer db.Close()
latestHash, latestHeight, err := db.NewestSha()
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Latest hash:", latestHash)
fmt.Println("Latest height:", latestHeight)
// Output:
// Latest hash: 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
// Latest height: 0
}

637
database/interface_test.go Normal file

@ -0,0 +1,637 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database_test
import (
"reflect"
"testing"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"github.com/davecgh/go-spew/spew"
)
// testContext is used to store context information about a running test which
// is passed into helper functions. The useSpends field indicates whether or
// not the expected spend data should be empty or derived from the specific
// test blocks provided. This is needed because during the first loop, while
// the blocks are being inserted, the tests run against the latest block and
// therefore none of the outputs can be spent yet. However, on subsequent
// runs, all blocks have been inserted and therefore some of the transaction
// outputs are spent.
type testContext struct {
t *testing.T
dbType string
db database.Db
blockHeight int64
blockHash *btcwire.ShaHash
block *btcutil.Block
useSpends bool
}
// testInsertBlock ensures InsertBlock conforms to the interface contract.
func testInsertBlock(tc *testContext) bool {
// The block must insert without any errors.
newHeight, err := tc.db.InsertBlock(tc.block)
if err != nil {
tc.t.Errorf("InsertBlock (%s): failed to insert block #%d (%s) "+
"err %v", tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
// The returned height must be the expected value.
if newHeight != tc.blockHeight {
tc.t.Errorf("InsertBlock (%s): height mismatch got: %v, "+
"want: %v", tc.dbType, newHeight, tc.blockHeight)
return false
}
return true
}
// testNewestSha ensures the NewestSha returns the values expected by the
// interface contract.
func testNewestSha(tc *testContext) bool {
// The newest block hash and height must be returned without any errors.
sha, height, err := tc.db.NewestSha()
if err != nil {
tc.t.Errorf("NewestSha (%s): block #%d (%s) error %v",
tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
// The returned hash must be the expected value.
if !sha.IsEqual(tc.blockHash) {
tc.t.Errorf("NewestSha (%s): block #%d (%s) wrong hash got: %s",
tc.dbType, tc.blockHeight, tc.blockHash, sha)
return false
}
// The returned height must be the expected value.
if height != tc.blockHeight {
tc.t.Errorf("NewestSha (%s): block #%d (%s) wrong height "+
"got: %d", tc.dbType, tc.blockHeight, tc.blockHash,
height)
return false
}
return true
}
// testExistsSha ensures ExistsSha conforms to the interface contract.
func testExistsSha(tc *testContext) bool {
// The block must exist in the database.
exists, err := tc.db.ExistsSha(tc.blockHash)
if err != nil {
tc.t.Errorf("ExistsSha (%s): block #%d (%s) unexpected error: "+
"%v", tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
if !exists {
tc.t.Errorf("ExistsSha (%s): block #%d (%s) does not exist",
tc.dbType, tc.blockHeight, tc.blockHash)
return false
}
return true
}
// testFetchBlockBySha ensures FetchBlockBySha conforms to the interface
// contract.
func testFetchBlockBySha(tc *testContext) bool {
// The block must be fetchable by its hash without any errors.
blockFromDb, err := tc.db.FetchBlockBySha(tc.blockHash)
if err != nil {
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) err: %v",
tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
// The block fetched from the database must give back the same MsgBlock
// and raw bytes that were stored.
if !reflect.DeepEqual(tc.block.MsgBlock(), blockFromDb.MsgBlock()) {
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) does not "+
"match stored block\ngot: %v\nwant: %v", tc.dbType,
tc.blockHeight, tc.blockHash,
spew.Sdump(blockFromDb.MsgBlock()),
spew.Sdump(tc.block.MsgBlock()))
return false
}
blockBytes, err := tc.block.Bytes()
if err != nil {
tc.t.Errorf("block.Bytes: %v", err)
return false
}
blockFromDbBytes, err := blockFromDb.Bytes()
if err != nil {
tc.t.Errorf("blockFromDb.Bytes: %v", err)
return false
}
if !reflect.DeepEqual(blockBytes, blockFromDbBytes) {
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) bytes do "+
"not match stored bytes\ngot: %v\nwant: %v", tc.dbType,
tc.blockHeight, tc.blockHash,
spew.Sdump(blockFromDbBytes), spew.Sdump(blockBytes))
return false
}
return true
}
// testFetchBlockHeightBySha ensures FetchBlockHeightBySha conforms to the
// interface contract.
func testFetchBlockHeightBySha(tc *testContext) bool {
// The block height must be fetchable by its hash without any errors.
blockHeight, err := tc.db.FetchBlockHeightBySha(tc.blockHash)
if err != nil {
tc.t.Errorf("FetchBlockHeightBySha (%s): block #%d (%s) err: %v",
tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
// The block height fetched from the database must match the expected
// height.
if blockHeight != tc.blockHeight {
tc.t.Errorf("FetchBlockHeightBySha (%s): block #%d (%s) height "+
"does not match expected value - got: %v", tc.dbType,
tc.blockHeight, tc.blockHash, blockHeight)
return false
}
return true
}
// testFetchBlockHeaderBySha ensures FetchBlockHeaderBySha conforms to the
// interface contract.
func testFetchBlockHeaderBySha(tc *testContext) bool {
// The block header must be fetchable by its hash without any errors.
blockHeader, err := tc.db.FetchBlockHeaderBySha(tc.blockHash)
if err != nil {
tc.t.Errorf("FetchBlockHeaderBySha (%s): block #%d (%s) err: %v",
tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
// The block header fetched from the database must give back the same
// BlockHeader that was stored.
if !reflect.DeepEqual(&tc.block.MsgBlock().Header, blockHeader) {
tc.t.Errorf("FetchBlockHeaderBySha (%s): block header #%d (%s) "+
" does not match stored block\ngot: %v\nwant: %v",
tc.dbType, tc.blockHeight, tc.blockHash,
spew.Sdump(blockHeader),
spew.Sdump(&tc.block.MsgBlock().Header))
return false
}
return true
}
// testFetchBlockShaByHeight ensures FetchBlockShaByHeight conforms to the
// interface contract.
func testFetchBlockShaByHeight(tc *testContext) bool {
// The hash returned for the block by its height must be the expected
// value.
hashFromDb, err := tc.db.FetchBlockShaByHeight(tc.blockHeight)
if err != nil {
tc.t.Errorf("FetchBlockShaByHeight (%s): block #%d (%s) err: %v",
tc.dbType, tc.blockHeight, tc.blockHash, err)
return false
}
if !hashFromDb.IsEqual(tc.blockHash) {
tc.t.Errorf("FetchBlockShaByHeight (%s): block #%d (%s) hash "+
"does not match expected value - got: %v", tc.dbType,
tc.blockHeight, tc.blockHash, hashFromDb)
return false
}
return true
}
func testFetchBlockShaByHeightErrors(tc *testContext) bool {
// Invalid heights must error and return a nil hash.
tests := []int64{-1, tc.blockHeight + 1, tc.blockHeight + 2}
for i, wantHeight := range tests {
hashFromDb, err := tc.db.FetchBlockShaByHeight(wantHeight)
if err == nil {
tc.t.Errorf("FetchBlockShaByHeight #%d (%s): did not "+
"return error on invalid index: %d - got: %v, "+
"want: non-nil", i, tc.dbType, wantHeight, err)
return false
}
if hashFromDb != nil {
tc.t.Errorf("FetchBlockShaByHeight #%d (%s): returned "+
"hash is not nil on invalid index: %d - got: "+
"%v, want: nil", i, tc.dbType, wantHeight, err)
return false
}
}
return true
}
// testExistsTxSha ensures ExistsTxSha conforms to the interface contract.
func testExistsTxSha(tc *testContext) bool {
for i, tx := range tc.block.Transactions() {
// The transaction must exist in the database.
txHash := tx.Sha()
exists, err := tc.db.ExistsTxSha(txHash)
if err != nil {
tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) tx #%d "+
"(%s) unexpected error: %v", tc.dbType,
tc.blockHeight, tc.blockHash, i, txHash, err)
return false
}
if !exists {
_, err := tc.db.FetchTxBySha(txHash)
if err != nil {
tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) "+
"tx #%d (%s) does not exist", tc.dbType,
tc.blockHeight, tc.blockHash, i, txHash)
}
return false
}
}
return true
}
// testFetchTxBySha ensures FetchTxBySha conforms to the interface contract.
func testFetchTxBySha(tc *testContext) bool {
for i, tx := range tc.block.Transactions() {
txHash := tx.Sha()
txReplyList, err := tc.db.FetchTxBySha(txHash)
if err != nil {
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
"tx #%d (%s) err: %v", tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, err)
return false
}
if len(txReplyList) == 0 {
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
"tx #%d (%s) did not return reply data",
tc.dbType, tc.blockHeight, tc.blockHash, i,
txHash)
return false
}
txFromDb := txReplyList[len(txReplyList)-1].Tx
if !reflect.DeepEqual(tx.MsgTx(), txFromDb) {
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
"tx #%d (%s) does not match stored tx\n"+
"got: %v\nwant: %v", tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, spew.Sdump(txFromDb),
spew.Sdump(tx.MsgTx()))
return false
}
}
return true
}
// expectedSpentBuf returns the expected transaction spend information depending
// on the block height and transaction number. NOTE: These figures are
// only valid for the specific set of test data provided at the time these tests
// were written. In particular, this means the first 256 blocks of the mainnet
// block chain.
//
// The first run through while the blocks are still being inserted, the tests
// are running against the latest block and therefore none of the outputs can
// be spent yet. However, on subsequent runs, all blocks have been inserted and
// therefore some of the transaction outputs are spent.
func expectedSpentBuf(tc *testContext, txNum int) []bool {
numTxOut := len(tc.block.MsgBlock().Transactions[txNum].TxOut)
spentBuf := make([]bool, numTxOut)
if tc.useSpends {
if tc.blockHeight == 9 && txNum == 0 {
// Spent by block 170, tx 1, input 0.
// tx f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16
spentBuf[0] = true
}
if tc.blockHeight == 170 && txNum == 1 {
// Spent by block 181, tx 1, input 0.
// tx a16f3ce4dd5deb92d98ef5cf8afeaf0775ebca408f708b2146c4fb42b41e14be
spentBuf[1] = true
}
if tc.blockHeight == 181 && txNum == 1 {
// Spent by block 182, tx 1, input 0.
// tx 591e91f809d716912ca1d4a9295e70c3e78bab077683f79350f101da64588073
spentBuf[1] = true
}
if tc.blockHeight == 182 && txNum == 1 {
// Spent by block 221, tx 1, input 0.
// tx 298ca2045d174f8a158961806ffc4ef96fad02d71a6b84d9fa0491813a776160
spentBuf[0] = true
// Spent by block 183, tx 1, input 0.
// tx 12b5633bad1f9c167d523ad1aa1947b2732a865bf5414eab2f9e5ae5d5c191ba
spentBuf[1] = true
}
if tc.blockHeight == 183 && txNum == 1 {
// Spent by block 187, tx 1, input 0.
// tx 4385fcf8b14497d0659adccfe06ae7e38e0b5dc95ff8a13d7c62035994a0cd79
spentBuf[0] = true
// Spent by block 248, tx 1, input 0.
// tx 828ef3b079f9c23829c56fe86e85b4a69d9e06e5b54ea597eef5fb3ffef509fe
spentBuf[1] = true
}
}
return spentBuf
}
func testFetchTxByShaListCommon(tc *testContext, includeSpent bool) bool {
fetchFunc := tc.db.FetchUnSpentTxByShaList
funcName := "FetchUnSpentTxByShaList"
if includeSpent {
fetchFunc = tc.db.FetchTxByShaList
funcName = "FetchTxByShaList"
}
transactions := tc.block.Transactions()
txHashes := make([]*btcwire.ShaHash, len(transactions))
for i, tx := range transactions {
txHashes[i] = tx.Sha()
}
txReplyList := fetchFunc(txHashes)
if len(txReplyList) != len(txHashes) {
tc.t.Errorf("%s (%s): block #%d (%s) tx reply list does not "+
" match expected length - got: %v, want: %v", funcName,
tc.dbType, tc.blockHeight, tc.blockHash,
len(txReplyList), len(txHashes))
return false
}
for i, tx := range transactions {
txHash := tx.Sha()
txD := txReplyList[i]
// The transaction hash in the reply must be the expected value.
if !txD.Sha.IsEqual(txHash) {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"hash does not match expected value - got %v",
funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, txD.Sha)
return false
}
// The reply must not indicate any errors.
if txD.Err != nil {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"returned unexpected error - got %v, want nil",
funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, txD.Err)
return false
}
// The transaction in the reply fetched from the database must
// be the same MsgTx that was stored.
if !reflect.DeepEqual(tx.MsgTx(), txD.Tx) {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) does "+
"not match stored tx\ngot: %v\nwant: %v",
funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, spew.Sdump(txD.Tx),
spew.Sdump(tx.MsgTx()))
return false
}
// The block hash in the reply from the database must be the
// expected value.
if txD.BlkSha == nil {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"returned nil block hash", funcName, tc.dbType,
tc.blockHeight, tc.blockHash, i, txHash)
return false
}
if !txD.BlkSha.IsEqual(tc.blockHash) {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s)"+
"returned unexpected block hash - got %v",
funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, txD.BlkSha)
return false
}
// The block height in the reply from the database must be the
// expected value.
if txD.Height != tc.blockHeight {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"returned unexpected block height - got %v",
funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, txD.Height)
return false
}
// The spend data in the reply from the database must not
// indicate any of the transactions that were just inserted are
// spent.
if txD.TxSpent == nil {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"returned nil spend data", funcName, tc.dbType,
tc.blockHeight, tc.blockHash, i, txHash)
return false
}
spentBuf := expectedSpentBuf(tc, i)
if !reflect.DeepEqual(txD.TxSpent, spentBuf) {
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
"returned unexpected spend data - got %v, "+
"want %v", funcName, tc.dbType, tc.blockHeight,
tc.blockHash, i, txHash, txD.TxSpent, spentBuf)
return false
}
}
return true
}
// testFetchTxByShaList ensures FetchTxByShaList conforms to the interface
// contract.
func testFetchTxByShaList(tc *testContext) bool {
return testFetchTxByShaListCommon(tc, true)
}
// testFetchUnSpentTxByShaList ensures FetchUnSpentTxByShaList conforms to the
// interface contract.
func testFetchUnSpentTxByShaList(tc *testContext) bool {
return testFetchTxByShaListCommon(tc, false)
}
// testIntegrity performs a series of tests against the interface functions
// which fetch and check for data existence.
func testIntegrity(tc *testContext) bool {
// The block must now exist in the database.
if !testExistsSha(tc) {
return false
}
// Loading the block back from the database must give back
// the same MsgBlock and raw bytes that were stored.
if !testFetchBlockBySha(tc) {
return false
}
// The height returned for the block given its hash must be the
// expected value
if !testFetchBlockHeightBySha(tc) {
return false
}
// Loading the header back from the database must give back
// the same BlockHeader that was stored.
if !testFetchBlockHeaderBySha(tc) {
return false
}
// The hash returned for the block by its height must be the
// expected value.
if !testFetchBlockShaByHeight(tc) {
return false
}
// All of the transactions in the block must now exist in the
// database.
if !testExistsTxSha(tc) {
return false
}
// Loading all of the transactions in the block back from the
// database must give back the same MsgTx that was stored.
if !testFetchTxBySha(tc) {
return false
}
// All of the transactions in the block must be fetchable via
// FetchTxByShaList and all of the list replies must have the
// expected values.
if !testFetchTxByShaList(tc) {
return false
}
// All of the transactions in the block must be fetchable via
// FetchUnSpentTxByShaList and all of the list replies must have
// the expected values.
if !testFetchUnSpentTxByShaList(tc) {
return false
}
return true
}
// testInterface performs tests for the various interfaces of the database
// package which require state in the database for the given database type.
func testInterface(t *testing.T, dbType string) {
db, teardown, err := setupDB(dbType, "interface")
if err != nil {
t.Errorf("Failed to create test database (%s) %v", dbType, err)
return
}
defer teardown()
// Load up a bunch of test blocks.
blocks, err := loadBlocks(t)
if err != nil {
t.Errorf("Unable to load blocks from test data %v: %v",
blockDataFile, err)
return
}
// Create a test context to pass around.
context := testContext{t: t, dbType: dbType, db: db}
t.Logf("Loaded %d blocks for testing %s", len(blocks), dbType)
for height := int64(1); height < int64(len(blocks)); height++ {
// Get the appropriate block and hash and update the test
// context accordingly.
block := blocks[height]
blockHash, err := block.Sha()
if err != nil {
t.Errorf("block.Sha: %v", err)
return
}
context.blockHeight = height
context.blockHash = blockHash
context.block = block
// The block must insert without any errors and return the
// expected height.
if !testInsertBlock(&context) {
return
}
// The NewestSha function must return the correct information
// about the block that was just inserted.
if !testNewestSha(&context) {
return
}
// The block must pass all data integrity tests which involve
// invoking all and testing the result of all interface
// functions which deal with fetch and checking for data
// existence.
if !testIntegrity(&context) {
return
}
if !testFetchBlockShaByHeightErrors(&context) {
return
}
}
// Run the data integrity tests again after all blocks have been
// inserted to ensure the spend tracking is working properly.
context.useSpends = true
for height := int64(0); height < int64(len(blocks)); height++ {
// Get the appropriate block and hash and update the
// test context accordingly.
block := blocks[height]
blockHash, err := block.Sha()
if err != nil {
t.Errorf("block.Sha: %v", err)
return
}
context.blockHeight = height
context.blockHash = blockHash
context.block = block
testIntegrity(&context)
}
// TODO(davec): Need to figure out how to handle the special checks
// required for the duplicate transactions allowed by blocks 91842 and
// 91880 on the main network due to the old miner + Satoshi client bug.
// TODO(davec): Add tests for error conditions:
/*
- Don't allow duplicate blocks
- Don't allow insertion of block that contains a transaction that
already exists unless the previous one is fully spent
- Don't allow block that has a duplicate transaction in itself
- Don't allow block which contains a tx that references a missing tx
- Don't allow block which contains a tx that references another tx
that comes after it in the same block
*/
// TODO(davec): Add tests for the following functions:
/*
- Close()
- DropAfterBlockBySha(*btcwire.ShaHash) (err error)
x ExistsSha(sha *btcwire.ShaHash) (exists bool)
x FetchBlockBySha(sha *btcwire.ShaHash) (blk *btcutil.Block, err error)
x FetchBlockShaByHeight(height int64) (sha *btcwire.ShaHash, err error)
- FetchHeightRange(startHeight, endHeight int64) (rshalist []btcwire.ShaHash, err error)
x ExistsTxSha(sha *btcwire.ShaHash) (exists bool)
x FetchTxBySha(txsha *btcwire.ShaHash) ([]*TxListReply, error)
x FetchTxByShaList(txShaList []*btcwire.ShaHash) []*TxListReply
x FetchUnSpentTxByShaList(txShaList []*btcwire.ShaHash) []*TxListReply
x InsertBlock(block *btcutil.Block) (height int64, err error)
x NewestSha() (sha *btcwire.ShaHash, height int64, err error)
- RollbackClose()
- Sync()
*/
}

330
database/ldb/block.go Normal file

@ -0,0 +1,330 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb
import (
"bytes"
"encoding/binary"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"github.com/btcsuite/goleveldb/leveldb"
)
// FetchBlockBySha - return a btcutil Block
func (db *LevelDb) FetchBlockBySha(sha *btcwire.ShaHash) (blk *btcutil.Block, err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.fetchBlockBySha(sha)
}
// fetchBlockBySha - return a btcutil Block
// Must be called with db lock held.
func (db *LevelDb) fetchBlockBySha(sha *btcwire.ShaHash) (blk *btcutil.Block, err error) {
buf, height, err := db.fetchSha(sha)
if err != nil {
return
}
blk, err = btcutil.NewBlockFromBytes(buf)
if err != nil {
return
}
blk.SetHeight(height)
return
}
// FetchBlockHeightBySha returns the block height for the given hash. This is
// part of the database.Db interface implementation.
func (db *LevelDb) FetchBlockHeightBySha(sha *btcwire.ShaHash) (int64, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.getBlkLoc(sha)
}
// FetchBlockHeaderBySha - return a btcwire.BlockHeader for the given sha
func (db *LevelDb) FetchBlockHeaderBySha(sha *btcwire.ShaHash) (bh *btcwire.BlockHeader, err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
// Read the raw block from the database.
buf, _, err := db.fetchSha(sha)
if err != nil {
return nil, err
}
// Only deserialize the header portion of the raw block since that is
// all that is needed here.
var blockHeader btcwire.BlockHeader
err = blockHeader.Deserialize(bytes.NewReader(buf))
if err != nil {
return nil, err
}
bh = &blockHeader
return bh, err
}
func (db *LevelDb) getBlkLoc(sha *btcwire.ShaHash) (int64, error) {
key := shaBlkToKey(sha)
data, err := db.lDb.Get(key, db.ro)
if err != nil {
if err == leveldb.ErrNotFound {
err = database.ErrBlockShaMissing
}
return 0, err
}
// deserialize
blkHeight := binary.LittleEndian.Uint64(data)
return int64(blkHeight), nil
}
func (db *LevelDb) getBlkByHeight(blkHeight int64) (rsha *btcwire.ShaHash, rbuf []byte, err error) {
var blkVal []byte
key := int64ToKey(blkHeight)
blkVal, err = db.lDb.Get(key, db.ro)
if err != nil {
log.Tracef("failed to find height %v", blkHeight)
return // exists ???
}
var sha btcwire.ShaHash
sha.SetBytes(blkVal[0:32])
blockdata := make([]byte, len(blkVal[32:]))
copy(blockdata[:], blkVal[32:])
return &sha, blockdata, nil
}
func (db *LevelDb) getBlk(sha *btcwire.ShaHash) (rblkHeight int64, rbuf []byte, err error) {
var blkHeight int64
blkHeight, err = db.getBlkLoc(sha)
if err != nil {
return
}
var buf []byte
_, buf, err = db.getBlkByHeight(blkHeight)
if err != nil {
return
}
return blkHeight, buf, nil
}
func (db *LevelDb) setBlk(sha *btcwire.ShaHash, blkHeight int64, buf []byte) {
// serialize
var lw [8]byte
binary.LittleEndian.PutUint64(lw[0:8], uint64(blkHeight))
shaKey := shaBlkToKey(sha)
blkKey := int64ToKey(blkHeight)
shaB := sha.Bytes()
blkVal := make([]byte, len(shaB)+len(buf))
copy(blkVal[0:], shaB)
copy(blkVal[len(shaB):], buf)
db.lBatch().Put(shaKey, lw[:])
db.lBatch().Put(blkKey, blkVal)
}
// insertBlockData stores a block hash and its associated data block with a
// previous sha of `prevSha'.
// insertBlockData shall be called with db lock held
func (db *LevelDb) insertBlockData(sha *btcwire.ShaHash, prevSha *btcwire.ShaHash, buf []byte) (int64, error) {
oBlkHeight, err := db.getBlkLoc(prevSha)
if err != nil {
// check current block count
// if count != 0 {
// err = database.PrevShaMissing
// return
// }
oBlkHeight = -1
if db.nextBlock != 0 {
return 0, err
}
}
// TODO(drahn) check curfile filesize, increment curfile if this puts it over
blkHeight := oBlkHeight + 1
db.setBlk(sha, blkHeight, buf)
// update the last block cache
db.lastBlkShaCached = true
db.lastBlkSha = *sha
db.lastBlkIdx = blkHeight
db.nextBlock = blkHeight + 1
return blkHeight, nil
}
// fetchSha returns the data block and block height for the given ShaHash.
func (db *LevelDb) fetchSha(sha *btcwire.ShaHash) (rbuf []byte,
rblkHeight int64, err error) {
var blkHeight int64
var buf []byte
blkHeight, buf, err = db.getBlk(sha)
if err != nil {
return
}
return buf, blkHeight, nil
}
// ExistsSha looks up the given block hash and returns whether or not it is
// present in the database.
func (db *LevelDb) ExistsSha(sha *btcwire.ShaHash) (bool, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
// not in cache, try database
return db.blkExistsSha(sha)
}
// blkExistsSha looks up the given block hash and returns whether or not it
// is present in the database.
// CALLED WITH LOCK HELD
func (db *LevelDb) blkExistsSha(sha *btcwire.ShaHash) (bool, error) {
_, err := db.getBlkLoc(sha)
switch err {
case nil:
return true, nil
case leveldb.ErrNotFound, database.ErrBlockShaMissing:
return false, nil
}
return false, err
}
// FetchBlockShaByHeight returns a block hash based on its height in the
// block chain.
func (db *LevelDb) FetchBlockShaByHeight(height int64) (sha *btcwire.ShaHash, err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.fetchBlockShaByHeight(height)
}
// fetchBlockShaByHeight returns a block hash based on its height in the
// block chain.
func (db *LevelDb) fetchBlockShaByHeight(height int64) (rsha *btcwire.ShaHash, err error) {
key := int64ToKey(height)
blkVal, err := db.lDb.Get(key, db.ro)
if err != nil {
log.Tracef("failed to find height %v", height)
return // exists ???
}
var sha btcwire.ShaHash
sha.SetBytes(blkVal[0:32])
return &sha, nil
}
// FetchHeightRange looks up a range of blocks by the start and ending
// heights. Fetch is inclusive of the start height and exclusive of the
// ending height. To fetch all hashes from the start height until no
// more are present, use the special id `AllShas'.
func (db *LevelDb) FetchHeightRange(startHeight, endHeight int64) (rshalist []btcwire.ShaHash, err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
var endidx int64
if endHeight == database.AllShas {
endidx = startHeight + 500
} else {
endidx = endHeight
}
shalist := make([]btcwire.ShaHash, 0, endidx-startHeight)
for height := startHeight; height < endidx; height++ {
// TODO(drahn) fix blkFile from height
key := int64ToKey(height)
blkVal, lerr := db.lDb.Get(key, db.ro)
if lerr != nil {
break
}
var sha btcwire.ShaHash
sha.SetBytes(blkVal[0:32])
shalist = append(shalist, sha)
}
if err != nil {
return
}
//log.Tracef("FetchIdxRange idx %v %v returned %v shas err %v", startHeight, endHeight, len(shalist), err)
return shalist, nil
}
// NewestSha returns the hash and block height of the most recent (end) block of
// the block chain. It will return the zero hash, -1 for the block height, and
// no error (nil) if there are not any blocks in the database yet.
func (db *LevelDb) NewestSha() (rsha *btcwire.ShaHash, rblkid int64, err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
if db.lastBlkIdx == -1 {
return &btcwire.ShaHash{}, -1, nil
}
sha := db.lastBlkSha
return &sha, db.lastBlkIdx, nil
}
// fetchAddrIndexTip returns the last block height and block sha to be indexed.
// Meta-data about the address tip is currently cached in memory, and will be
// updated accordingly by functions that modify the state. This function is
// used on start up to load the info into memory. Callers will use the public
// version of this function below, which returns our cached copy.
func (db *LevelDb) fetchAddrIndexTip() (*btcwire.ShaHash, int64, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
data, err := db.lDb.Get(addrIndexMetaDataKey, db.ro)
if err != nil {
return &btcwire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist
}
var blkSha btcwire.ShaHash
blkSha.SetBytes(data[0:32])
blkHeight := binary.LittleEndian.Uint64(data[32:])
return &blkSha, int64(blkHeight), nil
}
// FetchAddrIndexTip returns the hash and block height of the most recent
// block whose transactions have been indexed by address. It will return
// ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the
// addrindex hasn't yet been built up.
func (db *LevelDb) FetchAddrIndexTip() (*btcwire.ShaHash, int64, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
if db.lastAddrIndexBlkIdx == -1 {
return &btcwire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist
}
sha := db.lastAddrIndexBlkSha
return &sha, db.lastAddrIndexBlkIdx, nil
}


@ -0,0 +1,63 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"os"
"testing"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcwire"
)
// we need to test for an empty database and make certain it returns the proper
// values
func TestEmptyDB(t *testing.T) {
dbname := "tstdbempty"
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer os.RemoveAll(dbname)
defer os.RemoveAll(dbnamever)
sha, height, err := db.NewestSha()
if !sha.IsEqual(&btcwire.ShaHash{}) {
t.Errorf("sha not zero hash")
}
if height != -1 {
t.Errorf("height not -1 %v", height)
}
// This is a reopen test
if err := db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
db, err = database.OpenDB("leveldb", dbname)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha, height, err = db.NewestSha()
if !sha.IsEqual(&btcwire.ShaHash{}) {
t.Errorf("sha not zero hash")
}
if height != -1 {
t.Errorf("height not -1 %v", height)
}
}


@ -0,0 +1,58 @@
//
package main
import (
"fmt"
"github.com/btcsuite/goleveldb/leveldb"
"github.com/btcsuite/goleveldb/leveldb/opt"
)
type tst struct {
key int
value string
}
var dataset = []tst{
//var dataset = []struct { key int, value string } {
{1, "one"},
{2, "two"},
{3, "three"},
{4, "four"},
{5, "five"},
}
func main() {
ro := &opt.ReadOptions{}
wo := &opt.WriteOptions{}
opts := &opt.Options{}
ldb, err := leveldb.OpenFile("dbfile", opts)
if err != nil {
fmt.Printf("db open failed %v\n", err)
return
}
batch := new(leveldb.Batch)
for _, datum := range dataset {
key := fmt.Sprintf("%v", datum.key)
batch.Put([]byte(key), []byte(datum.value))
}
err = ldb.Write(batch, wo)
if err != nil {
fmt.Printf("db write failed %v\n", err)
return
}
for _, datum := range dataset {
key := fmt.Sprintf("%v", datum.key)
data, err := ldb.Get([]byte(key), ro)
if err != nil {
fmt.Printf("db read failed %v\n", err)
}
if string(data) != datum.value {
fmt.Printf("mismatched data from db key %v val %v db %v", key, datum.value, data)
}
}
fmt.Printf("completed\n")
ldb.Close()
}

14
database/ldb/doc.go Normal file

@ -0,0 +1,14 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package ldb implements an instance of the database package backed by leveldb.
The database version number is stored in a flat file <dbname>.ver.
Currently this is a single (little-endian) integer in the file. If there is
additional data to save in the future, its presence can be indicated by
changing the version number, then parsing the file differently.
*/
package ldb

185
database/ldb/dup_test.go Normal file
View File

@ -0,0 +1,185 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
)
func Test_dupTx(t *testing.T) {
// Ignore db remove errors since it means we didn't have an old one.
dbname := fmt.Sprintf("tstdbdup0")
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer os.RemoveAll(dbname)
defer os.RemoveAll(dbnamever)
defer func() {
if err := db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
testdatafile := filepath.Join("testdata", "blocks1-256.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
t.Errorf("Unable to load blocks from test data for: %v",
err)
return
}
var lastSha *btcwire.ShaHash
// Populate with the first 256 blocks, so we have blocks to 'mess with'
err = nil
out:
for height := int64(0); height < int64(len(blocks)); height++ {
block := blocks[height]
// Except for NoVerify, which does not allow lookups, check inputs.
mblock := block.MsgBlock()
var txneededList []*btcwire.ShaHash
for _, tx := range mblock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v ", origintxsha)
}
_, err = db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
newheight, err := db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := db.NewestSha()
if err != nil {
t.Errorf("failed to obtain latest sha %v %v", height, err)
}
if blkid != height {
t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err)
}
blkSha, _ := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
lastSha = blkSha
}
// generate a new block based on the last sha
// these blocks are not verified, so there are a bunch of garbage fields
// in the 'generated' block.
var bh btcwire.BlockHeader
bh.Version = 2
bh.PrevBlock = *lastSha
// Bits, Nonce are not filled in
mblk := btcwire.NewMsgBlock(&bh)
hash, _ := btcwire.NewShaHashFromStr("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a")
po := btcwire.NewOutPoint(hash, 0)
txI := btcwire.NewTxIn(po, []byte("garbage"))
txO := btcwire.NewTxOut(50000000, []byte("garbageout"))
var tx btcwire.MsgTx
tx.AddTxIn(txI)
tx.AddTxOut(txO)
mblk.AddTransaction(&tx)
blk := btcutil.NewBlock(mblk)
fetchList := []*btcwire.ShaHash{hash}
listReply := db.FetchUnSpentTxByShaList(fetchList)
for _, lr := range listReply {
if lr.Err != nil {
t.Errorf("sha %v spent %v err %v\n", lr.Sha,
lr.TxSpent, lr.Err)
}
}
_, err = db.InsertBlock(blk)
if err != nil {
t.Errorf("failed to insert phony block %v", err)
}
// ok, did it 'spend' the tx ?
listReply = db.FetchUnSpentTxByShaList(fetchList)
for _, lr := range listReply {
if lr.Err != database.ErrTxShaMissing {
t.Errorf("sha %v spent %v err %v\n", lr.Sha,
lr.TxSpent, lr.Err)
}
}
txlist := blk.Transactions()
for _, tx := range txlist {
txsha := tx.Sha()
txReply, err := db.FetchTxBySha(txsha)
if err != nil {
t.Errorf("fully spent lookup %v err %v\n", hash, err)
} else {
for _, lr := range txReply {
if lr.Err != nil {
fmt.Errorf("stx %v spent %v err %v\n", lr.Sha,
lr.TxSpent, lr.Err)
}
}
}
}
t.Logf("Dropping block")
err = db.DropAfterBlockBySha(lastSha)
if err != nil {
t.Errorf("failed to drop spending block %v", err)
}
}

View File

@ -0,0 +1,199 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/btcsuite/btcd/database"
_ "github.com/btcsuite/btcd/database/ldb"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
)
var tstBlocks []*btcutil.Block
func loadblocks(t *testing.T) []*btcutil.Block {
if len(tstBlocks) != 0 {
return tstBlocks
}
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
t.Errorf("Unable to load blocks from test data: %v", err)
return nil
}
tstBlocks = blocks
return blocks
}
func TestUnspentInsert(t *testing.T) {
testUnspentInsert(t)
}
// insert every block in the test chain
// after each insert, fetch all the tx affected by the latest
// block and verify that the tx is spent/unspent
// new tx should be fully unspent, referenced tx should have
// the associated txout set to spent.
func testUnspentInsert(t *testing.T) {
// Ignore db remove errors since it means we didn't have an old one.
dbname := fmt.Sprintf("tstdbuspnt1")
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer os.RemoveAll(dbname)
defer os.RemoveAll(dbnamever)
defer func() {
if err := db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
blocks := loadblocks(t)
endtest:
for height := int64(0); height < int64(len(blocks)); height++ {
block := blocks[height]
// look up inputs to this tx
mblock := block.MsgBlock()
var txneededList []*btcwire.ShaHash
var txlookupList []*btcwire.ShaHash
var txOutList []*btcwire.ShaHash
var txInList []*btcwire.OutPoint
for _, tx := range mblock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
origintxsha := &txin.PreviousOutPoint.Hash
txInList = append(txInList, &txin.PreviousOutPoint)
txneededList = append(txneededList, origintxsha)
txlookupList = append(txlookupList, origintxsha)
exists, err := db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v ", origintxsha)
}
}
txshaname, _ := tx.TxSha()
txlookupList = append(txlookupList, &txshaname)
txOutList = append(txOutList, &txshaname)
}
txneededmap := map[btcwire.ShaHash]*database.TxListReply{}
txlist := db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break endtest
}
txneededmap[*txe.Sha] = txe
}
for _, spend := range txInList {
itxe := txneededmap[spend.Hash]
if itxe.TxSpent[spend.Index] == true {
t.Errorf("txin %v:%v is already spent", spend.Hash, spend.Index)
}
}
newheight, err := db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break endtest
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break endtest
}
txlookupmap := map[btcwire.ShaHash]*database.TxListReply{}
txlist = db.FetchTxByShaList(txlookupList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break endtest
}
txlookupmap[*txe.Sha] = txe
}
for _, spend := range txInList {
itxe := txlookupmap[spend.Hash]
if itxe.TxSpent[spend.Index] == false {
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
}
}
for _, txo := range txOutList {
itxe := txlookupmap[*txo]
for i, spent := range itxe.TxSpent {
if spent == true {
t.Errorf("freshly inserted tx %v already spent %v", txo, i)
}
}
}
if len(txInList) == 0 {
continue
}
dropblock := blocks[height-1]
dropsha, _ := dropblock.Sha()
err = db.DropAfterBlockBySha(dropsha)
if err != nil {
t.Errorf("failed to drop block %v err %v", height, err)
break endtest
}
txlookupmap = map[btcwire.ShaHash]*database.TxListReply{}
txlist = db.FetchUnSpentTxByShaList(txlookupList)
for _, txe := range txlist {
if txe.Err != nil {
if _, ok := txneededmap[*txe.Sha]; ok {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break endtest
}
}
txlookupmap[*txe.Sha] = txe
}
for _, spend := range txInList {
itxe := txlookupmap[spend.Hash]
if itxe.TxSpent[spend.Index] == true {
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
}
}
newheight, err = db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break endtest
}
txlookupmap = map[btcwire.ShaHash]*database.TxListReply{}
txlist = db.FetchTxByShaList(txlookupList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break endtest
}
txlookupmap[*txe.Sha] = txe
}
for _, spend := range txInList {
itxe := txlookupmap[spend.Hash]
if itxe.TxSpent[spend.Index] == false {
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
}
}
}
}

View File

@ -0,0 +1,64 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb
import (
"bytes"
"testing"
"github.com/btcsuite/btcutil"
"golang.org/x/crypto/ripemd160"
)
func TestAddrIndexKeySerialization(t *testing.T) {
var hash160Bytes [ripemd160.Size]byte
fakeHash160 := btcutil.Hash160([]byte("testing"))
copy(hash160Bytes[:], fakeHash160)
fakeIndex := txAddrIndex{
hash160: hash160Bytes,
blkHeight: 1,
txoffset: 5,
txlen: 360,
}
serializedKey := addrIndexToKey(&fakeIndex)
unpackedIndex := unpackTxIndex(serializedKey[22:])
if unpackedIndex.blkHeight != fakeIndex.blkHeight {
t.Errorf("Incorrect block height. Unpack addr index key"+
"serialization failed. Expected %d, received %d",
1, unpackedIndex.blkHeight)
}
if unpackedIndex.txoffset != fakeIndex.txoffset {
t.Errorf("Incorrect tx offset. Unpack addr index key"+
"serialization failed. Expected %d, received %d",
5, unpackedIndex.txoffset)
}
if unpackedIndex.txlen != fakeIndex.txlen {
t.Errorf("Incorrect tx len. Unpack addr index key"+
"serialization failed. Expected %d, received %d",
360, unpackedIndex.txlen)
}
}
func TestBytesPrefix(t *testing.T) {
testKey := []byte("a")
prefixRange := bytesPrefix(testKey)
if !bytes.Equal(prefixRange.Start, []byte("a")) {
t.Errorf("Wrong prefix start, got %d, expected %d", prefixRange.Start,
[]byte("a"))
}
if !bytes.Equal(prefixRange.Limit, []byte("b")) {
t.Errorf("Wrong prefix end, got %d, expected %d", prefixRange.Limit,
[]byte("b"))
}
}

705
database/ldb/leveldb.go Normal file
View File

@ -0,0 +1,705 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb
import (
"encoding/binary"
"fmt"
"os"
"strconv"
"sync"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btclog"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"github.com/btcsuite/goleveldb/leveldb"
"github.com/btcsuite/goleveldb/leveldb/opt"
)
const (
dbVersion int = 2
dbMaxTransCnt = 20000
dbMaxTransMem = 64 * 1024 * 1024 // 64 MB
)
var log = btclog.Disabled
type tTxInsertData struct {
txsha *btcwire.ShaHash
blockid int64
txoff int
txlen int
usedbuf []byte
}
// LevelDb holds internal state for the database.
type LevelDb struct {
// lock preventing multiple entry
dbLock sync.Mutex
// leveldb pieces
lDb *leveldb.DB
ro *opt.ReadOptions
wo *opt.WriteOptions
lbatch *leveldb.Batch
nextBlock int64
lastBlkShaCached bool
lastBlkSha btcwire.ShaHash
lastBlkIdx int64
lastAddrIndexBlkSha btcwire.ShaHash
lastAddrIndexBlkIdx int64
txUpdateMap map[btcwire.ShaHash]*txUpdateObj
txSpentUpdateMap map[btcwire.ShaHash]*spentTxUpdate
}
var self = database.DriverDB{DbType: "leveldb", CreateDB: CreateDB, OpenDB: OpenDB}
func init() {
database.AddDBDriver(self)
}
// parseArgs parses the arguments from the database package Open/Create methods.
func parseArgs(funcName string, args ...interface{}) (string, error) {
if len(args) != 1 {
return "", fmt.Errorf("Invalid arguments to ldb.%s -- "+
"expected database path string", funcName)
}
dbPath, ok := args[0].(string)
if !ok {
return "", fmt.Errorf("First argument to ldb.%s is invalid -- "+
"expected database path string", funcName)
}
return dbPath, nil
}
// OpenDB opens an existing database for use.
func OpenDB(args ...interface{}) (database.Db, error) {
dbpath, err := parseArgs("OpenDB", args...)
if err != nil {
return nil, err
}
log = database.GetLog()
db, err := openDB(dbpath, false)
if err != nil {
return nil, err
}
// Need to find last block and tx
var lastknownblock, nextunknownblock, testblock int64
increment := int64(100000)
ldb := db.(*LevelDb)
var lastSha *btcwire.ShaHash
// forward scan
blockforward:
for {
sha, err := ldb.fetchBlockShaByHeight(testblock)
if err == nil {
// block is found
lastSha = sha
lastknownblock = testblock
testblock += increment
} else {
if testblock == 0 {
//no blocks in db, odd but ok.
lastknownblock = -1
nextunknownblock = 0
var emptysha btcwire.ShaHash
lastSha = &emptysha
} else {
nextunknownblock = testblock
}
break blockforward
}
}
// narrow search
blocknarrow:
for lastknownblock != -1 {
testblock = (lastknownblock + nextunknownblock) / 2
sha, err := ldb.fetchBlockShaByHeight(testblock)
if err == nil {
lastknownblock = testblock
lastSha = sha
} else {
nextunknownblock = testblock
}
if lastknownblock+1 == nextunknownblock {
break blocknarrow
}
}
// Load the last block whose transactions have been indexed by address.
if sha, idx, err := ldb.fetchAddrIndexTip(); err == nil {
ldb.lastAddrIndexBlkSha = *sha
ldb.lastAddrIndexBlkIdx = idx
} else {
ldb.lastAddrIndexBlkIdx = -1
}
ldb.lastBlkSha = *lastSha
ldb.lastBlkIdx = lastknownblock
ldb.nextBlock = lastknownblock + 1
return db, nil
}
// CurrentDBVersion is the database version.
var CurrentDBVersion int32 = 1
func openDB(dbpath string, create bool) (pbdb database.Db, err error) {
var db LevelDb
var tlDb *leveldb.DB
var dbversion int32
defer func() {
if err == nil {
db.lDb = tlDb
db.txUpdateMap = map[btcwire.ShaHash]*txUpdateObj{}
db.txSpentUpdateMap = make(map[btcwire.ShaHash]*spentTxUpdate)
pbdb = &db
}
}()
if create == true {
err = os.Mkdir(dbpath, 0750)
if err != nil {
log.Errorf("mkdir failed %v %v", dbpath, err)
return
}
} else {
_, err = os.Stat(dbpath)
if err != nil {
err = database.ErrDbDoesNotExist
return
}
}
needVersionFile := false
verfile := dbpath + ".ver"
fi, ferr := os.Open(verfile)
if ferr == nil {
defer fi.Close()
ferr = binary.Read(fi, binary.LittleEndian, &dbversion)
if ferr != nil {
dbversion = ^0
}
} else {
if create == true {
needVersionFile = true
dbversion = CurrentDBVersion
}
}
opts := &opt.Options{
BlockCacher: opt.DefaultBlockCacher,
Compression: opt.NoCompression,
OpenFilesCacher: opt.DefaultOpenFilesCacher,
}
switch dbversion {
case 0:
opts = &opt.Options{}
case 1:
// uses defaults from above
default:
err = fmt.Errorf("unsupported db version %v", dbversion)
return
}
tlDb, err = leveldb.OpenFile(dbpath, opts)
if err != nil {
return
}
// If we opened the database successfully on 'create',
// write out the version file.
if needVersionFile {
fo, ferr := os.Create(verfile)
if ferr != nil {
// TODO(design) close and delete database?
err = ferr
return
}
defer fo.Close()
err = binary.Write(fo, binary.LittleEndian, dbversion)
if err != nil {
return
}
}
return
}
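// Illustrative sketch only: readDbVersion is a hypothetical helper (not
// called by the driver) showing how the flat <dbname>.ver file handled in
// openDB above is laid out -- a single little-endian int32 version number.
func readDbVersion(dbpath string) (int32, error) {
var version int32
fi, err := os.Open(dbpath + ".ver")
if err != nil {
return 0, err
}
defer fi.Close()
err = binary.Read(fi, binary.LittleEndian, &version)
return version, err
}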
// CreateDB creates, initializes and opens a database for use.
func CreateDB(args ...interface{}) (database.Db, error) {
dbpath, err := parseArgs("Create", args...)
if err != nil {
return nil, err
}
log = database.GetLog()
// No special setup needed, just openDB with create set.
db, err := openDB(dbpath, true)
if err == nil {
ldb := db.(*LevelDb)
ldb.lastBlkIdx = -1
ldb.lastAddrIndexBlkIdx = -1
ldb.nextBlock = 0
}
return db, err
}
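// Usage sketch: callers normally go through the database package rather
// than this driver directly; the "leveldb" driver registered in init routes
// back to CreateDB/OpenDB here, e.g. (the path shown is illustrative):
//
// db, err := database.CreateDB("leveldb", "/path/to/blockdb")
//
// which is exactly how the tests in this commit create their databases.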
func (db *LevelDb) close() error {
return db.lDb.Close()
}
// Sync verifies that the database is coherent on disk,
// and no outstanding transactions are in flight.
func (db *LevelDb) Sync() error {
db.dbLock.Lock()
defer db.dbLock.Unlock()
// While specified by the API, this currently does nothing. It does,
// however, grab the lock so it does not return until other operations are complete.
return nil
}
// Close cleanly shuts down database, syncing all data.
func (db *LevelDb) Close() error {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.close()
}
// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *btcwire.ShaHash) (rerr error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
defer func() {
if rerr == nil {
rerr = db.processBatches()
} else {
db.lBatch().Reset()
}
}()
startheight := db.nextBlock - 1
keepidx, err := db.getBlkLoc(sha)
if err != nil {
// should the error here be normalized ?
log.Tracef("block loc failed %v ", sha)
return err
}
for height := startheight; height > keepidx; height = height - 1 {
var blk *btcutil.Block
blksha, buf, err := db.getBlkByHeight(height)
if err != nil {
return err
}
blk, err = btcutil.NewBlockFromBytes(buf)
if err != nil {
return err
}
for _, tx := range blk.MsgBlock().Transactions {
err = db.unSpend(tx)
if err != nil {
return err
}
}
// rather than iterate the list of tx backward, do it twice.
for _, tx := range blk.Transactions() {
var txUo txUpdateObj
txUo.delete = true
db.txUpdateMap[*tx.Sha()] = &txUo
}
db.lBatch().Delete(shaBlkToKey(blksha))
db.lBatch().Delete(int64ToKey(height))
}
db.nextBlock = keepidx + 1
return nil
}
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
defer func() {
if rerr == nil {
rerr = db.processBatches()
} else {
db.lBatch().Reset()
}
}()
blocksha, err := block.Sha()
if err != nil {
log.Warnf("Failed to compute block sha %v", blocksha)
return 0, err
}
mblock := block.MsgBlock()
rawMsg, err := block.Bytes()
if err != nil {
log.Warnf("Failed to obtain raw block sha %v", blocksha)
return 0, err
}
txloc, err := block.TxLoc()
if err != nil {
log.Warnf("Failed to obtain raw block sha %v", blocksha)
return 0, err
}
// Insert block into database
newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
rawMsg)
if err != nil {
log.Warnf("Failed to insert block %v %v %v", blocksha,
&mblock.Header.PrevBlock, err)
return 0, err
}
// At least two blocks in the distant past were generated by faulty
// miners such that the sha of a transaction already exists in a previous
// block. Detect this condition and 'accept' the block.
for txidx, tx := range mblock.Transactions {
txsha, err := block.TxSha(txidx)
if err != nil {
log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err)
return 0, err
}
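// One bit per output: spentbuf is sized to hold len(tx.TxOut) bits, and any
// unused high bits of the final byte are pre-set below so the tx reads as
// fully spent once only its real outputs are marked (e.g. a tx with 3
// outputs gets a single byte with bits 3-7 already set).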
spentbuflen := (len(tx.TxOut) + 7) / 8
spentbuf := make([]byte, spentbuflen, spentbuflen)
if len(tx.TxOut)%8 != 0 {
for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
spentbuf[spentbuflen-1] |= (byte(1) << i)
}
}
err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf)
if err != nil {
log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
return 0, err
}
// Some old blocks contain duplicate transactions
// Attempt to cleanly bypass this problem by marking the
// first as fully spent.
// http://blockexplorer.com/b/91812 dup in 91842
// http://blockexplorer.com/b/91722 dup in 91880
if newheight == 91812 {
dupsha, err := btcwire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
if err != nil {
panic("invalid sha string in source")
}
if txsha.IsEqual(dupsha) {
// marking TxOut[0] as spent
po := btcwire.NewOutPoint(dupsha, 0)
txI := btcwire.NewTxIn(po, []byte("garbage"))
var spendtx btcwire.MsgTx
spendtx.AddTxIn(txI)
err = db.doSpend(&spendtx)
if err != nil {
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
}
}
}
if newheight == 91722 {
dupsha, err := btcwire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
if err != nil {
panic("invalid sha string in source")
}
if txsha.IsEqual(dupsha) {
// marking TxOut[0] as spent
po := btcwire.NewOutPoint(dupsha, 0)
txI := btcwire.NewTxIn(po, []byte("garbage"))
var spendtx btcwire.MsgTx
spendtx.AddTxIn(txI)
err = db.doSpend(&spendtx)
if err != nil {
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
}
}
}
err = db.doSpend(tx)
if err != nil {
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err)
return 0, err
}
}
return newheight, nil
}
// doSpend iterates all TxIn in a bitcoin transaction marking each associated
// TxOut as spent.
func (db *LevelDb) doSpend(tx *btcwire.MsgTx) error {
for txinidx := range tx.TxIn {
txin := tx.TxIn[txinidx]
inTxSha := txin.PreviousOutPoint.Hash
inTxidx := txin.PreviousOutPoint.Index
if inTxidx == ^uint32(0) {
continue
}
//log.Infof("spending %v %v", &inTxSha, inTxidx)
err := db.setSpentData(&inTxSha, inTxidx)
if err != nil {
return err
}
}
return nil
}
// unSpend iterates all TxIn in a bitcoin transaction marking each associated
// TxOut as unspent.
func (db *LevelDb) unSpend(tx *btcwire.MsgTx) error {
for txinidx := range tx.TxIn {
txin := tx.TxIn[txinidx]
inTxSha := txin.PreviousOutPoint.Hash
inTxidx := txin.PreviousOutPoint.Index
if inTxidx == ^uint32(0) {
continue
}
err := db.clearSpentData(&inTxSha, inTxidx)
if err != nil {
return err
}
}
return nil
}
func (db *LevelDb) setSpentData(sha *btcwire.ShaHash, idx uint32) error {
return db.setclearSpentData(sha, idx, true)
}
func (db *LevelDb) clearSpentData(sha *btcwire.ShaHash, idx uint32) error {
return db.setclearSpentData(sha, idx, false)
}
func (db *LevelDb) setclearSpentData(txsha *btcwire.ShaHash, idx uint32, set bool) error {
var txUo *txUpdateObj
var ok bool
if txUo, ok = db.txUpdateMap[*txsha]; !ok {
// not cached, load from db
var txU txUpdateObj
blkHeight, txOff, txLen, spentData, err := db.getTxData(txsha)
if err != nil {
// setting a fully spent tx is an error.
if set == true {
return err
}
// if we are clearing a tx and it wasn't found
// in the tx table, it could be in the fully spent
// (duplicates) table.
spentTxList, err := db.getTxFullySpent(txsha)
if err != nil {
return err
}
// need to reslice the list to exclude the most recent.
sTx := spentTxList[len(spentTxList)-1]
spentTxList[len(spentTxList)-1] = nil
if len(spentTxList) == 1 {
// write entry to delete tx from spent pool
// XXX
} else {
spentTxList = spentTxList[:len(spentTxList)-1]
// XXX format sTxList and set update Table
}
// Create 'new' Tx update data.
blkHeight = sTx.blkHeight
txOff = sTx.txoff
txLen = sTx.txlen
spentbuflen := (sTx.numTxO + 7) / 8
spentData = make([]byte, spentbuflen, spentbuflen)
for i := range spentData {
spentData[i] = ^byte(0)
}
}
txU.txSha = txsha
txU.blkHeight = blkHeight
txU.txoff = txOff
txU.txlen = txLen
txU.spentData = spentData
txUo = &txU
}
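// Locate the output's bit within the spent bitmap; for example, output
// index 10 lives at bit 2 of byte 1 of spentData.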
byteidx := idx / 8
byteoff := idx % 8
if set {
txUo.spentData[byteidx] |= (byte(1) << byteoff)
} else {
txUo.spentData[byteidx] &= ^(byte(1) << byteoff)
}
// check for fully spent Tx
fullySpent := true
for _, val := range txUo.spentData {
if val != ^byte(0) {
fullySpent = false
break
}
}
if fullySpent {
var txSu *spentTxUpdate
// Look up Tx in fully spent table
if txSuOld, ok := db.txSpentUpdateMap[*txsha]; ok {
txSu = txSuOld
} else {
var txSuStore spentTxUpdate
txSu = &txSuStore
txSuOld, err := db.getTxFullySpent(txsha)
if err == nil {
txSu.txl = txSuOld
}
}
// Fill in spentTx
var sTx spentTx
sTx.blkHeight = txUo.blkHeight
sTx.txoff = txUo.txoff
sTx.txlen = txUo.txlen
// XXX -- there is no way to compute the real number of TxOuts
// from the spent array.
sTx.numTxO = 8 * len(txUo.spentData)
// append this txdata to fully spent txlist
txSu.txl = append(txSu.txl, &sTx)
// mark txsha as deleted in the txUpdateMap
log.Tracef("***tx %v is fully spent\n", txsha)
db.txSpentUpdateMap[*txsha] = txSu
txUo.delete = true
db.txUpdateMap[*txsha] = txUo
} else {
db.txUpdateMap[*txsha] = txUo
}
return nil
}
func int64ToKey(keyint int64) []byte {
key := strconv.FormatInt(keyint, 10)
return []byte(key)
}
func shaBlkToKey(sha *btcwire.ShaHash) []byte {
shaB := sha.Bytes()
return shaB
}
func shaTxToKey(sha *btcwire.ShaHash) []byte {
shaB := sha.Bytes()
shaB = append(shaB, "tx"...)
return shaB
}
func shaSpentTxToKey(sha *btcwire.ShaHash) []byte {
shaB := sha.Bytes()
shaB = append(shaB, "sx"...)
return shaB
}
func (db *LevelDb) lBatch() *leveldb.Batch {
if db.lbatch == nil {
db.lbatch = new(leveldb.Batch)
}
return db.lbatch
}
func (db *LevelDb) processBatches() error {
var err error
if len(db.txUpdateMap) != 0 || len(db.txSpentUpdateMap) != 0 || db.lbatch != nil {
if db.lbatch == nil {
db.lbatch = new(leveldb.Batch)
}
defer db.lbatch.Reset()
for txSha, txU := range db.txUpdateMap {
key := shaTxToKey(&txSha)
if txU.delete {
//log.Tracef("deleting tx %v", txSha)
db.lbatch.Delete(key)
} else {
//log.Tracef("inserting tx %v", txSha)
txdat := db.formatTx(txU)
db.lbatch.Put(key, txdat)
}
}
for txSha, txSu := range db.txSpentUpdateMap {
key := shaSpentTxToKey(&txSha)
if txSu.delete {
//log.Tracef("deleting tx %v", txSha)
db.lbatch.Delete(key)
} else {
//log.Tracef("inserting tx %v", txSha)
txdat := db.formatTxFullySpent(txSu.txl)
db.lbatch.Put(key, txdat)
}
}
err = db.lDb.Write(db.lbatch, db.wo)
if err != nil {
log.Tracef("batch failed %v\n", err)
return err
}
db.txUpdateMap = map[btcwire.ShaHash]*txUpdateObj{}
db.txSpentUpdateMap = make(map[btcwire.ShaHash]*spentTxUpdate)
}
return nil
}
// RollbackClose is part of the database.Db interface and should discard
// recent changes to the db and then close the db. This currently just does
// a clean shutdown.
func (db *LevelDb) RollbackClose() error {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.close()
}

View File

@ -0,0 +1,597 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"bytes"
"compress/bzip2"
"encoding/binary"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcnet"
"github.com/btcsuite/btcscript"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"golang.org/x/crypto/ripemd160"
)
var network = btcwire.MainNet
// testDb is used to store db related context for a running test.
// the `cleanUpFunc` *must* be called after each test to maintain db
// consistency across tests.
type testDb struct {
db database.Db
blocks []*btcutil.Block
dbName string
dbNameVer string
cleanUpFunc func()
}
func setUpTestDb(t *testing.T) (*testDb, error) {
// Ignore db remove errors since it means we didn't have an old one.
dbname := fmt.Sprintf("tstdbop1")
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
return nil, err
}
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
return nil, err
}
cleanUp := func() {
db.Close()
os.RemoveAll(dbname)
os.RemoveAll(dbnamever)
}
return &testDb{
db: db,
blocks: blocks,
dbName: dbname,
dbNameVer: dbnamever,
cleanUpFunc: cleanUp,
}, nil
}
func TestOperational(t *testing.T) {
testOperationalMode(t)
}
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil.Block, newestSha *btcwire.ShaHash, newestBlockIdx int64) {
// Metadata about the current addr index state should be unset.
sha, height, err := db.FetchAddrIndexTip()
if err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
}
var zeroHash btcwire.ShaHash
if !sha.IsEqual(&zeroHash) {
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
}
// Test enforcement of constraints for "limit" and "skip"
var fakeAddr btcutil.Address
_, err = db.FetchTxsForAddr(fakeAddr, -1, 0)
if err == nil {
t.Fatalf("Negative value for skip passed, should return an error")
}
_, err = db.FetchTxsForAddr(fakeAddr, 0, -1)
if err == nil {
t.Fatalf("Negative value for limit passed, should return an error")
}
// Simple test to index the output(s) of the first tx.
testIndex := make(database.BlockAddrIndex)
testTx, err := newestBlock.Tx(0)
if err != nil {
t.Fatalf("Block has no transactions, unable to test addr "+
"indexing, err %v", err)
}
// Extract the dest addr from the tx.
_, testAddrs, _, err := btcscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].PkScript, &btcnet.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode tx output, err %v", err)
}
// Extract the hash160 from the output script.
var hash160Bytes [ripemd160.Size]byte
testHash160 := testAddrs[0].(*btcutil.AddressPubKey).AddressPubKeyHash().ScriptAddress()
copy(hash160Bytes[:], testHash160[:])
// Create a fake index.
blktxLoc, _ := newestBlock.TxLoc()
testIndex[hash160Bytes] = []*btcwire.TxLoc{&blktxLoc[0]}
// Insert our test addr index into the DB.
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newestBlockIdx, newestSha, err)
}
// Chain Tip of address should've been updated.
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Check index retrieval.
txReplies, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000)
if err != nil {
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
"address, err %v", err)
}
// Should have one reply.
if len(txReplies) != 1 {
t.Fatalf("Failed to properly index tx by address.")
}
// Our test tx and indexed tx should have the same sha.
indexedTx := txReplies[0]
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
"fetched %v", testTx.Sha(), indexedTx.Sha)
}
// Shut down DB.
db.Sync()
db.Close()
// Re-Open, tip still should be updated to current height and sha.
db, err = database.OpenDB("leveldb", "tstdbop1")
if err != nil {
t.Fatalf("Unable to re-open created db, err %v", err)
}
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Delete the entire index.
err = db.DeleteAddrIndex()
if err != nil {
t.Fatalf("Couldn't delete address index, err %v", err)
}
// Former index should no longer exist.
txReplies, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReplies) != 0 {
t.Fatalf("Address index was not successfully deleted. "+
"Should have 0 tx's indexed, %v were returned.",
len(txReplies))
}
// Tip should be blanked out.
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index was not fully deleted.")
}
}
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *btcwire.ShaHash, newestBlockIdx int64) {
// Safe to ignore error, since height will be < 0 in "error" case.
sha, height, _ := db.FetchAddrIndexTip()
if newestBlockIdx != height {
t.Fatalf("Height of address index tip failed to update, "+
"expected %v, got %v", newestBlockIdx, height)
}
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
t.Fatalf("Sha of address index tip failed to update, "+
"expected %v, got %v", newestSha, sha)
}
}
func testOperationalMode(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase) in db
// 3) insert block
// 4) exercise the optional addrindex
testDb, err := setUpTestDb(t)
if err != nil {
t.Errorf("Unable to load blocks from test data: %v", err)
return
}
defer testDb.cleanUpFunc()
err = nil
out:
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
block := testDb.blocks[height]
mblock := block.MsgBlock()
var txneededList []*btcwire.ShaHash
for _, tx := range mblock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v ", origintxsha)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := testDb.db.NewestSha()
if err != nil {
t.Errorf("failed to obtain latest sha %v %v", height, err)
}
if blkid != height {
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
}
blkSha, _ := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
}
// now that the db is populated, do some additional tests
testFetchHeightRange(t, testDb.db, testDb.blocks)
// Ensure all operations dealing with the optional address index behave
// correctly.
newSha, blkid, err := testDb.db.NewestSha()
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
}
func TestBackout(t *testing.T) {
testBackout(t)
}
func testBackout(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase) in db
// 3) insert block
testDb, err := setUpTestDb(t)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
if len(testDb.blocks) < 120 {
t.Errorf("test data too small")
return
}
err = nil
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
if height == 100 {
t.Logf("Syncing at block height 100")
testDb.db.Sync()
}
if height == 120 {
t.Logf("Simulating unexpected application quit")
// Simulate unexpected application quit
testDb.db.RollbackClose()
break
}
block := testDb.blocks[height]
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
return
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
return
}
}
// db was closed at height 120, so no cleanup is possible.
// reopen db
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := testDb.db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha, err := testDb.blocks[99].Sha()
if err != nil {
t.Errorf("failed to get block 99 sha err %v", err)
return
}
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("failed to load block 99 from db %v", err)
return
}
sha, err = testDb.blocks[119].Sha()
if err != nil {
t.Errorf("failed to get block 110 sha err %v", err)
return
}
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("loaded block 119 from db")
return
}
block := testDb.blocks[119]
mblock := block.MsgBlock()
txsha, err := mblock.Transactions[0].TxSha()
exists, err := testDb.db.ExistsTxSha(&txsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("tx %v not located db\n", txsha)
}
_, err = testDb.db.FetchTxBySha(&txsha)
if err != nil {
t.Errorf("tx %v not located db\n", txsha)
return
}
}
var savedblocks []*btcutil.Block
func loadBlocks(t *testing.T, file string) (blocks []*btcutil.Block, err error) {
if len(savedblocks) != 0 {
blocks = savedblocks
return
}
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
var dr io.Reader
var fi io.ReadCloser
fi, err = os.Open(testdatafile)
if err != nil {
t.Errorf("failed to open file %v, err %v", testdatafile, err)
return
}
if strings.HasSuffix(testdatafile, ".bz2") {
z := bzip2.NewReader(fi)
dr = z
} else {
dr = fi
}
defer func() {
if err := fi.Close(); err != nil {
t.Errorf("failed to close file %v %v", testdatafile, err)
}
}()
// Set the first block as the genesis block.
genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
blocks = append(blocks, genesis)
var block *btcutil.Block
err = nil
for height := int64(1); err == nil; height++ {
var rintbuf uint32
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
if err == io.EOF {
// hit end of file at expected offset: no warning
height--
err = nil
break
}
if err != nil {
t.Errorf("failed to load network type, err %v", err)
break
}
if rintbuf != uint32(network) {
t.Errorf("Block doesn't match network: %v expects %v",
rintbuf, network)
break
}
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
blocklen := rintbuf
rbytes := make([]byte, blocklen)
// read the block, checking for short reads
if _, err = io.ReadFull(dr, rbytes); err != nil {
t.Errorf("failed to read block %v, err %v", height, err)
break
}
block, err = btcutil.NewBlockFromBytes(rbytes)
if err != nil {
t.Errorf("failed to parse block %v", height)
return
}
blocks = append(blocks, block)
}
savedblocks = blocks
return
}
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*btcutil.Block) {
var testincrement int64 = 50
var testcnt int64 = 100
shanames := make([]*btcwire.ShaHash, len(blocks))
nBlocks := int64(len(blocks))
for i := range blocks {
blockSha, err := blocks[i].Sha()
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure computing block sah %v", err)
}
shanames[i] = blockSha
}
for startheight := int64(0); startheight < nBlocks; startheight += testincrement {
endheight := startheight + testcnt
if endheight > nBlocks {
endheight = database.AllShas
}
shalist, err := db.FetchHeightRange(startheight, endheight)
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
}
if endheight == database.AllShas {
if int64(len(shalist)) != nBlocks-startheight {
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
}
} else {
if int64(len(shalist)) != testcnt {
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
}
}
for i := range shalist {
sha0 := *shanames[int64(i)+startheight]
sha1 := shalist[i]
if sha0 != sha1 {
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1)
}
}
}
}
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
testDb, err := setUpTestDb(t)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
defer testDb.cleanUpFunc()
// Insert a block with some fake test transactions. The block will have
// 10 copies of a fake transaction involving the same address.
addrString := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
targetAddr, err := btcutil.DecodeAddress(addrString, &btcnet.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode test address: %v", err)
}
outputScript, err := btcscript.PayToAddrScript(targetAddr)
if err != nil {
t.Fatalf("Unable make test pkScript %v", err)
}
fakeTxOut := btcwire.NewTxOut(10, outputScript)
var emptyHash btcwire.ShaHash
fakeHeader := btcwire.NewBlockHeader(&emptyHash, &emptyHash, 1, 1)
msgBlock := btcwire.NewMsgBlock(fakeHeader)
for i := 0; i < 10; i++ {
mtx := btcwire.NewMsgTx()
mtx.AddTxOut(fakeTxOut)
msgBlock.AddTransaction(mtx)
}
// Insert the test block into the DB.
testBlock := btcutil.NewBlock(msgBlock)
newheight, err := testDb.db.InsertBlock(testBlock)
if err != nil {
t.Fatalf("Unable to insert block into db: %v", err)
}
// Create and insert an address index for our test addr.
txLoc, _ := testBlock.TxLoc()
index := make(database.BlockAddrIndex)
for i := range testBlock.Transactions() {
var hash160 [ripemd160.Size]byte
scriptAddr := targetAddr.ScriptAddress()
copy(hash160[:], scriptAddr[:])
index[hash160] = append(index[hash160], &txLoc[i])
}
blkSha, _ := testBlock.Sha()
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newheight, blkSha, err)
return
}
// Try skipping the first 4 results, should get 6 in return.
txReply, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReply) != 6 {
t.Fatalf("Did not correctly skip forward in txs for address reply"+
" got %v txs, expected %v", len(txReply), 6)
}
// Limit the number of results to 3.
txReply, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReply) != 3 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 3)
}
// Skip 1, limit 5.
txReply, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReply) != 5 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 5)
}
}

583
database/ldb/tx.go Normal file
View File

@ -0,0 +1,583 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb
import (
"bytes"
"encoding/binary"
"errors"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
"github.com/btcsuite/goleveldb/leveldb"
"github.com/btcsuite/goleveldb/leveldb/util"
"golang.org/x/crypto/ripemd160"
)
const (
// Each address index is 34 bytes:
// --------------------------------------------------------
// | Prefix | Hash160 | BlkHeight | Tx Offset | Tx Size |
// --------------------------------------------------------
// | 2 bytes | 20 bytes | 4 bytes | 4 bytes | 4 bytes |
// --------------------------------------------------------
addrIndexKeyLength = 2 + ripemd160.Size + 4 + 4 + 4
batchDeleteThreshold = 10000
)
var addrIndexMetaDataKey = []byte("addrindex")
// All address index entries share this prefix to facilitate the use of
// iterators.
var addrIndexKeyPrefix = []byte("a-")
type txUpdateObj struct {
txSha *btcwire.ShaHash
blkHeight int64
txoff int
txlen int
ntxout int
spentData []byte
delete bool
}
type spentTx struct {
blkHeight int64
txoff int
txlen int
numTxO int
delete bool
}
type spentTxUpdate struct {
txl []*spentTx
delete bool
}
type txAddrIndex struct {
hash160 [ripemd160.Size]byte
blkHeight int64
txoffset int
txlen int
}
// InsertTx inserts a tx hash and its associated data into the database.
func (db *LevelDb) InsertTx(txsha *btcwire.ShaHash, height int64, txoff int, txlen int, spentbuf []byte) (err error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.insertTx(txsha, height, txoff, txlen, spentbuf)
}
// insertTx inserts a tx hash and its associated data into the database.
// Must be called with db lock held.
func (db *LevelDb) insertTx(txSha *btcwire.ShaHash, height int64, txoff int, txlen int, spentbuf []byte) (err error) {
var txU txUpdateObj
txU.txSha = txSha
txU.blkHeight = height
txU.txoff = txoff
txU.txlen = txlen
txU.spentData = spentbuf
db.txUpdateMap[*txSha] = &txU
return nil
}
// formatTx generates the value buffer for the Tx db.
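// The record layout, mirrored by getTxData below, is:
// 8-byte LE block height || 4-byte LE tx offset || 4-byte LE tx length || spent bitmap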
func (db *LevelDb) formatTx(txu *txUpdateObj) []byte {
blkHeight := uint64(txu.blkHeight)
txOff := uint32(txu.txoff)
txLen := uint32(txu.txlen)
spentbuf := txu.spentData
txW := make([]byte, 16+len(spentbuf))
binary.LittleEndian.PutUint64(txW[0:8], blkHeight)
binary.LittleEndian.PutUint32(txW[8:12], txOff)
binary.LittleEndian.PutUint32(txW[12:16], txLen)
copy(txW[16:], spentbuf)
return txW[:]
}
func (db *LevelDb) getTxData(txsha *btcwire.ShaHash) (int64, int, int, []byte, error) {
key := shaTxToKey(txsha)
buf, err := db.lDb.Get(key, db.ro)
if err != nil {
return 0, 0, 0, nil, err
}
blkHeight := binary.LittleEndian.Uint64(buf[0:8])
txOff := binary.LittleEndian.Uint32(buf[8:12])
txLen := binary.LittleEndian.Uint32(buf[12:16])
spentBuf := make([]byte, len(buf)-16)
copy(spentBuf, buf[16:])
return int64(blkHeight), int(txOff), int(txLen), spentBuf, nil
}
func (db *LevelDb) getTxFullySpent(txsha *btcwire.ShaHash) ([]*spentTx, error) {
var badTxList, spentTxList []*spentTx
key := shaSpentTxToKey(txsha)
buf, err := db.lDb.Get(key, db.ro)
if err == leveldb.ErrNotFound {
return badTxList, database.ErrTxShaMissing
} else if err != nil {
return badTxList, err
}
txListLen := len(buf) / 20
spentTxList = make([]*spentTx, txListLen, txListLen)
for i := range spentTxList {
offset := i * 20
blkHeight := binary.LittleEndian.Uint64(buf[offset : offset+8])
txOff := binary.LittleEndian.Uint32(buf[offset+8 : offset+12])
txLen := binary.LittleEndian.Uint32(buf[offset+12 : offset+16])
numTxO := binary.LittleEndian.Uint32(buf[offset+16 : offset+20])
sTx := spentTx{
blkHeight: int64(blkHeight),
txoff: int(txOff),
txlen: int(txLen),
numTxO: int(numTxO),
}
spentTxList[i] = &sTx
}
return spentTxList, nil
}
func (db *LevelDb) formatTxFullySpent(sTxList []*spentTx) []byte {
txW := make([]byte, 20*len(sTxList))
for i, sTx := range sTxList {
blkHeight := uint64(sTx.blkHeight)
txOff := uint32(sTx.txoff)
txLen := uint32(sTx.txlen)
numTxO := uint32(sTx.numTxO)
offset := i * 20
binary.LittleEndian.PutUint64(txW[offset:offset+8], blkHeight)
binary.LittleEndian.PutUint32(txW[offset+8:offset+12], txOff)
binary.LittleEndian.PutUint32(txW[offset+12:offset+16], txLen)
binary.LittleEndian.PutUint32(txW[offset+16:offset+20], numTxO)
}
return txW
}
// ExistsTxSha returns whether the given tx sha exists in the database.
func (db *LevelDb) ExistsTxSha(txsha *btcwire.ShaHash) (bool, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
return db.existsTxSha(txsha)
}
// existsTxSha returns whether the given tx sha exists in the database.
// Must be called with the db lock held.
func (db *LevelDb) existsTxSha(txSha *btcwire.ShaHash) (bool, error) {
_, _, _, _, err := db.getTxData(txSha)
switch err {
case nil:
return true, nil
case leveldb.ErrNotFound:
return false, nil
}
return false, err
}
// FetchTxByShaList returns the most recent tx for each given sha, whether it is fully spent or not.
func (db *LevelDb) FetchTxByShaList(txShaList []*btcwire.ShaHash) []*database.TxListReply {
db.dbLock.Lock()
defer db.dbLock.Unlock()
// until the fully spent separation of tx is complete this is identical
// to FetchUnSpentTxByShaList
replies := make([]*database.TxListReply, len(txShaList))
for i, txsha := range txShaList {
tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
btxspent := []bool{}
if err == nil {
btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
for idx := range tx.TxOut {
byteidx := idx / 8
byteoff := uint(idx % 8)
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
}
}
if err == database.ErrTxShaMissing {
// if the unspent pool did not have the tx,
// look in the fully spent pool (only last instance)
sTxList, fSerr := db.getTxFullySpent(txsha)
if fSerr == nil && len(sTxList) != 0 {
idx := len(sTxList) - 1
stx := sTxList[idx]
tx, blockSha, _, _, err = db.fetchTxDataByLoc(
stx.blkHeight, stx.txoff, stx.txlen, []byte{})
if err == nil {
btxspent = make([]bool, len(tx.TxOut))
for i := range btxspent {
btxspent[i] = true
}
}
}
}
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
replies[i] = &txlre
}
return replies
}
// FetchUnSpentTxByShaList, given an array of ShaHash, looks up the transactions
// and returns them in a TxListReply array.
func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*btcwire.ShaHash) []*database.TxListReply {
db.dbLock.Lock()
defer db.dbLock.Unlock()
replies := make([]*database.TxListReply, len(txShaList))
for i, txsha := range txShaList {
tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
btxspent := []bool{}
if err == nil {
btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
for idx := range tx.TxOut {
byteidx := idx / 8
byteoff := uint(idx % 8)
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
}
}
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
replies[i] = &txlre
}
return replies
}
// fetchTxDataBySha returns several pieces of data regarding the given sha.
func (db *LevelDb) fetchTxDataBySha(txsha *btcwire.ShaHash) (rtx *btcwire.MsgTx, rblksha *btcwire.ShaHash, rheight int64, rtxspent []byte, err error) {
var blkHeight int64
var txspent []byte
var txOff, txLen int
blkHeight, txOff, txLen, txspent, err = db.getTxData(txsha)
if err != nil {
if err == leveldb.ErrNotFound {
err = database.ErrTxShaMissing
}
return
}
return db.fetchTxDataByLoc(blkHeight, txOff, txLen, txspent)
}
// fetchTxDataByLoc returns several pieces of data regarding the given tx
// located by the block/offset/size location
func (db *LevelDb) fetchTxDataByLoc(blkHeight int64, txOff int, txLen int, txspent []byte) (rtx *btcwire.MsgTx, rblksha *btcwire.ShaHash, rheight int64, rtxspent []byte, err error) {
var blksha *btcwire.ShaHash
var blkbuf []byte
blksha, blkbuf, err = db.getBlkByHeight(blkHeight)
if err != nil {
if err == leveldb.ErrNotFound {
err = database.ErrTxShaMissing
}
return
}
//log.Trace("transaction %v is at block %v %v txoff %v, txlen %v\n",
// txsha, blksha, blkHeight, txOff, txLen)
if len(blkbuf) < txOff+txLen {
err = database.ErrTxShaMissing
return
}
rbuf := bytes.NewReader(blkbuf[txOff : txOff+txLen])
var tx btcwire.MsgTx
err = tx.Deserialize(rbuf)
if err != nil {
log.Warnf("unable to decode tx block %v %v txoff %v txlen %v",
blkHeight, blksha, txOff, txLen)
return
}
return &tx, blksha, blkHeight, txspent, nil
}
// FetchTxBySha returns some data for the given Tx Sha.
func (db *LevelDb) FetchTxBySha(txsha *btcwire.ShaHash) ([]*database.TxListReply, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
replylen := 0
replycnt := 0
tx, blksha, height, txspent, txerr := db.fetchTxDataBySha(txsha)
if txerr == nil {
replylen++
} else {
if txerr != database.ErrTxShaMissing {
return []*database.TxListReply{}, txerr
}
}
sTxList, fSerr := db.getTxFullySpent(txsha)
if fSerr != nil {
if fSerr != database.ErrTxShaMissing {
return []*database.TxListReply{}, fSerr
}
} else {
replylen += len(sTxList)
}
replies := make([]*database.TxListReply, replylen)
if fSerr == nil {
for _, stx := range sTxList {
tx, blksha, _, _, err := db.fetchTxDataByLoc(
stx.blkHeight, stx.txoff, stx.txlen, []byte{})
if err != nil {
if err != leveldb.ErrNotFound {
return []*database.TxListReply{}, err
}
continue
}
btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
for i := range btxspent {
btxspent[i] = true
}
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: stx.blkHeight, TxSpent: btxspent, Err: nil}
replies[replycnt] = &txlre
replycnt++
}
}
if txerr == nil {
btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
for idx := range tx.TxOut {
byteidx := idx / 8
byteoff := uint(idx % 8)
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
}
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: height, TxSpent: btxspent, Err: nil}
replies[replycnt] = &txlre
replycnt++
}
return replies, nil
}
// addrIndexToKey serializes the passed txAddrIndex for storage within the DB.
func addrIndexToKey(index *txAddrIndex) []byte {
record := make([]byte, addrIndexKeyLength, addrIndexKeyLength)
copy(record[:2], addrIndexKeyPrefix)
copy(record[2:22], index.hash160[:])
// The index itself.
binary.LittleEndian.PutUint32(record[22:26], uint32(index.blkHeight))
binary.LittleEndian.PutUint32(record[26:30], uint32(index.txoffset))
binary.LittleEndian.PutUint32(record[30:34], uint32(index.txlen))
return record
}
// unpackTxIndex deserializes the raw bytes of an address tx index.
func unpackTxIndex(rawIndex []byte) *txAddrIndex {
return &txAddrIndex{
blkHeight: int64(binary.LittleEndian.Uint32(rawIndex[0:4])),
txoffset: int(binary.LittleEndian.Uint32(rawIndex[4:8])),
txlen: int(binary.LittleEndian.Uint32(rawIndex[8:12])),
}
}
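// For example (mirroring TestAddrIndexKeySerialization): packing a
// txAddrIndex with blkHeight=1, txoffset=5 and txlen=360 yields a 34-byte
// key whose final 12 bytes hold the three little-endian uint32 values, and
// unpackTxIndex recovers them when handed key[22:].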
// bytesPrefix returns the key range that satisfies the given prefix.
// This is only applicable for the standard 'bytes comparer'.
func bytesPrefix(prefix []byte) *util.Range {
var limit []byte
for i := len(prefix) - 1; i >= 0; i-- {
c := prefix[i]
if c < 0xff {
limit = make([]byte, i+1)
copy(limit, prefix)
limit[i] = c + 1
break
}
}
return &util.Range{Start: prefix, Limit: limit}
}
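// For example, bytesPrefix([]byte("a")) yields Start "a" and Limit "b", so
// an iterator over that range visits every key beginning with "a"; this is
// what TestBytesPrefix exercises.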
// FetchTxsForAddr looks up and returns all transactions which either
// spend from a previously created output of the passed address, or
// create a new output locked to the passed address. The `limit` parameter
// should be the max number of transactions to be returned. Additionally, if the
// caller wishes to seek forward in the results some amount, the `skip`
// parameter represents how many results to skip.
func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int,
limit int) ([]*database.TxListReply, error) {
db.dbLock.Lock()
defer db.dbLock.Unlock()
// Enforce constraints for skip and limit.
if skip < 0 {
return nil, errors.New("offset for skip must be positive")
}
if limit < 0 {
return nil, errors.New("value for limit must be positive")
}
// Parse address type, bailing on an unknown type.
var addrKey []byte
switch addr := addr.(type) {
case *btcutil.AddressPubKeyHash:
hash160 := addr.Hash160()
addrKey = hash160[:]
case *btcutil.AddressScriptHash:
hash160 := addr.Hash160()
addrKey = hash160[:]
case *btcutil.AddressPubKey:
hash160 := addr.AddressPubKeyHash().Hash160()
addrKey = hash160[:]
default:
return nil, database.ErrUnsupportedAddressType
}
// Create the prefix for our search.
addrPrefix := make([]byte, 22, 22)
copy(addrPrefix[:2], addrIndexKeyPrefix)
copy(addrPrefix[2:], addrKey)
var replies []*database.TxListReply
iter := db.lDb.NewIterator(bytesPrefix(addrPrefix), nil)
for skip != 0 && iter.Next() {
skip--
}
// Iterate through all address indexes that match the targeted prefix.
for iter.Next() && limit != 0 {
rawIndex := make([]byte, 22, 22)
copy(rawIndex, iter.Key()[22:])
addrIndex := unpackTxIndex(rawIndex)
tx, blkSha, blkHeight, _, err := db.fetchTxDataByLoc(addrIndex.blkHeight,
addrIndex.txoffset, addrIndex.txlen, []byte{})
if err != nil {
// Eat a possible error due to a potential re-org.
continue
}
txSha, _ := tx.TxSha()
txReply := &database.TxListReply{Sha: &txSha, Tx: tx,
BlkSha: blkSha, Height: blkHeight, TxSpent: []bool{}, Err: err}
replies = append(replies, txReply)
limit--
}
iter.Release()
return replies, nil
}
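// Usage sketch (db is an open database.Db and addr a decoded
// btcutil.Address; the numbers are illustrative):
//
// replies, err := db.FetchTxsForAddr(addr, 4, 100000)
//
// skips the first four indexed transactions for addr and returns at most
// the next 100000, which is how TestLimitAndSkipFetchTxsForAddr pages
// through results.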
// UpdateAddrIndexForBlock updates the stored addrindex with passed
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the current tip of the
// addr index. These two operations are performed in an atomic
// transaction which is committed before the function returns.
// Transactions indexed by address are stored with the following format:
// * prefix || hash160 || blockHeight || txoffset || txlen
// Indexes are stored purely in the key, with blank data for the actual value
// in order to facilitate ease of iteration by their shared prefix and
// also to allow limiting the number of returned transactions (RPC).
// Alternatively, indexes for each address could be stored as an
// append-only list for the stored value. However, this adds unnecessary
// overhead when storing and retrieving since the entire list must
// be fetched each time.
func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *btcwire.ShaHash, blkHeight int64, addrIndex database.BlockAddrIndex) error {
db.dbLock.Lock()
defer db.dbLock.Unlock()
var blankData []byte
batch := db.lBatch()
defer db.lbatch.Reset()
// Write all data for the new address indexes in a single batch
// transaction.
for addrKey, indexes := range addrIndex {
for _, txLoc := range indexes {
index := &txAddrIndex{
hash160: addrKey,
blkHeight: blkHeight,
txoffset: txLoc.TxStart,
txlen: txLoc.TxLen,
}
// The index is stored purely in the key.
packedIndex := addrIndexToKey(index)
batch.Put(packedIndex, blankData)
}
}
// Update tip of addrindex.
newIndexTip := make([]byte, 40, 40)
copy(newIndexTip[:32], blkSha.Bytes())
binary.LittleEndian.PutUint64(newIndexTip[32:], uint64(blkHeight))
batch.Put(addrIndexMetaDataKey, newIndexTip)
if err := db.lDb.Write(batch, db.wo); err != nil {
return err
}
db.lastAddrIndexBlkIdx = blkHeight
db.lastAddrIndexBlkSha = *blkSha
return nil
}
// DeleteAddrIndex deletes the entire addrindex stored within the DB.
// It also resets the cached in-memory metadata about the addr index.
func (db *LevelDb) DeleteAddrIndex() error {
db.dbLock.Lock()
defer db.dbLock.Unlock()
batch := db.lBatch()
defer batch.Reset()
// Delete the entire index along with any metadata about it.
iter := db.lDb.NewIterator(bytesPrefix(addrIndexKeyPrefix), db.ro)
numInBatch := 0
for iter.Next() {
key := iter.Key()
batch.Delete(key)
numInBatch++
// Delete in chunks to potentially avoid very large batches.
if numInBatch >= batchDeleteThreshold {
if err := db.lDb.Write(batch, db.wo); err != nil {
return err
}
batch.Reset()
numInBatch = 0
}
}
iter.Release()
batch.Delete(addrIndexMetaDataKey)
if err := db.lDb.Write(batch, db.wo); err != nil {
return err
}
db.lastAddrIndexBlkIdx = -1
db.lastAddrIndexBlkSha = btcwire.ShaHash{}
return nil
}

63
database/log.go Normal file

@ -0,0 +1,63 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package database
import (
"errors"
"io"
"github.com/btcsuite/btclog"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
DisableLog()
}
// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
log = btclog.Disabled
}
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
log = logger
}
// SetLogWriter uses a specified io.Writer to output package logging info.
// This allows a caller to direct package logging output without needing a
// dependency on seelog. If the caller is also using btclog, UseLogger should
// be used instead.
func SetLogWriter(w io.Writer, level string) error {
if w == nil {
return errors.New("nil writer")
}
lvl, ok := btclog.LogLevelFromString(level)
if !ok {
return errors.New("invalid log level")
}
l, err := btclog.NewLoggerFromWriter(w, lvl)
if err != nil {
return err
}
UseLogger(l)
return nil
}
// GetLog returns the currently active logger.
func GetLog() btclog.Logger {
return log
}
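For a caller that is not otherwise using btclog, SetLogWriter is the simplest way to surface this package's output. A minimal sketch, assuming "debug" is one of the level strings btclog recognizes:

```go
package main

import (
	"fmt"
	"os"

	"github.com/btcsuite/btcd/database"
)

func main() {
	// Route database package logging to stderr. The level string is parsed
	// by btclog; "debug" is assumed to be a recognized level name.
	if err := database.SetLogWriter(os.Stderr, "debug"); err != nil {
		fmt.Fprintf(os.Stderr, "unable to set log writer: %v\n", err)
		os.Exit(1)
	}
	// Package log output now goes to stderr until DisableLog is called.
}
```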

12
database/memdb/doc.go Normal file

@ -0,0 +1,12 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
/*
Package memdb implements an instance of the database package that uses memory
for the block storage.
This is primarily used for testing purposes since normal operations require a
persistent block storage mechanism, which this is not.
*/
package memdb

49
database/memdb/driver.go Normal file

@ -0,0 +1,49 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package memdb
import (
"fmt"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btclog"
)
var log = btclog.Disabled
func init() {
driver := database.DriverDB{DbType: "memdb", CreateDB: CreateDB, OpenDB: OpenDB}
database.AddDBDriver(driver)
}
// parseArgs parses the arguments from the database package Open/Create methods.
func parseArgs(funcName string, args ...interface{}) error {
if len(args) != 0 {
return fmt.Errorf("memdb.%s does not accept any arguments",
funcName)
}
return nil
}
// OpenDB opens an existing database for use.
func OpenDB(args ...interface{}) (database.Db, error) {
if err := parseArgs("OpenDB", args...); err != nil {
return nil, err
}
// A memory database is not persistent, so let CreateDB handle it.
return CreateDB()
}
// CreateDB creates, initializes, and opens a database for use.
func CreateDB(args ...interface{}) (database.Db, error) {
if err := parseArgs("CreateDB", args...); err != nil {
return nil, err
}
log = database.GetLog()
return newMemDb(), nil
}
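Because the driver registers itself from init, a caller only needs a blank import of this package before asking the database package for a "memdb" instance, as in this minimal sketch (mirroring what the package tests do):

```go
package main

import (
	"fmt"
	"os"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/memdb" // registers the "memdb" driver
)

func main() {
	// The memdb driver accepts no arguments beyond the driver name.
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to create memory db: %v\n", err)
		os.Exit(1)
	}
	defer db.Close()

	fmt.Println("opened an empty in-memory block database")
}
```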

767
database/memdb/memdb.go Normal file

@ -0,0 +1,767 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package memdb
import (
"errors"
"fmt"
"math"
"sync"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
)
// Errors that the various database functions may return.
var (
ErrDbClosed = errors.New("database is closed")
)
var (
zeroHash = btcwire.ShaHash{}
// The following two hashes are ones that must be specially handled.
// See the comments where they're used for more details.
dupTxHash91842 = newShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
dupTxHash91880 = newShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
)
// tTxInsertData holds information about the location and spent status of
// a transaction.
type tTxInsertData struct {
blockHeight int64
offset int
spentBuf []bool
}
// newShaHashFromStr converts the passed big-endian hex string into a
// btcwire.ShaHash. It only differs from the one available in btcwire in that
// it ignores the error since it will only (and must only) be called with
// hard-coded, and therefore known good, hashes.
func newShaHashFromStr(hexStr string) *btcwire.ShaHash {
sha, _ := btcwire.NewShaHashFromStr(hexStr)
return sha
}
// isCoinbaseInput returns whether or not the passed transaction input is a
// coinbase input. A coinbase is a special transaction created by miners that
// has no inputs. This is represented in the block chain by a transaction with
// a single input that has a previous output transaction index set to the
// maximum value along with a zero hash.
func isCoinbaseInput(txIn *btcwire.TxIn) bool {
prevOut := &txIn.PreviousOutPoint
if prevOut.Index == math.MaxUint32 && prevOut.Hash.IsEqual(&zeroHash) {
return true
}
return false
}
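To make the convention concrete, the sketch below builds an input shaped like a coinbase input (zero previous hash, maximum output index) and applies the same test; it assumes only that btcwire's OutPoint struct exposes the Hash and Index fields used above.

```go
package main

import (
	"fmt"
	"math"

	"github.com/btcsuite/btcwire"
)

func main() {
	// A coinbase input references the all-zero hash with the maximum
	// possible output index.
	coinbaseIn := btcwire.TxIn{
		PreviousOutPoint: btcwire.OutPoint{
			Hash:  btcwire.ShaHash{},
			Index: math.MaxUint32,
		},
	}

	prevOut := &coinbaseIn.PreviousOutPoint
	zero := btcwire.ShaHash{}
	isCoinbase := prevOut.Index == math.MaxUint32 && prevOut.Hash.IsEqual(&zero)
	fmt.Println("coinbase input?", isCoinbase) // true
}
```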
// isFullySpent returns whether or not a transaction represented by the passed
// transaction insert data is fully spent. A fully spent transaction is one
// where all outputs are spent.
func isFullySpent(txD *tTxInsertData) bool {
for _, spent := range txD.spentBuf {
if !spent {
return false
}
}
return true
}
// MemDb is a concrete implementation of the database.Db interface which provides
// a memory-only database. Since it is memory-only, it is obviously not
// persistent and is mostly only useful for testing purposes.
type MemDb struct {
// Embed a mutex for safe concurrent access.
sync.Mutex
// blocks holds all of the bitcoin blocks that will be in the memory
// database.
blocks []*btcwire.MsgBlock
// blocksBySha keeps track of block heights by hash. The height can
// be used as an index into the blocks slice.
blocksBySha map[btcwire.ShaHash]int64
// txns holds information about transactions, such as their containing
// block height and the spent status of all their outputs.
txns map[btcwire.ShaHash][]*tTxInsertData
// closed indicates whether or not the database has been closed and is
// therefore invalidated.
closed bool
}
// removeTx removes the passed transaction including unspending it.
func (db *MemDb) removeTx(msgTx *btcwire.MsgTx, txHash *btcwire.ShaHash) {
// Undo all of the spends for the transaction.
for _, txIn := range msgTx.TxIn {
if isCoinbaseInput(txIn) {
continue
}
prevOut := &txIn.PreviousOutPoint
originTxns, exists := db.txns[prevOut.Hash]
if !exists {
log.Warnf("Unable to find input transaction %s to "+
"unspend %s index %d", prevOut.Hash, txHash,
prevOut.Index)
continue
}
originTxD := originTxns[len(originTxns)-1]
originTxD.spentBuf[prevOut.Index] = false
}
// Remove the info for the most recent version of the transaction.
txns := db.txns[*txHash]
lastIndex := len(txns) - 1
txns[lastIndex] = nil
txns = txns[:lastIndex]
db.txns[*txHash] = txns
// Remove the info entry from the map altogether if there are not any
// older versions of the transaction.
if len(txns) == 0 {
delete(db.txns, *txHash)
}
}
// Close cleanly shuts down database. This is part of the database.Db interface
// implementation.
//
// All data is purged upon close with this implementation since it is a
// memory-only database.
func (db *MemDb) Close() error {
db.Lock()
defer db.Unlock()
if db.closed {
return ErrDbClosed
}
db.blocks = nil
db.blocksBySha = nil
db.txns = nil
db.closed = true
return nil
}
// DropAfterBlockBySha removes any blocks from the database after the given
// block. This is different than a simple truncate since the spend information
// for each block must also be unwound. This is part of the database.Db interface
// implementation.
func (db *MemDb) DropAfterBlockBySha(sha *btcwire.ShaHash) error {
db.Lock()
defer db.Unlock()
if db.closed {
return ErrDbClosed
}
// Begin by attempting to find the height associated with the passed
// hash.
height, exists := db.blocksBySha[*sha]
if !exists {
return fmt.Errorf("block %v does not exist in the database",
sha)
}
// The spend information has to be undone in reverse order, so loop
// backwards from the last block through the block just after the passed
// block. While doing this unspend all transactions in each block and
// remove the block.
endHeight := int64(len(db.blocks) - 1)
for i := endHeight; i > height; i-- {
// Unspend and remove each transaction in reverse order because
// later transactions in a block can reference earlier ones.
transactions := db.blocks[i].Transactions
for j := len(transactions) - 1; j >= 0; j-- {
tx := transactions[j]
txHash, _ := tx.TxSha()
db.removeTx(tx, &txHash)
}
db.blocks[i] = nil
db.blocks = db.blocks[:i]
}
return nil
}
// ExistsSha returns whether or not the given block hash is present in the
// database. This is part of the database.Db interface implementation.
func (db *MemDb) ExistsSha(sha *btcwire.ShaHash) (bool, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return false, ErrDbClosed
}
if _, exists := db.blocksBySha[*sha]; exists {
return true, nil
}
return false, nil
}
// FetchBlockBySha returns a btcutil.Block. The implementation may cache the
// underlying data if desired. This is part of the database.Db interface
// implementation.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchBlockBySha(sha *btcwire.ShaHash) (*btcutil.Block, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, ErrDbClosed
}
if blockHeight, exists := db.blocksBySha[*sha]; exists {
block := btcutil.NewBlock(db.blocks[int(blockHeight)])
block.SetHeight(blockHeight)
return block, nil
}
return nil, fmt.Errorf("block %v is not in database", sha)
}
// FetchBlockHeightBySha returns the block height for the given hash. This is
// part of the database.Db interface implementation.
func (db *MemDb) FetchBlockHeightBySha(sha *btcwire.ShaHash) (int64, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return 0, ErrDbClosed
}
if blockHeight, exists := db.blocksBySha[*sha]; exists {
return blockHeight, nil
}
return 0, fmt.Errorf("block %v is not in database", sha)
}
// FetchBlockHeaderBySha returns a btcwire.BlockHeader for the given sha. The
// implementation may cache the underlying data if desired. This is part of the
// database.Db interface implementation.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchBlockHeaderBySha(sha *btcwire.ShaHash) (*btcwire.BlockHeader, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, ErrDbClosed
}
if blockHeight, exists := db.blocksBySha[*sha]; exists {
return &db.blocks[int(blockHeight)].Header, nil
}
return nil, fmt.Errorf("block header %v is not in database", sha)
}
// FetchBlockShaByHeight returns a block hash based on its height in the block
// chain. This is part of the database.Db interface implementation.
func (db *MemDb) FetchBlockShaByHeight(height int64) (*btcwire.ShaHash, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, ErrDbClosed
}
numBlocks := int64(len(db.blocks))
if height < 0 || height > numBlocks-1 {
return nil, fmt.Errorf("unable to fetch block height %d since "+
"it is not within the valid range (%d-%d)", height, 0,
numBlocks-1)
}
msgBlock := db.blocks[height]
blockHash, err := msgBlock.BlockSha()
if err != nil {
return nil, err
}
return &blockHash, nil
}
// FetchHeightRange looks up a range of blocks by the start and ending heights.
// Fetch is inclusive of the start height and exclusive of the ending height.
// To fetch all hashes from the start height until no more are present, use the
// special id `AllShas'. This is part of the database.Db interface implementation.
func (db *MemDb) FetchHeightRange(startHeight, endHeight int64) ([]btcwire.ShaHash, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, ErrDbClosed
}
// When the user passes the special AllShas id, adjust the end height
// accordingly.
if endHeight == database.AllShas {
endHeight = int64(len(db.blocks))
}
// Ensure requested heights are sane.
if startHeight < 0 {
return nil, fmt.Errorf("start height of fetch range must not "+
"be less than zero - got %d", startHeight)
}
if endHeight < startHeight {
return nil, fmt.Errorf("end height of fetch range must not "+
"be less than the start height - got start %d, end %d",
startHeight, endHeight)
}
// Fetch as many as are available within the specified range.
lastBlockIndex := int64(len(db.blocks) - 1)
hashList := make([]btcwire.ShaHash, 0, endHeight-startHeight)
for i := startHeight; i < endHeight; i++ {
if i > lastBlockIndex {
break
}
msgBlock := db.blocks[i]
blockHash, err := msgBlock.BlockSha()
if err != nil {
return nil, err
}
hashList = append(hashList, blockHash)
}
return hashList, nil
}
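Because the range is half-open and database.AllShas extends it through the newest block, fetching every known block hash is a single call. A small library-style sketch, assuming db is an open database.Db:

```go
package example

import (
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcwire"
)

// allBlockHashes is an illustrative helper that returns the hash of every
// block from height 0 through the current tip.
func allBlockHashes(db database.Db) ([]btcwire.ShaHash, error) {
	// The end height is exclusive, so the special AllShas id is used to
	// cover everything after the start height.
	return db.FetchHeightRange(0, database.AllShas)
}
```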
// ExistsTxSha returns whether or not the given transaction hash is present in
// the database and is not fully spent. This is part of the database.Db interface
// implementation.
func (db *MemDb) ExistsTxSha(sha *btcwire.ShaHash) (bool, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return false, ErrDbClosed
}
if txns, exists := db.txns[*sha]; exists {
return !isFullySpent(txns[len(txns)-1]), nil
}
return false, nil
}
// FetchTxBySha returns some data for the given transaction hash. The
// implementation may cache the underlying data if desired. This is part of the
// database.Db interface implementation.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchTxBySha(txHash *btcwire.ShaHash) ([]*database.TxListReply, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, ErrDbClosed
}
txns, exists := db.txns[*txHash]
if !exists {
log.Warnf("FetchTxBySha: requested hash of %s does not exist",
txHash)
return nil, database.ErrTxShaMissing
}
txHashCopy := *txHash
replyList := make([]*database.TxListReply, len(txns))
for i, txD := range txns {
msgBlock := db.blocks[txD.blockHeight]
blockSha, err := msgBlock.BlockSha()
if err != nil {
return nil, err
}
spentBuf := make([]bool, len(txD.spentBuf))
copy(spentBuf, txD.spentBuf)
reply := database.TxListReply{
Sha: &txHashCopy,
Tx: msgBlock.Transactions[txD.offset],
BlkSha: &blockSha,
Height: txD.blockHeight,
TxSpent: spentBuf,
Err: nil,
}
replyList[i] = &reply
}
return replyList, nil
}
// fetchTxByShaList fetches transactions and information about them given an
// array of transaction hashes. The result is a slice of TxListReply objects
// which contain the transaction and information about it such as what block and
// block height it's contained in and which outputs are spent.
//
// The includeSpent flag indicates whether or not information about transactions
// which are fully spent should be returned. When the flag is not set, the
// corresponding entry in the TxListReply slice for fully spent transactions
// will indicate the transaction does not exist.
//
// This function must be called with the db lock held.
func (db *MemDb) fetchTxByShaList(txShaList []*btcwire.ShaHash, includeSpent bool) []*database.TxListReply {
replyList := make([]*database.TxListReply, 0, len(txShaList))
for i, hash := range txShaList {
// Every requested entry needs a response, so start with nothing
// more than a response with the requested hash marked missing.
// The reply will be updated below with the appropriate
// information if the transaction exists.
reply := database.TxListReply{
Sha: txShaList[i],
Err: database.ErrTxShaMissing,
}
replyList = append(replyList, &reply)
if db.closed {
reply.Err = ErrDbClosed
continue
}
if txns, exists := db.txns[*hash]; exists {
// A given transaction may have duplicates so long as the
// previous one is fully spent. We are only interested
// in the most recent version of the transaction for
// this function. The FetchTxBySha function can be
// used to get all versions of a transaction.
txD := txns[len(txns)-1]
if !includeSpent && isFullySpent(txD) {
continue
}
// Look up the referenced block and get its hash. Set
// the reply error appropriately and go to the next
// requested transaction if anything goes wrong.
msgBlock := db.blocks[txD.blockHeight]
blockSha, err := msgBlock.BlockSha()
if err != nil {
reply.Err = err
continue
}
// Make a copy of the spent buf to return so the caller
// can't accidentally modify it.
spentBuf := make([]bool, len(txD.spentBuf))
copy(spentBuf, txD.spentBuf)
// Populate the reply.
reply.Tx = msgBlock.Transactions[txD.offset]
reply.BlkSha = &blockSha
reply.Height = txD.blockHeight
reply.TxSpent = spentBuf
reply.Err = nil
}
}
return replyList
}
// FetchTxByShaList returns a TxListReply given an array of transaction
// hashes. This function differs from FetchUnSpentTxByShaList in that it
// returns the most recent version of fully spent transactions. Due to the
// increased number of transaction fetches, this function is typically more
// expensive than the unspent counterpart, however the specific performance
// details depend on the concrete implementation. The implementation may cache
// the underlying data if desired. This is part of the database.Db interface
// implementation.
//
// To fetch all versions of a specific transaction, call FetchTxBySha.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchTxByShaList(txShaList []*btcwire.ShaHash) []*database.TxListReply {
db.Lock()
defer db.Unlock()
return db.fetchTxByShaList(txShaList, true)
}
// FetchUnSpentTxByShaList returns a TxListReply given an array of transaction
// hashes. Any transactions which are fully spent will indicate they do not
// exist by setting the Err field to TxShaMissing. The implementation may cache
// the underlying data if desired. This is part of the database.Db interface
// implementation.
//
// To obtain results which do contain the most recent version of a fully spent
// transactions, call FetchTxByShaList. To fetch all versions of a specific
// transaction, call FetchTxBySha.
//
// This implementation does not use any additional cache since the entire
// database is already in memory.
func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*btcwire.ShaHash) []*database.TxListReply {
db.Lock()
defer db.Unlock()
return db.fetchTxByShaList(txShaList, false)
}
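One practical consequence of the two list variants: a fully spent transaction is reported with Err set to database.ErrTxShaMissing by FetchUnSpentTxByShaList, but comes back without error from FetchTxByShaList. The hypothetical helper below uses that to distinguish the three cases a caller usually cares about.

```go
package example

import (
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcwire"
)

// describeTx is an illustrative helper that reports whether a transaction is
// unknown, fully spent, or still has unspent outputs.
func describeTx(db database.Db, hash *btcwire.ShaHash) string {
	// Both list functions return one reply per requested hash.
	unspent := db.FetchUnSpentTxByShaList([]*btcwire.ShaHash{hash})[0]
	if unspent.Err == nil {
		return "has at least one unspent output"
	}

	// Fall back to the spent-inclusive lookup to tell a missing transaction
	// apart from a fully spent one.
	all := db.FetchTxByShaList([]*btcwire.ShaHash{hash})[0]
	if all.Err == nil {
		return "exists but is fully spent"
	}
	return "not found in the database"
}
```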
// InsertBlock inserts raw block and transaction data from a block into the
// database. The first block inserted into the database will be treated as the
// genesis block. Every subsequent block insert requires the referenced parent
// block to already exist. This is part of the database.Db interface
// implementation.
func (db *MemDb) InsertBlock(block *btcutil.Block) (int64, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return 0, ErrDbClosed
}
blockHash, err := block.Sha()
if err != nil {
return 0, err
}
// Reject the insert if the referenced previous block does not exist,
// except in the case where there are no blocks inserted yet and the first
// inserted block is assumed to be the genesis block.
msgBlock := block.MsgBlock()
if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
if len(db.blocks) > 0 {
return 0, database.ErrPrevShaMissing
}
}
// Build a map of in-flight transactions because some of the inputs in
// this block could be referencing other transactions earlier in this
// block which are not yet in the chain.
txInFlight := map[btcwire.ShaHash]int{}
transactions := block.Transactions()
for i, tx := range transactions {
txInFlight[*tx.Sha()] = i
}
// Loop through all transactions and inputs to ensure there are no error
// conditions that would prevent them from being inserted into the db.
// Although these checks could be done in the loop below, checking
// for error conditions up front means the code below doesn't have to
// deal with rollback on errors.
newHeight := int64(len(db.blocks))
for i, tx := range transactions {
// Two old blocks contain duplicate transactions due to being
// mined by faulty miners and accepted by the original Satoshi
// client. Rules have since been added to ensure this
// problem can no longer happen, but the two duplicate
// transactions which were originally accepted are forever in
// the block chain history and must be dealt with specially.
// http://blockexplorer.com/b/91842
// http://blockexplorer.com/b/91880
if newHeight == 91842 && tx.Sha().IsEqual(dupTxHash91842) {
continue
}
if newHeight == 91880 && tx.Sha().IsEqual(dupTxHash91880) {
continue
}
for _, txIn := range tx.MsgTx().TxIn {
if isCoinbaseInput(txIn) {
continue
}
// It is acceptable for a transaction input to reference
// the output of another transaction in this block only
// if the referenced transaction comes before the
// current one in this block.
prevOut := &txIn.PreviousOutPoint
if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
if i <= inFlightIndex {
log.Warnf("InsertBlock: requested hash "+
" of %s does not exist in-flight",
tx.Sha())
return 0, database.ErrTxShaMissing
}
} else {
originTxns, exists := db.txns[prevOut.Hash]
if !exists {
log.Warnf("InsertBlock: requested hash "+
"of %s by %s does not exist",
prevOut.Hash, tx.Sha())
return 0, database.ErrTxShaMissing
}
originTxD := originTxns[len(originTxns)-1]
if prevOut.Index >= uint32(len(originTxD.spentBuf)) {
log.Warnf("InsertBlock: requested hash "+
"of %s with index %d does not "+
"exist", tx.Sha(), prevOut.Index)
return 0, database.ErrTxShaMissing
}
}
}
// Prevent duplicate transactions in the same block.
if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
inFlightIndex < i {
log.Warnf("Block contains duplicate transaction %s",
tx.Sha())
return 0, database.ErrDuplicateSha
}
// Prevent duplicate transactions unless the old one is fully
// spent.
if txns, exists := db.txns[*tx.Sha()]; exists {
txD := txns[len(txns)-1]
if !isFullySpent(txD) {
log.Warnf("Attempt to insert duplicate "+
"transaction %s", tx.Sha())
return 0, database.ErrDuplicateSha
}
}
}
db.blocks = append(db.blocks, msgBlock)
db.blocksBySha[*blockHash] = newHeight
// Insert information about each transaction and spend all of the
// outputs referenced by the inputs to the transactions.
for i, tx := range block.Transactions() {
// Insert the transaction data.
txD := tTxInsertData{
blockHeight: newHeight,
offset: i,
spentBuf: make([]bool, len(tx.MsgTx().TxOut)),
}
db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)
// Spend all of the inputs.
for _, txIn := range tx.MsgTx().TxIn {
// Coinbase transaction has no inputs.
if isCoinbaseInput(txIn) {
continue
}
// Already checked for existing and valid ranges above.
prevOut := &txIn.PreviousOutPoint
originTxns := db.txns[prevOut.Hash]
originTxD := originTxns[len(originTxns)-1]
originTxD.spentBuf[prevOut.Index] = true
}
}
return newHeight, nil
}
// NewestSha returns the hash and block height of the most recent (end) block of
// the block chain. It will return the zero hash, -1 for the block height, and
// no error (nil) if there are not any blocks in the database yet. This is part
// of the database.Db interface implementation.
func (db *MemDb) NewestSha() (*btcwire.ShaHash, int64, error) {
db.Lock()
defer db.Unlock()
if db.closed {
return nil, 0, ErrDbClosed
}
// When the database has not had a genesis block inserted yet, return
// values specified by interface contract.
numBlocks := len(db.blocks)
if numBlocks == 0 {
return &zeroHash, -1, nil
}
blockSha, err := db.blocks[numBlocks-1].BlockSha()
if err != nil {
return nil, -1, err
}
return &blockSha, int64(numBlocks - 1), nil
}
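Tying the pieces together, the flow the package tests exercise is: create a memdb, insert the main-net genesis block (the first insert needs no existing parent), and read the tip back with NewestSha. A minimal sketch using the same btcnet parameters as the tests:

```go
package main

import (
	"fmt"
	"os"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/memdb" // registers the "memdb" driver
	"github.com/btcsuite/btcnet"
	"github.com/btcsuite/btcutil"
)

func main() {
	db, err := database.CreateDB("memdb")
	if err != nil {
		fmt.Fprintf(os.Stderr, "create db: %v\n", err)
		os.Exit(1)
	}
	defer db.Close()

	// The first inserted block is treated as the genesis block.
	genesis := btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock)
	height, err := db.InsertBlock(genesis)
	if err != nil {
		fmt.Fprintf(os.Stderr, "insert genesis: %v\n", err)
		os.Exit(1)
	}

	sha, tipHeight, err := db.NewestSha()
	if err != nil {
		fmt.Fprintf(os.Stderr, "newest sha: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("inserted at height %d, tip %v at height %d\n",
		height, sha, tipHeight)
}
```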
// FetchAddrIndexTip isn't currently implemented. This is a part of the
// database.Db interface implementation.
func (db *MemDb) FetchAddrIndexTip() (*btcwire.ShaHash, int64, error) {
return nil, 0, database.ErrNotImplemented
}
// UpdateAddrIndexForBlock isn't currently implemented. This is a part of the
// database.Db interface implementation.
func (db *MemDb) UpdateAddrIndexForBlock(*btcwire.ShaHash, int64,
database.BlockAddrIndex) error {
return database.ErrNotImplemented
}
// FetchTxsForAddr isn't currently implemented. This is a part of the database.Db
// interface implementation.
func (db *MemDb) FetchTxsForAddr(btcutil.Address, int, int) ([]*database.TxListReply, error) {
return nil, database.ErrNotImplemented
}
// DeleteAddrIndex isn't currently implemented. This is a part of the database.Db
// interface implementation.
func (db *MemDb) DeleteAddrIndex() error {
return database.ErrNotImplemented
}
// RollbackClose discards the recent database changes to the previously saved
// data at last Sync and closes the database. This is part of the database.Db
// interface implementation.
//
// The database is completely purged on close with this implementation since the
// entire database is only in memory. As a result, this function behaves no
// differently than Close.
func (db *MemDb) RollbackClose() error {
// Rollback doesn't apply to a memory database, so just call Close.
// Close handles the mutex locks.
return db.Close()
}
// Sync verifies that the database is coherent on disk and no outstanding
// transactions are in flight. This is part of the database.Db interface
// implementation.
//
// This implementation does not write any data to disk, so this function only
// grabs a lock to ensure it doesn't return until other operations are complete.
func (db *MemDb) Sync() error {
db.Lock()
defer db.Unlock()
if db.closed {
return ErrDbClosed
}
// There is nothing extra to do to sync the memory database. However,
// the lock is still grabbed to ensure the function does not return
// until other operations are complete.
return nil
}
// newMemDb returns a new memory-only database ready for block inserts.
func newMemDb() *MemDb {
db := MemDb{
blocks: make([]*btcwire.MsgBlock, 0, 200000),
blocksBySha: make(map[btcwire.ShaHash]int64),
txns: make(map[btcwire.ShaHash][]*tTxInsertData),
}
return &db
}


@ -0,0 +1,118 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package memdb_test
import (
"reflect"
"testing"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcd/database/memdb"
"github.com/btcsuite/btcnet"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwire"
)
// TestClosed ensures calling the interface functions on a closed database
// returns appropriate errors for the interface functions that return errors
// and does not panic or otherwise misbehave for functions which do not return
// errors.
func TestClosed(t *testing.T) {
db, err := database.CreateDB("memdb")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
_, err = db.InsertBlock(btcutil.NewBlock(btcnet.MainNetParams.GenesisBlock))
if err != nil {
t.Errorf("InsertBlock: %v", err)
}
if err := db.Close(); err != nil {
t.Errorf("Close: unexpected error %v", err)
}
genesisHash := btcnet.MainNetParams.GenesisHash
if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed {
t.Errorf("DropAfterBlockBySha: unexpected error %v", err)
}
if _, err := db.ExistsSha(genesisHash); err != memdb.ErrDbClosed {
t.Errorf("ExistsSha: Unexpected error: %v", err)
}
if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed {
t.Errorf("FetchBlockBySha: unexpected error %v", err)
}
if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed {
t.Errorf("FetchBlockShaByHeight: unexpected error %v", err)
}
if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed {
t.Errorf("FetchHeightRange: unexpected error %v", err)
}
genesisCoinbaseTx := btcnet.MainNetParams.GenesisBlock.Transactions[0]
coinbaseHash, err := genesisCoinbaseTx.TxSha()
if err != nil {
t.Errorf("TxSha: unexpected error %v", err)
}
if _, err := db.ExistsTxSha(&coinbaseHash); err != memdb.ErrDbClosed {
t.Errorf("ExistsTxSha: unexpected error %v", err)
}
if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed {
t.Errorf("FetchTxBySha: unexpected error %v", err)
}
requestHashes := []*btcwire.ShaHash{genesisHash}
reply := db.FetchTxByShaList(requestHashes)
if len(reply) != len(requestHashes) {
t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+
"got: %d, want: %d", len(reply), len(requestHashes))
}
for i, txLR := range reply {
wantReply := &database.TxListReply{
Sha: requestHashes[i],
Err: memdb.ErrDbClosed,
}
if !reflect.DeepEqual(wantReply, txLR) {
t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+
"want: %v", txLR, wantReply)
}
}
reply = db.FetchUnSpentTxByShaList(requestHashes)
if len(reply) != len(requestHashes) {
t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+
"got: %d, want: %d", len(reply), len(requestHashes))
}
for i, txLR := range reply {
wantReply := &database.TxListReply{
Sha: requestHashes[i],
Err: memdb.ErrDbClosed,
}
if !reflect.DeepEqual(wantReply, txLR) {
t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+
"got: %v\nwant: %v", txLR, wantReply)
}
}
if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed {
t.Errorf("NewestSha: unexpected error %v", err)
}
if err := db.Sync(); err != memdb.ErrDbClosed {
t.Errorf("Sync: unexpected error %v", err)
}
if err := db.RollbackClose(); err != memdb.ErrDbClosed {
t.Errorf("RollbackClose: unexpected error %v", err)
}
if err := db.Close(); err != memdb.ErrDbClosed {
t.Errorf("Close: unexpected error %v", err)
}
}

BIN
database/testdata/blocks1-256.bz2 vendored Normal file

Binary file not shown.