// Copyright (c) 2013-2018 The btcsuite developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package txscript

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"math/big"
	"strings"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)
// ScriptFlags is a bitmask defining additional operations or tests that will be
// done when executing a script pair.
type ScriptFlags uint32

const (
	// ScriptBip16 defines whether the bip16 threshold has passed and thus
	// pay-to-script-hash transactions will be fully validated.
	ScriptBip16 ScriptFlags = 1 << iota

	// ScriptStrictMultiSig defines whether to verify the stack item
	// used by CHECKMULTISIG is zero length.
	ScriptStrictMultiSig

	// ScriptDiscourageUpgradableNops defines whether to verify that
	// NOP1 through NOP10 are reserved for future soft-fork upgrades. This
	// flag must not be used for consensus critical code nor applied to
	// blocks as this flag is only for stricter standard transaction
	// checks. This flag is only applied when the above opcodes are
	// executed.
	ScriptDiscourageUpgradableNops

	// ScriptVerifyCheckLockTimeVerify defines whether to verify that
	// a transaction output is spendable based on the locktime.
	// This is BIP0065.
	ScriptVerifyCheckLockTimeVerify

	// ScriptVerifyCheckSequenceVerify defines whether to allow execution
	// pathways of a script to be restricted based on the age of the output
	// being spent. This is BIP0112.
	ScriptVerifyCheckSequenceVerify

	// ScriptVerifyCleanStack defines that the stack must contain only
	// one stack element after evaluation and that the element must be
	// true if interpreted as a boolean. This is rule 6 of BIP0062.
	// This flag should never be used without the ScriptBip16 flag nor the
	// ScriptVerifyWitness flag.
	ScriptVerifyCleanStack

	// ScriptVerifyDERSignatures defines that signatures are required
	// to comply with the DER format.
	ScriptVerifyDERSignatures

	// ScriptVerifyLowS defines that signatures are required to comply with
	// the DER format and whose S value is <= order / 2. This is rule 5
	// of BIP0062.
	ScriptVerifyLowS

	// ScriptVerifyMinimalData defines that signatures must use the smallest
	// push operator. This is both rules 3 and 4 of BIP0062.
	ScriptVerifyMinimalData

	// ScriptVerifyNullFail defines that signatures must be empty if
	// a CHECKSIG or CHECKMULTISIG operation fails.
	ScriptVerifyNullFail

	// ScriptVerifySigPushOnly defines that signature scripts must contain
	// only pushed data. This is rule 2 of BIP0062.
	ScriptVerifySigPushOnly

	// ScriptVerifyStrictEncoding defines that signature scripts and
	// public keys must follow the strict encoding requirements.
	ScriptVerifyStrictEncoding

	// ScriptVerifyWitness defines whether or not to verify a transaction
	// output using a witness program template.
	ScriptVerifyWitness

	// ScriptVerifyDiscourageUpgradeableWitnessProgram makes witness
	// programs with versions 2-16 non-standard.
	ScriptVerifyDiscourageUpgradeableWitnessProgram

	// ScriptVerifyMinimalIf makes a script with an OP_IF/OP_NOTIF whose
	// operand is anything other than empty vector or [0x01] non-standard.
	ScriptVerifyMinimalIf

	// ScriptVerifyWitnessPubKeyType makes a script within a check-sig
	// operation whose public key isn't serialized in a compressed format
	// non-standard.
	ScriptVerifyWitnessPubKeyType

	// ScriptVerifyTaproot defines whether or not to verify a transaction
	// output using the new taproot validation rules.
	ScriptVerifyTaproot
)
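
// Illustrative sketch (not part of the original file): callers build a
// validation policy by OR'ing these flags together. For example, a fairly
// typical "standard verification" policy combines P2SH, segwit, and
// clean-stack enforcement:
//
//	flags := ScriptBip16 | ScriptStrictMultiSig | ScriptVerifyWitness |
//		ScriptVerifyCleanStack
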
const (
	// MaxStackSize is the maximum combined height of stack and alt stack
	// during execution.
	MaxStackSize = 1000

	// MaxScriptSize is the maximum allowed length of a raw script.
	MaxScriptSize = 10000

	// payToWitnessPubKeyHashDataSize is the size of the witness program's
	// data push for a pay-to-witness-pub-key-hash output.
	payToWitnessPubKeyHashDataSize = 20

	// payToWitnessScriptHashDataSize is the size of the witness program's
	// data push for a pay-to-witness-script-hash output.
	payToWitnessScriptHashDataSize = 32

	// payToTaprootDataSize is the size of the witness program push for
	// taproot spends. This will be the serialized x-coordinate of the
	// top-level taproot output public key.
	payToTaprootDataSize = 32
)

const (
	// BaseSegwitWitnessVersion is the original witness version that defines
	// the initial set of segwit validation logic.
	BaseSegwitWitnessVersion = 0

	// TaprootWitnessVersion is the witness version that defines the new
	// taproot verification logic.
	TaprootWitnessVersion = 1
)

// halfOrder is used to tame ECDSA malleability (see BIP0062).
var halfOrder = new(big.Int).Rsh(btcec.S256().N, 1)
// taprootExecutionCtx houses the special context-specific information we need
// to validate a taproot script spend. This includes the annex, the running sig
// op count tally, and other relevant information.
type taprootExecutionCtx struct {
	annex []byte

	codeSepPos uint32

	tapLeafHash chainhash.Hash

	sigOpsBudget int32
}

// sigOpsDelta is both the starting budget for sig ops for tapscript
// verification, as well as the decrease in the total budget when we encounter
// a signature.
const sigOpsDelta = 50

// tallysigOp attempts to decrease the current sig ops budget by sigOpsDelta.
// An error is returned if after subtracting the delta, the budget is below
// zero.
func (t *taprootExecutionCtx) tallysigOp() error {
	t.sigOpsBudget -= sigOpsDelta

	if t.sigOpsBudget < 0 {
		return fmt.Errorf("max sig ops exceeded")
	}

	return nil
}

// newTaprootExecutionCtx returns a fresh instance of the taproot execution
// context.
func newTaprootExecutionCtx(inputWitnessSize int32) *taprootExecutionCtx {
	return &taprootExecutionCtx{
		codeSepPos:   blankCodeSepValue,
		sigOpsBudget: sigOpsDelta + inputWitnessSize,
	}
}
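
// Rough worked example (illustrative numbers only, not taken from the original
// source): an input whose serialized witness is 150 bytes starts with a budget
// of 150 + sigOpsDelta = 200, so at most four signature-checking operations can
// be tallied; a fifth call to tallysigOp would drive the budget below zero and
// return an error.
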
// Engine is the virtual machine that executes scripts.
type Engine struct {
	// The following fields are set when the engine is created and must not be
	// changed afterwards. The entries of the signature cache are mutated
	// during execution, however, the cache pointer itself is not changed.
	//
	// flags specifies the additional flags which modify the execution behavior
	// of the engine.
	//
	// tx identifies the transaction that contains the input which in turn
	// contains the signature script being executed.
	//
	// txIdx identifies the input index within the transaction that contains
	// the signature script being executed.
	//
	// version specifies the version of the public key script to execute. Since
	// signature scripts redeem public key scripts, this means the same version
	// also extends to signature scripts and redeem scripts in the case of
	// pay-to-script-hash.
	//
	// bip16 specifies that the public key script is of a special form that
	// indicates it is a BIP16 pay-to-script-hash and therefore the
	// execution must be treated as such.
	//
	// sigCache caches the results of signature verifications. This is useful
	// since transaction scripts are often executed more than once from various
	// contexts (e.g. new block templates, when transactions are first seen
	// prior to being mined, part of full block verification, etc).
	//
	// hashCache caches the midstate of segwit v0 and v1 sighashes to
	// optimize worst-case hashing complexity.
	//
	// prevOutFetcher is used to look up all the previous outputs of
	// taproot transactions, as that information is hashed into the
	// sighash digest for such inputs.
	flags          ScriptFlags
	tx             wire.MsgTx
	txIdx          int
	version        uint16
	bip16          bool
	sigCache       *SigCache
	hashCache      *TxSigHashes
	prevOutFetcher PrevOutputFetcher

	// The following fields handle keeping track of the current execution state
	// of the engine.
	//
	// scripts houses the raw scripts that are executed by the engine. This
	// includes the signature script as well as the public key script. It also
	// includes the redeem script in the case of pay-to-script-hash.
	//
	// scriptIdx tracks the index into the scripts array for the current program
	// counter.
	//
	// opcodeIdx tracks the number of the opcode within the current script for
	// the current program counter. Note that it differs from the actual byte
	// index into the script and is really only used for disassembly purposes.
	//
	// lastCodeSep specifies the position within the current script of the last
	// OP_CODESEPARATOR.
	//
	// tokenizer provides the token stream of the current script being executed
	// and doubles as state tracking for the program counter within the script.
	//
	// savedFirstStack keeps a copy of the stack from the first script when
	// performing pay-to-script-hash execution.
	//
	// dstack is the primary data stack the various opcodes push and pop data
	// to and from during execution.
	//
	// astack is the alternate data stack the various opcodes push and pop data
	// to and from during execution.
	//
	// condStack tracks the conditional execution state with support for
	// multiple nested conditional execution opcodes.
	//
	// numOps tracks the total number of non-push operations in a script and is
	// primarily used to enforce maximum limits.
	scripts         [][]byte
	scriptIdx       int
	opcodeIdx       int
	lastCodeSep     int
	tokenizer       ScriptTokenizer
	savedFirstStack [][]byte
	dstack          stack
	astack          stack
	condStack       []int
	numOps          int
	witnessVersion  int
	witnessProgram  []byte
	inputAmount     int64
	taprootCtx      *taprootExecutionCtx
}
// hasFlag returns whether the script engine instance has the passed flag set.
func (vm *Engine) hasFlag(flag ScriptFlags) bool {
	return vm.flags&flag == flag
}

// isBranchExecuting returns whether or not the current conditional branch is
// actively executing. For example, when the data stack has an OP_FALSE on it
// and an OP_IF is encountered, the branch is inactive until an OP_ELSE or
// OP_ENDIF is encountered. It properly handles nested conditionals.
func (vm *Engine) isBranchExecuting() bool {
	if len(vm.condStack) == 0 {
		return true
	}
	return vm.condStack[len(vm.condStack)-1] == OpCondTrue
}
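
// Concrete illustration (a sketch, not from the original source): while
// executing OP_0 OP_IF OP_RETURN OP_ELSE OP_1 OP_ENDIF, the OP_IF pops the
// false value and pushes a non-true entry onto condStack, so isBranchExecuting
// reports false and the OP_RETURN between OP_IF and OP_ELSE is skipped rather
// than executed; OP_ELSE flips the entry back to OpCondTrue and OP_ENDIF pops
// it.
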
// isOpcodeDisabled returns whether or not the opcode is disabled and thus is
// always bad to see in the instruction stream (even if turned off by a
// conditional).
func isOpcodeDisabled(opcode byte) bool {
	switch opcode {
	case OP_CAT:
		return true
	case OP_SUBSTR:
		return true
	case OP_LEFT:
		return true
	case OP_RIGHT:
		return true
	case OP_INVERT:
		return true
	case OP_AND:
		return true
	case OP_OR:
		return true
	case OP_XOR:
		return true
	case OP_2MUL:
		return true
	case OP_2DIV:
		return true
	case OP_MUL:
		return true
	case OP_DIV:
		return true
	case OP_MOD:
		return true
	case OP_LSHIFT:
		return true
	case OP_RSHIFT:
		return true
	default:
		return false
	}
}

// isOpcodeAlwaysIllegal returns whether or not the opcode is always illegal
// when passed over by the program counter even if in a non-executed branch (it
// isn't a coincidence that they are conditionals).
func isOpcodeAlwaysIllegal(opcode byte) bool {
	switch opcode {
	case OP_VERIF:
		return true
	case OP_VERNOTIF:
		return true
	default:
		return false
	}
}

// isOpcodeConditional returns whether or not the opcode is a conditional opcode
// which changes the conditional execution stack when executed.
func isOpcodeConditional(opcode byte) bool {
	switch opcode {
	case OP_IF:
		return true
	case OP_NOTIF:
		return true
	case OP_ELSE:
		return true
	case OP_ENDIF:
		return true
	default:
		return false
	}
}
// checkMinimalDataPush returns whether or not the provided opcode is the
// smallest possible way to represent the given data. For example, the value 15
// could be pushed with OP_DATA_1 15 (among other variations); however, OP_15 is
// a single opcode that represents the same value and is only a single byte
// versus two bytes.
func checkMinimalDataPush(op *opcode, data []byte) error {
	opcodeVal := op.value
	dataLen := len(data)
	switch {
	case dataLen == 0 && opcodeVal != OP_0:
		str := fmt.Sprintf("zero length data push is encoded with opcode %s "+
			"instead of OP_0", op.name)
		return scriptError(ErrMinimalData, str)
	case dataLen == 1 && data[0] >= 1 && data[0] <= 16:
		if opcodeVal != OP_1+data[0]-1 {
			// Should have used OP_1 .. OP_16
			str := fmt.Sprintf("data push of the value %d encoded with opcode "+
				"%s instead of OP_%d", data[0], op.name, data[0])
			return scriptError(ErrMinimalData, str)
		}
	case dataLen == 1 && data[0] == 0x81:
		if opcodeVal != OP_1NEGATE {
			str := fmt.Sprintf("data push of the value -1 encoded with opcode "+
				"%s instead of OP_1NEGATE", op.name)
			return scriptError(ErrMinimalData, str)
		}
	case dataLen <= 75:
		if int(opcodeVal) != dataLen {
			// Should have used a direct push
			str := fmt.Sprintf("data push of %d bytes encoded with opcode %s "+
				"instead of OP_DATA_%d", dataLen, op.name, dataLen)
			return scriptError(ErrMinimalData, str)
		}
	case dataLen <= 255:
		if opcodeVal != OP_PUSHDATA1 {
			str := fmt.Sprintf("data push of %d bytes encoded with opcode %s "+
				"instead of OP_PUSHDATA1", dataLen, op.name)
			return scriptError(ErrMinimalData, str)
		}
	case dataLen <= 65535:
		if opcodeVal != OP_PUSHDATA2 {
			str := fmt.Sprintf("data push of %d bytes encoded with opcode %s "+
				"instead of OP_PUSHDATA2", dataLen, op.name)
			return scriptError(ErrMinimalData, str)
		}
	}
	return nil
}
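
// A few concrete cases (illustrative, not from the original source): a push of
// the single byte 0x07 is only minimal when encoded as OP_7; a 20-byte push
// (e.g. a public key hash) must use the direct OP_DATA_20 form; and an 80-byte
// push must use OP_PUSHDATA1 rather than OP_PUSHDATA2 or OP_PUSHDATA4.
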
// executeOpcode performs execution on the passed opcode. It takes into account
// whether or not it is hidden by conditionals, but some rules still must be
// tested in this case.
func (vm *Engine) executeOpcode(op *opcode, data []byte) error {
	// Disabled opcodes are a failure on the program counter.
	if isOpcodeDisabled(op.value) {
		str := fmt.Sprintf("attempt to execute disabled opcode %s", op.name)
		return scriptError(ErrDisabledOpcode, str)
	}

	// Always-illegal opcodes are a failure on the program counter.
	if isOpcodeAlwaysIllegal(op.value) {
		str := fmt.Sprintf("attempt to execute reserved opcode %s", op.name)
		return scriptError(ErrReservedOpcode, str)
	}

	// Note that this includes OP_RESERVED which counts as a push operation.
	if op.value > OP_16 {
		vm.numOps++
		if vm.numOps > MaxOpsPerScript {
			str := fmt.Sprintf("exceeded max operation limit of %d",
				MaxOpsPerScript)
			return scriptError(ErrTooManyOperations, str)
		}

	} else if len(data) > MaxScriptElementSize {
		str := fmt.Sprintf("element size %d exceeds max allowed size %d",
			len(data), MaxScriptElementSize)
		return scriptError(ErrElementTooBig, str)
	}

	// Nothing left to do when this is not a conditional opcode and it is
	// not in an executing branch.
	if !vm.isBranchExecuting() && !isOpcodeConditional(op.value) {
		return nil
	}

	// Ensure all executed data push opcodes use the minimal encoding when
	// the minimal data verification flag is set.
	if vm.dstack.verifyMinimalData && vm.isBranchExecuting() &&
		op.value >= 0 && op.value <= OP_PUSHDATA4 {

		if err := checkMinimalDataPush(op, data); err != nil {
			return err
		}
	}

	return op.opfunc(op, data, vm)
}

// checkValidPC returns an error if the current script position is not valid for
// execution.
func (vm *Engine) checkValidPC() error {
	if vm.scriptIdx >= len(vm.scripts) {
		str := fmt.Sprintf("script index %d beyond total scripts %d",
			vm.scriptIdx, len(vm.scripts))
		return scriptError(ErrInvalidProgramCounter, str)
	}
	return nil
}

// isWitnessVersionActive returns true if a witness program was extracted
// during the initialization of the Engine, and the program's version matches
// the specified version.
func (vm *Engine) isWitnessVersionActive(version uint) bool {
	return vm.witnessProgram != nil && uint(vm.witnessVersion) == version
}
// verifyWitnessProgram validates the stored witness program using the passed
// witness as input.
func (vm *Engine) verifyWitnessProgram(witness wire.TxWitness) error {
	switch {

	// We're attempting to verify a base (witness version 0) segwit output,
	// so we'll be looking for either a p2wsh or a p2wkh spend.
	case vm.isWitnessVersionActive(BaseSegwitWitnessVersion):
		switch len(vm.witnessProgram) {
		case payToWitnessPubKeyHashDataSize: // P2WKH
			// The witness stack should consist of exactly two
			// items: the signature, and the pubkey.
			if len(witness) != 2 {
				err := fmt.Sprintf("should have exactly two "+
					"items in witness, instead have %v", len(witness))
				return scriptError(ErrWitnessProgramMismatch, err)
			}

			// Now we'll resume execution as if it were a regular
			// p2pkh transaction.
			pkScript, err := payToPubKeyHashScript(vm.witnessProgram)
			if err != nil {
				return err
			}

			const scriptVersion = 0
			err = checkScriptParses(vm.version, pkScript)
			if err != nil {
				return err
			}

			// Set the stack to the provided witness stack, then
			// append the pkScript generated above as the next
			// script to execute.
			vm.scripts = append(vm.scripts, pkScript)
			vm.SetStack(witness)

		case payToWitnessScriptHashDataSize: // P2WSH
			// Additionally, the witness stack MUST NOT be empty at
			// this point.
			if len(witness) == 0 {
				return scriptError(ErrWitnessProgramEmpty, "witness "+
					"program empty passed empty witness")
			}

			// Obtain the witness script which should be the last
			// element in the passed stack. The size of the script
			// MUST NOT exceed the max script size.
			witnessScript := witness[len(witness)-1]
			if len(witnessScript) > MaxScriptSize {
				str := fmt.Sprintf("witnessScript size %d "+
					"is larger than max allowed size %d",
					len(witnessScript), MaxScriptSize)
				return scriptError(ErrScriptTooBig, str)
			}

			// Ensure that the serialized pkScript at the end of
			// the witness stack matches the witness program.
			witnessHash := sha256.Sum256(witnessScript)
			if !bytes.Equal(witnessHash[:], vm.witnessProgram) {
				return scriptError(ErrWitnessProgramMismatch,
					"witness program hash mismatch")
			}

			// With all the validity checks passed, assert that the
			// script parses without failure.
			const scriptVersion = 0
			err := checkScriptParses(vm.version, witnessScript)
			if err != nil {
				return err
			}

			// The hash matched successfully, so use the witness as
			// the stack, and set the witnessScript to be the next
			// script executed.
			vm.scripts = append(vm.scripts, witnessScript)
			vm.SetStack(witness[:len(witness)-1])

		default:
			errStr := fmt.Sprintf("length of witness program "+
				"must either be %v or %v bytes, instead is %v bytes",
				payToWitnessPubKeyHashDataSize,
				payToWitnessScriptHashDataSize,
				len(vm.witnessProgram))
			return scriptError(ErrWitnessProgramWrongLength, errStr)
		}

	// We're attempting to verify a taproot input, and the witness
	// program data push is of the expected size, so we'll be looking for a
	// normal key-path spend, or a merkle proof for a tapscript with
	// execution afterwards.
	case vm.isWitnessVersionActive(TaprootWitnessVersion) &&
		len(vm.witnessProgram) == payToTaprootDataSize && !vm.bip16:

		// If taproot isn't currently active, then we'll return a
		// success here in place as we don't apply the new rules unless
		// the flag flips, as governed by the version bits deployment.
		if !vm.hasFlag(ScriptVerifyTaproot) {
			return nil
		}

		// If there are no stack elements at all, then this is an
		// invalid spend.
		if len(witness) == 0 {
			return scriptError(ErrWitnessProgramEmpty, "witness "+
				"program empty passed empty witness")
		}

		// At this point, we know taproot is active, so we'll populate
		// the taproot execution context.
		vm.taprootCtx = newTaprootExecutionCtx(
			int32(witness.SerializeSize()),
		)

		// If we can detect the annex, then drop that off the stack,
		// we'll only need it to compute the sighash later.
		if isAnnexedWitness(witness) {
			// TODO(roasbeef): need the annex stored somewhere?
			//  * compute annex hash: sha(sizeAnnex || annex)
			vm.taprootCtx.annex, _ = extractAnnex(witness)

			// Snip the annex off the end of the witness stack.
			witness = witness[:len(witness)-1]
		}

		// From here, we'll either be validating a normal key spend, or
		// a spend from the tap script leaf using a committed leaf.
		switch {

		// If there's only a single element left on the stack (the
		// signature), then we'll apply the normal top-level schnorr
		// signature verification.
		case len(witness) == 1:
			// As we only have a single element left (after maybe
			// removing the annex), we'll do normal taproot
			// keyspend validation.
			rawSig := witness[0]
			err := VerifyTaprootKeySpend(
				vm.witnessProgram, rawSig, &vm.tx, vm.txIdx,
				vm.prevOutFetcher, vm.hashCache, vm.sigCache,
			)
			if err != nil {
				// TODO(roasbeef): proper error
				return err
			}

		case vm.hasFlag(ScriptVerifyDiscourageUpgradeableWitnessProgram):
			errStr := fmt.Sprintf("new witness program versions "+
				"invalid: %v", vm.witnessProgram)
			return scriptError(ErrDiscourageUpgradableWitnessProgram, errStr)

		default:
			// If we encounter an unknown witness program version and we
			// aren't discouraging future unknown witness based soft-forks,
			// then we de-activate the segwit behavior within the VM for
			// the remainder of execution.
			vm.witnessProgram = nil
		}

		// TODO(roasbeef): other sanity checks here
		switch {
		case vm.isWitnessVersionActive(BaseSegwitWitnessVersion):
			// All elements within the witness stack must not be greater
			// than the maximum bytes which are allowed to be pushed onto
			// the stack.
			for _, witElement := range vm.GetStack() {
				if len(witElement) > MaxScriptElementSize {
					str := fmt.Sprintf("element size %d exceeds "+
						"max allowed size %d", len(witElement),
						MaxScriptElementSize)
					return scriptError(ErrElementTooBig, str)
				}
			}
		}

		return nil
	}

	return nil
}
// DisasmPC returns the string for the disassembly of the opcode that will be
// next to execute when Step is called.
func (vm *Engine) DisasmPC() (string, error) {
	if err := vm.checkValidPC(); err != nil {
		return "", err
	}

	// Create a copy of the current tokenizer and parse the next opcode in the
	// copy to avoid mutating the current one.
	peekTokenizer := vm.tokenizer
	if !peekTokenizer.Next() {
		// Note that due to the fact that all scripts are checked for parse
		// failures before this code ever runs, there should never be an error
		// here, but check again to be safe in case a refactor breaks that
		// assumption or new script versions are introduced with different
		// semantics.
		if err := peekTokenizer.Err(); err != nil {
			return "", err
		}

		// Note that this should be impossible to hit in practice because the
		// only way it could happen would be for the final opcode of a script to
		// already be parsed without the script index having been updated, which
		// is not the case since stepping the script always increments the
		// script index when parsing and executing the final opcode of a script.
		//
		// However, check again to be safe in case a refactor breaks that
		// assumption or new script versions are introduced with different
		// semantics.
		str := fmt.Sprintf("program counter beyond script index %d (bytes %x)",
			vm.scriptIdx, vm.scripts[vm.scriptIdx])
		return "", scriptError(ErrInvalidProgramCounter, str)
	}

	var buf strings.Builder
	disasmOpcode(&buf, peekTokenizer.op, peekTokenizer.Data(), false)
	return fmt.Sprintf("%02x:%04x: %s", vm.scriptIdx, vm.opcodeIdx,
		buf.String()), nil
}

// DisasmScript returns the disassembly string for the script at the requested
// offset index. Index 0 is the signature script and 1 is the public key
// script. In the case of pay-to-script-hash, index 2 is the redeem script once
// the execution has progressed far enough to have successfully verified script
// hash and thus add the script to the scripts to execute.
func (vm *Engine) DisasmScript(idx int) (string, error) {
	if idx >= len(vm.scripts) {
		str := fmt.Sprintf("script index %d >= total scripts %d", idx,
			len(vm.scripts))
		return "", scriptError(ErrInvalidIndex, str)
	}

	var disbuf strings.Builder
	script := vm.scripts[idx]
	tokenizer := MakeScriptTokenizer(vm.version, script)
	var opcodeIdx int
	for tokenizer.Next() {
		disbuf.WriteString(fmt.Sprintf("%02x:%04x: ", idx, opcodeIdx))
		disasmOpcode(&disbuf, tokenizer.op, tokenizer.Data(), false)
		disbuf.WriteByte('\n')
		opcodeIdx++
	}
	return disbuf.String(), tokenizer.Err()
}
// CheckErrorCondition returns nil if the running script has ended and was
// successful, leaving a true boolean on the stack. An error otherwise,
// including if the script has not finished.
func (vm *Engine) CheckErrorCondition(finalScript bool) error {
	// Check execution is actually done by ensuring the script index is after
	// the final script in the script array.
	if vm.scriptIdx < len(vm.scripts) {
		return scriptError(ErrScriptUnfinished,
			"error check when script unfinished")
	}

	// If we're in version zero witness execution mode, and this was the
	// final script, then the stack MUST be clean in order to maintain
	// compatibility with BIP16.
	if finalScript && vm.isWitnessVersionActive(BaseSegwitWitnessVersion) &&
		vm.dstack.Depth() != 1 {

		return scriptError(ErrEvalFalse, "witness program must "+
			"have clean stack")
	}

	// The final script must end with exactly one data stack item when the
	// verify clean stack flag is set. Otherwise, there must be at least one
	// data stack item in order to interpret it as a boolean.
	if finalScript && vm.hasFlag(ScriptVerifyCleanStack) &&
		vm.dstack.Depth() != 1 {

		str := fmt.Sprintf("stack must contain exactly one item (contains %d)",
			vm.dstack.Depth())
		return scriptError(ErrCleanStack, str)
	} else if vm.dstack.Depth() < 1 {
		return scriptError(ErrEmptyStack,
			"stack empty at end of script execution")
	}

	v, err := vm.dstack.PopBool()
	if err != nil {
		return err
	}
	if !v {
		// Log interesting data.
		log.Tracef("%v", newLogClosure(func() string {
			var buf strings.Builder
			buf.WriteString("scripts failed:\n")
			for i := range vm.scripts {
				dis, _ := vm.DisasmScript(i)
				buf.WriteString(fmt.Sprintf("script%d:\n", i))
				buf.WriteString(dis)
			}
			return buf.String()
		}))
		return scriptError(ErrEvalFalse,
			"false stack entry at end of script execution")
	}
	return nil
}
// Step executes the next instruction and moves the program counter to the next
// opcode in the script, or the next script if the current has ended. Step will
// return true in the case that the last opcode was successfully executed.
//
// The result of calling Step or any other method is undefined if an error is
// returned.
func (vm *Engine) Step() (done bool, err error) {
	// Verify the engine is pointing to a valid program counter.
	if err := vm.checkValidPC(); err != nil {
		return true, err
	}

	// Attempt to parse the next opcode from the current script.
	if !vm.tokenizer.Next() {
		// Note that due to the fact that all scripts are checked for parse
		// failures before this code ever runs, there should never be an error
		// here, but check again to be safe in case a refactor breaks that
		// assumption or new script versions are introduced with different
		// semantics.
		if err := vm.tokenizer.Err(); err != nil {
			return false, err
		}

		str := fmt.Sprintf("attempt to step beyond script index %d (bytes %x)",
			vm.scriptIdx, vm.scripts[vm.scriptIdx])
		return true, scriptError(ErrInvalidProgramCounter, str)
	}

	// Execute the opcode while taking into account several things such as
	// disabled opcodes, illegal opcodes, maximum allowed operations per script,
	// maximum script element sizes, and conditionals.
	err = vm.executeOpcode(vm.tokenizer.op, vm.tokenizer.Data())
	if err != nil {
		return true, err
	}

	// The number of elements in the combination of the data and alt stacks
	// must not exceed the maximum number of stack elements allowed.
	combinedStackSize := vm.dstack.Depth() + vm.astack.Depth()
	if combinedStackSize > MaxStackSize {
		str := fmt.Sprintf("combined stack size %d > max allowed %d",
			combinedStackSize, MaxStackSize)
		return false, scriptError(ErrStackOverflow, str)
	}

	// Prepare for next instruction.
	vm.opcodeIdx++
	if vm.tokenizer.Done() {
		// Illegal to have a conditional that straddles two scripts.
		if len(vm.condStack) != 0 {
			return false, scriptError(ErrUnbalancedConditional,
				"end of script reached in conditional execution")
		}

		// Alt stack doesn't persist between scripts.
		_ = vm.astack.DropN(vm.astack.Depth())

		// The number of operations is per script.
		vm.numOps = 0

		// Reset the opcode index for the next script.
		vm.opcodeIdx = 0

		// Advance to the next script as needed.
		switch {
		case vm.scriptIdx == 0 && vm.bip16:
			vm.scriptIdx++
			vm.savedFirstStack = vm.GetStack()

		case vm.scriptIdx == 1 && vm.bip16:
			// Put us past the end for CheckErrorCondition()
			vm.scriptIdx++

			// Check script ran successfully.
			err := vm.CheckErrorCondition(false)
			if err != nil {
				return false, err
			}

			// Obtain the redeem script from the first stack and ensure it
			// parses.
			script := vm.savedFirstStack[len(vm.savedFirstStack)-1]
			if err := checkScriptParses(vm.version, script); err != nil {
				return false, err
			}
			vm.scripts = append(vm.scripts, script)

			// Set stack to be the stack from first script minus the redeem
			// script itself
			vm.SetStack(vm.savedFirstStack[:len(vm.savedFirstStack)-1])

		case vm.scriptIdx == 1 && vm.witnessProgram != nil,
			vm.scriptIdx == 2 && vm.witnessProgram != nil && vm.bip16: // np2sh

			vm.scriptIdx++

			witness := vm.tx.TxIn[vm.txIdx].Witness
			if err := vm.verifyWitnessProgram(witness); err != nil {
				return false, err
			}

		default:
			vm.scriptIdx++
		}

		// Skip empty scripts.
		if vm.scriptIdx < len(vm.scripts) && len(vm.scripts[vm.scriptIdx]) == 0 {
			vm.scriptIdx++
		}

		vm.lastCodeSep = 0
		if vm.scriptIdx >= len(vm.scripts) {
			return true, nil
		}

		// Finally, update the current tokenizer used to parse through scripts
		// one opcode at a time to start from the beginning of the new script
		// associated with the program counter.
		vm.tokenizer = MakeScriptTokenizer(vm.version, vm.scripts[vm.scriptIdx])
	}

	return false, nil
}
// Execute will execute all scripts in the script engine and return either nil
// for successful validation or an error if one occurred.
func (vm *Engine) Execute() (err error) {
	// All script versions other than 0 currently execute without issue,
	// making all outputs to them anyone-can-pay. In the future this
	// will allow for the addition of new scripting languages.
	if vm.version != 0 {
		return nil
	}

	done := false
	for !done {
		log.Tracef("%v", newLogClosure(func() string {
			dis, err := vm.DisasmPC()
			if err != nil {
				return fmt.Sprintf("stepping - failed to disasm pc: %v", err)
			}
			return fmt.Sprintf("stepping %v", dis)
		}))

		done, err = vm.Step()
		if err != nil {
			return err
		}
		log.Tracef("%v", newLogClosure(func() string {
			var dstr, astr string

			// Log the non-empty stacks when tracing.
			if vm.dstack.Depth() != 0 {
				dstr = "Stack:\n" + vm.dstack.String()
			}
			if vm.astack.Depth() != 0 {
				astr = "AltStack:\n" + vm.astack.String()
			}

			return dstr + astr
		}))
	}

	return vm.CheckErrorCondition(true)
}
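
// Typical usage, sketched for illustration (pkScript, tx, prevOutValue, and
// prevOutFetcher are assumed to be supplied by the caller and are not defined
// in this file):
//
//	vm, err := NewEngine(
//		pkScript, tx, 0, ScriptBip16|ScriptVerifyWitness|ScriptVerifyCleanStack,
//		nil, nil, prevOutValue, prevOutFetcher,
//	)
//	if err == nil {
//		err = vm.Execute()
//	}
//	// A nil error means input 0 of tx validly spends pkScript.
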
// subScript returns the script since the last OP_CODESEPARATOR.
func (vm *Engine) subScript() []byte {
	return vm.scripts[vm.scriptIdx][vm.lastCodeSep:]
}

// checkHashTypeEncoding returns whether or not the passed hashtype adheres to
// the strict encoding requirements if enabled.
func (vm *Engine) checkHashTypeEncoding(hashType SigHashType) error {
	if !vm.hasFlag(ScriptVerifyStrictEncoding) {
		return nil
	}

	sigHashType := hashType & ^SigHashAnyOneCanPay
	if sigHashType < SigHashAll || sigHashType > SigHashSingle {
		str := fmt.Sprintf("invalid hash type 0x%x", hashType)
		return scriptError(ErrInvalidSigHashType, str)
	}
	return nil
}
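
// For example (illustrative, assuming only the standard sighash values
// SigHashAll = 0x01 through SigHashSingle = 0x03 plus the SigHashAnyOneCanPay
// bit 0x80): a hash type of 0x81 passes this check because masking off the
// anyone-can-pay bit leaves SigHashAll, whereas 0x00 or 0x04 would be rejected
// as an invalid hash type.
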
// isStrictPubKeyEncoding returns whether or not the passed public key adheres
// to the strict encoding requirements.
func isStrictPubKeyEncoding(pubKey []byte) bool {
	if len(pubKey) == 33 && (pubKey[0] == 0x02 || pubKey[0] == 0x03) {
		// Compressed
		return true
	}
	if len(pubKey) == 65 {
		switch pubKey[0] {
		case 0x04:
			// Uncompressed
			return true

		case 0x06, 0x07:
			// Hybrid
			return true
		}
	}
	return false
}

// checkPubKeyEncoding returns whether or not the passed public key adheres to
// the strict encoding requirements if enabled.
func (vm *Engine) checkPubKeyEncoding(pubKey []byte) error {
	if vm.hasFlag(ScriptVerifyWitnessPubKeyType) &&
		vm.isWitnessVersionActive(BaseSegwitWitnessVersion) &&
		!btcec.IsCompressedPubKey(pubKey) {

		str := "only compressed keys are accepted post-segwit"
		return scriptError(ErrWitnessPubKeyType, str)
	}

	if !vm.hasFlag(ScriptVerifyStrictEncoding) {
		return nil
	}

	if len(pubKey) == 33 && (pubKey[0] == 0x02 || pubKey[0] == 0x03) {
		// Compressed
		return nil
	}
	if len(pubKey) == 65 && pubKey[0] == 0x04 {
		// Uncompressed
		return nil
	}

	return scriptError(ErrPubKeyType, "unsupported public key type")
}
// checkSignatureEncoding returns whether or not the passed signature adheres to
// the strict encoding requirements if enabled.
func (vm *Engine) checkSignatureEncoding(sig []byte) error {
	if !vm.hasFlag(ScriptVerifyDERSignatures) &&
		!vm.hasFlag(ScriptVerifyLowS) &&
		!vm.hasFlag(ScriptVerifyStrictEncoding) {

		return nil
	}

	// The format of a DER encoded signature is as follows:
	//
	// 0x30 <total length> 0x02 <length of R> <R> 0x02 <length of S> <S>
	//   - 0x30 is the ASN.1 identifier for a sequence
	//   - Total length is 1 byte and specifies length of all remaining data
	//   - 0x02 is the ASN.1 identifier that specifies an integer follows
	//   - Length of R is 1 byte and specifies how many bytes R occupies
	//   - R is the arbitrary length big-endian encoded number which
	//     represents the R value of the signature. DER encoding dictates
	//     that the value must be encoded using the minimum possible number
	//     of bytes. This implies the first byte can only be null if the
	//     highest bit of the next byte is set in order to prevent it from
	//     being interpreted as a negative number.
	//   - 0x02 is once again the ASN.1 integer identifier
	//   - Length of S is 1 byte and specifies how many bytes S occupies
	//   - S is the arbitrary length big-endian encoded number which
	//     represents the S value of the signature. The encoding rules are
	//     identical as those for R.
	const (
		asn1SequenceID = 0x30
		asn1IntegerID  = 0x02

		// minSigLen is the minimum length of a DER encoded signature and is
		// when both R and S are 1 byte each.
		//
		// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
		minSigLen = 8

		// maxSigLen is the maximum length of a DER encoded signature and is
		// when both R and S are 33 bytes each. It is 33 bytes because a
		// 256-bit integer requires 32 bytes and an additional leading null byte
		// might be required if the high bit is set in the value.
		//
		// 0x30 + <1-byte> + 0x02 + 0x21 + <33 bytes> + 0x2 + 0x21 + <33 bytes>
		maxSigLen = 72

		// sequenceOffset is the byte offset within the signature of the
		// expected ASN.1 sequence identifier.
		sequenceOffset = 0

		// dataLenOffset is the byte offset within the signature of the expected
		// total length of all remaining data in the signature.
		dataLenOffset = 1

		// rTypeOffset is the byte offset within the signature of the ASN.1
		// identifier for R and is expected to indicate an ASN.1 integer.
		rTypeOffset = 2

		// rLenOffset is the byte offset within the signature of the length of
		// R.
		rLenOffset = 3

		// rOffset is the byte offset within the signature of R.
		rOffset = 4
	)
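
	// As a concrete reference point (illustrative example, not part of the
	// original source), the smallest well-formed encoding accepted by the
	// checks below is the 8-byte sequence 30 06 02 01 01 02 01 01: an ASN.1
	// sequence of total length 6 containing the one-byte integer R = 0x01
	// followed by the one-byte integer S = 0x01.
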
	// The signature must adhere to the minimum and maximum allowed length.
	sigLen := len(sig)
	if sigLen < minSigLen {
		str := fmt.Sprintf("malformed signature: too short: %d < %d", sigLen,
			minSigLen)
		return scriptError(ErrSigTooShort, str)
	}
	if sigLen > maxSigLen {
		str := fmt.Sprintf("malformed signature: too long: %d > %d", sigLen,
			maxSigLen)
		return scriptError(ErrSigTooLong, str)
	}

	// The signature must start with the ASN.1 sequence identifier.
	if sig[sequenceOffset] != asn1SequenceID {
		str := fmt.Sprintf("malformed signature: format has wrong type: %#x",
			sig[sequenceOffset])
		return scriptError(ErrSigInvalidSeqID, str)
	}

	// The signature must indicate the correct amount of data for all elements
	// related to R and S.
	if int(sig[dataLenOffset]) != sigLen-2 {
		str := fmt.Sprintf("malformed signature: bad length: %d != %d",
			sig[dataLenOffset], sigLen-2)
		return scriptError(ErrSigInvalidDataLen, str)
	}

	// Calculate the offsets of the elements related to S and ensure S is inside
	// the signature.
	//
	// rLen specifies the length of the big-endian encoded number which
	// represents the R value of the signature.
	//
	// sTypeOffset is the offset of the ASN.1 identifier for S and, like its R
	// counterpart, is expected to indicate an ASN.1 integer.
	//
	// sLenOffset and sOffset are the byte offsets within the signature of the
	// length of S and S itself, respectively.
	rLen := int(sig[rLenOffset])
	sTypeOffset := rOffset + rLen
	sLenOffset := sTypeOffset + 1
	if sTypeOffset >= sigLen {
		str := "malformed signature: S type indicator missing"
		return scriptError(ErrSigMissingSTypeID, str)
	}
	if sLenOffset >= sigLen {
		str := "malformed signature: S length missing"
		return scriptError(ErrSigMissingSLen, str)
	}

	// The lengths of R and S must match the overall length of the signature.
	//
	// sLen specifies the length of the big-endian encoded number which
	// represents the S value of the signature.
	sOffset := sLenOffset + 1
	sLen := int(sig[sLenOffset])
	if sOffset+sLen != sigLen {
		str := "malformed signature: invalid S length"
		return scriptError(ErrSigInvalidSLen, str)
	}

	// R elements must be ASN.1 integers.
	if sig[rTypeOffset] != asn1IntegerID {
		str := fmt.Sprintf("malformed signature: R integer marker: %#x != %#x",
			sig[rTypeOffset], asn1IntegerID)
		return scriptError(ErrSigInvalidRIntID, str)
	}

	// Zero-length integers are not allowed for R.
	if rLen == 0 {
		str := "malformed signature: R length is zero"
		return scriptError(ErrSigZeroRLen, str)
	}

	// R must not be negative.
	if sig[rOffset]&0x80 != 0 {
		str := "malformed signature: R is negative"
		return scriptError(ErrSigNegativeR, str)
	}

	// Null bytes at the start of R are not allowed, unless R would otherwise be
	// interpreted as a negative number.
	if rLen > 1 && sig[rOffset] == 0x00 && sig[rOffset+1]&0x80 == 0 {
		str := "malformed signature: R value has too much padding"
		return scriptError(ErrSigTooMuchRPadding, str)
	}

	// S elements must be ASN.1 integers.
	if sig[sTypeOffset] != asn1IntegerID {
		str := fmt.Sprintf("malformed signature: S integer marker: %#x != %#x",
			sig[sTypeOffset], asn1IntegerID)
		return scriptError(ErrSigInvalidSIntID, str)
	}

	// Zero-length integers are not allowed for S.
	if sLen == 0 {
		str := "malformed signature: S length is zero"
		return scriptError(ErrSigZeroSLen, str)
	}

	// S must not be negative.
	if sig[sOffset]&0x80 != 0 {
		str := "malformed signature: S is negative"
		return scriptError(ErrSigNegativeS, str)
	}

	// Null bytes at the start of S are not allowed, unless S would otherwise be
	// interpreted as a negative number.
	if sLen > 1 && sig[sOffset] == 0x00 && sig[sOffset+1]&0x80 == 0 {
		str := "malformed signature: S value has too much padding"
		return scriptError(ErrSigTooMuchSPadding, str)
	}

	// Verify the S value is <= half the order of the curve. This check is done
	// because when it is higher, the complement modulo the order can be used
	// instead which is a shorter encoding by 1 byte. Further, without
	// enforcing this, it is possible to replace a signature in a valid
	// transaction with the complement while still being a valid signature that
	// verifies. This would result in changing the transaction hash and thus is
	// a source of malleability.
	if vm.hasFlag(ScriptVerifyLowS) {
		sValue := new(big.Int).SetBytes(sig[sOffset : sOffset+sLen])
		if sValue.Cmp(halfOrder) > 0 {
			return scriptError(ErrSigHighS, "signature is not canonical due "+
				"to unnecessarily high S value")
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
// getStack returns the contents of stack as a byte array bottom up
|
2015-04-25 23:19:43 +02:00
|
|
|
func getStack(stack *stack) [][]byte {
|
2015-04-20 22:28:00 +02:00
|
|
|
array := make([][]byte, stack.Depth())
|
|
|
|
for i := range array {
|
|
|
|
// PeekByteArry can't fail due to overflow, already checked
|
2015-04-30 03:16:00 +02:00
|
|
|
array[len(array)-i-1], _ = stack.PeekByteArray(int32(i))
|
2015-04-20 22:28:00 +02:00
|
|
|
}
|
|
|
|
return array
|
|
|
|
}

// setStack sets the stack to the contents of the array where the last item in
// the array is the top item in the stack.
func setStack(stack *stack, data [][]byte) {
	// This cannot error. Only errors are for invalid arguments.
	_ = stack.DropN(stack.Depth())

	for i := range data {
		stack.PushByteArray(data[i])
	}
}

// GetStack returns the contents of the primary stack as an array where the
// last item in the array is the top of the stack.
func (vm *Engine) GetStack() [][]byte {
	return getStack(&vm.dstack)
}

// SetStack sets the contents of the primary stack to the contents of the
// provided array where the last item in the array will be the top of the stack.
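//
// For example, SetStack([][]byte{{0x01}, {0x02}, {0x03}}) leaves {0x03} as the
// top item of the stack, and a subsequent GetStack returns the items in the
// same order.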
func (vm *Engine) SetStack(data [][]byte) {
	setStack(&vm.dstack, data)
}

// GetAltStack returns the contents of the alternate stack as an array where the
// last item in the array is the top of the stack.
func (vm *Engine) GetAltStack() [][]byte {
	return getStack(&vm.astack)
}

// SetAltStack sets the contents of the alternate stack to the contents of the
// provided array where the last item in the array will be the top of the stack.
func (vm *Engine) SetAltStack(data [][]byte) {
	setStack(&vm.astack, data)
}

// NewEngine returns a new script engine for the provided public key script,
// transaction, and input index. The flags modify the behavior of the script
// engine according to the description provided by each flag.
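//
// A minimal usage sketch (pkScript, tx, and amount are placeholders):
//
//	prevOuts := NewCannedPrevOutputFetcher(pkScript, amount)
//	vm, err := NewEngine(pkScript, tx, 0, StandardVerifyFlags, nil, nil,
//		amount, prevOuts)
//	if err != nil {
//		// Handle the error.
//	}
//	if err := vm.Execute(); err != nil {
//		// The script did not verify.
//	}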
func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags,
	sigCache *SigCache, hashCache *TxSigHashes, inputAmount int64,
	prevOutFetcher PrevOutputFetcher) (*Engine, error) {

	const scriptVersion = 0

	// The provided transaction input index must refer to a valid input.
	if txIdx < 0 || txIdx >= len(tx.TxIn) {
		str := fmt.Sprintf("transaction input index %d is negative or "+
			">= %d", txIdx, len(tx.TxIn))
		return nil, scriptError(ErrInvalidIndex, str)
	}
	scriptSig := tx.TxIn[txIdx].SignatureScript

	// When both the signature script and public key script are empty the result
	// is necessarily an error since the stack would end up being empty which is
	// equivalent to a false top element. Thus, just return the relevant error
	// now as an optimization.
	if len(scriptSig) == 0 && len(scriptPubKey) == 0 {
		return nil, scriptError(ErrEvalFalse,
			"false stack entry at end of script execution")
	}

	// The clean stack flag (ScriptVerifyCleanStack) is not allowed without
	// either the pay-to-script-hash (P2SH) evaluation (ScriptBip16)
	// flag or the Segregated Witness (ScriptVerifyWitness) flag.
	//
	// Recall that evaluating a P2SH script without the flag set results in
	// non-P2SH evaluation which leaves the P2SH inputs on the stack.
	// Thus, allowing the clean stack flag without the P2SH flag would make
	// it possible to have a situation where P2SH would not be a soft fork
	// when it should be. The same goes for segwit which will pull in
	// additional scripts for execution from the witness stack.
	vm := Engine{
		flags:          flags,
		sigCache:       sigCache,
		hashCache:      hashCache,
		inputAmount:    inputAmount,
		prevOutFetcher: prevOutFetcher,
	}
	if vm.hasFlag(ScriptVerifyCleanStack) && (!vm.hasFlag(ScriptBip16) &&
		!vm.hasFlag(ScriptVerifyWitness)) {
		return nil, scriptError(ErrInvalidFlags,
			"invalid flags combination")
	}

	// The signature script must only contain data pushes when the
	// associated flag is set.
	if vm.hasFlag(ScriptVerifySigPushOnly) && !IsPushOnlyScript(scriptSig) {
		return nil, scriptError(ErrNotPushOnly,
			"signature script is not push only")
	}

	// The signature script must only contain data pushes for P2SH which is
	// determined based on the form of the public key script.
	if vm.hasFlag(ScriptBip16) && isScriptHashScript(scriptPubKey) {
		// Only accept input scripts that push data for P2SH.
		// Notice that the push only checks have already been done when
		// the flag to verify signature scripts are push only is set
		// above, so avoid checking again.
		alreadyChecked := vm.hasFlag(ScriptVerifySigPushOnly)
		if !alreadyChecked && !IsPushOnlyScript(scriptSig) {
			return nil, scriptError(ErrNotPushOnly,
				"pay to script hash is not push only")
		}
		vm.bip16 = true
	}

	// The engine stores the scripts using a slice. This allows multiple
	// scripts to be executed in sequence. For example, with a
	// pay-to-script-hash transaction, there will ultimately be a third
	// script to execute.
	scripts := [][]byte{scriptSig, scriptPubKey}
	for _, scr := range scripts {
		if len(scr) > MaxScriptSize {
			str := fmt.Sprintf("script size %d is larger than max allowed "+
				"size %d", len(scr), MaxScriptSize)
			return nil, scriptError(ErrScriptTooBig, str)
		}

		if err := checkScriptParses(scriptVersion, scr); err != nil {
			return nil, err
		}
	}
	vm.scripts = scripts

	// Advance the program counter to the public key script if the signature
	// script is empty since there is nothing to execute for it in that case.
	if len(scriptSig) == 0 {
		vm.scriptIdx++
	}
	if vm.hasFlag(ScriptVerifyMinimalData) {
		vm.dstack.verifyMinimalData = true
		vm.astack.verifyMinimalData = true
	}

	// Check to see if we should execute in witness verification mode
	// according to the set flags. We check both the pkScript and sigScript
	// here since in the case of nested p2sh, the scriptSig will be a valid
	// witness program. For nested p2sh, all the bytes after the first data
	// push should *exactly* match the witness program template.
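	//
	// For example, a nested P2SH-P2WPKH input carries a signature script that
	// is a single push of the 22-byte program 0x0014{20-byte pubkey hash}.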
	if vm.hasFlag(ScriptVerifyWitness) {
		// If witness evaluation is enabled, then P2SH MUST also be
		// active.
		if !vm.hasFlag(ScriptBip16) {
			errStr := "P2SH must be enabled to do witness verification"
			return nil, scriptError(ErrInvalidFlags, errStr)
		}

		var witProgram []byte

		switch {
		case IsWitnessProgram(vm.scripts[1]):
			// The scriptSig must be *empty* for all native witness
			// programs, otherwise we introduce malleability.
			if len(scriptSig) != 0 {
				errStr := "native witness program cannot " +
					"also have a signature script"
				return nil, scriptError(ErrWitnessMalleated, errStr)
			}

			witProgram = scriptPubKey
		case len(tx.TxIn[txIdx].Witness) != 0 && vm.bip16:
			// The sigScript MUST be *exactly* a single canonical
			// data push of the witness program, otherwise we
			// reintroduce malleability.
			sigPops := vm.scripts[0]
			if len(sigPops) > 2 &&
				isCanonicalPush(sigPops[0], sigPops[1:]) &&
				IsWitnessProgram(sigPops[1:]) {

				witProgram = sigPops[1:]
			} else {
				errStr := "signature script for witness " +
					"nested p2sh is not canonical"
				return nil, scriptError(ErrWitnessMalleatedP2SH, errStr)
			}
		}

		if witProgram != nil {
			var err error
			vm.witnessVersion, vm.witnessProgram, err = ExtractWitnessProgramInfo(witProgram)
			if err != nil {
				return nil, err
			}
		} else {
			// If we didn't find a witness program in either the
			// pkScript or as a data push within the sigScript, then
			// there MUST NOT be any witness data associated with
			// the input being validated.
			if vm.witnessProgram == nil && len(tx.TxIn[txIdx].Witness) != 0 {
				errStr := "non-witness inputs cannot have a witness"
				return nil, scriptError(ErrWitnessUnexpected, errStr)
			}
		}
	}

	// Set up the current tokenizer used to parse through the script one opcode
	// at a time with the script associated with the program counter.
	vm.tokenizer = MakeScriptTokenizer(scriptVersion, scripts[vm.scriptIdx])

	vm.tx = *tx
	vm.txIdx = txIdx

	return &vm, nil
}