remove repetitive words

Signed-off-by: cuinix <915115094@qq.com>
This commit is contained in:
cuinix 2024-03-07 14:05:47 +08:00
parent 716c6dddd8
commit 60bc30dd08
15 changed files with 21 additions and 21 deletions

View File

@ -48,7 +48,7 @@ func LazyAdd() SchedulerOption {
// set of Requests, executes them, and returns the error from the operation. // set of Requests, executes them, and returns the error from the operation.
type Scheduler interface { type Scheduler interface {
// Execute schedules a Request for execution with the next available // Execute schedules a Request for execution with the next available
// batch. This method blocks until the the underlying closure has been // batch. This method blocks until the underlying closure has been
// run against the database. The resulting error is returned to the // run against the database. The resulting error is returned to the
// caller. // caller.
Execute(req *Request) error Execute(req *Request) error

View File

@ -57,7 +57,7 @@ func LoadCert(certPath, keyPath string) (tls.Certificate, *x509.Certificate,
return tls.Certificate{}, nil, err return tls.Certificate{}, nil, err
} }
// Now parse the the PEM block of the certificate into its x509 data // Now parse the PEM block of the certificate into its x509 data
// structure so it can be examined in more detail. // structure so it can be examined in more detail.
x509Cert, err := x509.ParseCertificate(certData.Certificate[0]) x509Cert, err := x509.ParseCertificate(certData.Certificate[0])
if err != nil { if err != nil {
@ -82,7 +82,7 @@ func LoadCertFromBytes(certBytes, keyBytes []byte) (tls.Certificate,
return tls.Certificate{}, nil, err return tls.Certificate{}, nil, err
} }
// Now parse the the PEM block of the certificate into its x509 data // Now parse the PEM block of the certificate into its x509 data
// structure so it can be examined in more detail. // structure so it can be examined in more detail.
x509Cert, err := x509.ParseCertificate(certData.Certificate[0]) x509Cert, err := x509.ParseCertificate(certData.Certificate[0])
if err != nil { if err != nil {

View File

@ -103,7 +103,7 @@ func (e *etcdLeaderElector) Leader(ctx context.Context) (string, error) {
} }
// Campaign will start a new leader election campaign. Campaign will block until // Campaign will start a new leader election campaign. Campaign will block until
// the elector context is canceled or the the caller is elected as the leader. // the elector context is canceled or the caller is elected as the leader.
func (e *etcdLeaderElector) Campaign(ctx context.Context) error { func (e *etcdLeaderElector) Campaign(ctx context.Context) error {
return e.election.Campaign(ctx, e.id) return e.election.Campaign(ctx, e.id)
} }

View File

@ -1469,7 +1469,7 @@ func (s *Switch) failAddPacket(packet *htlcPacket, failure *LinkError) error {
log.Error(failure.Error()) log.Error(failure.Error())
// Create a failure packet for this htlc. The the full set of // Create a failure packet for this htlc. The full set of
// information about the htlc failure is included so that they can // information about the htlc failure is included so that they can
// be included in link failure notifications. // be included in link failure notifications.
failPkt := &htlcPacket{ failPkt := &htlcPacket{

View File

@ -129,7 +129,7 @@ func NewInvoiceExpiryWatcher(clock clock.Clock,
} }
} }
// Start starts the the subscription handler and the main loop. Start() will // Start starts the subscription handler and the main loop. Start() will
// return with error if InvoiceExpiryWatcher is already started. Start() // return with error if InvoiceExpiryWatcher is already started. Start()
// expects a cancellation function passed that will be used to cancel expired // expects a cancellation function passed that will be used to cancel expired
// invoices by their payment hash. // invoices by their payment hash.

View File

@ -1170,7 +1170,7 @@ func (s *stm) Rollback() {
} }
// rollback will reset the read and write sets. If clearReadSet is false we'll // rollback will reset the read and write sets. If clearReadSet is false we'll
// only reset the the write set. // only reset the write set.
func (s *stm) rollback(clearReadSet bool) { func (s *stm) rollback(clearReadSet bool) {
if clearReadSet { if clearReadSet {
s.rset.clear() s.rset.clear()

View File

@ -1231,7 +1231,7 @@ type MuSig2CombineKeysRequest struct {
// session. The list will always be sorted lexicographically internally. This // session. The list will always be sorted lexicographically internally. This
// must include the local key which is described by the above key_loc. // must include the local key which is described by the above key_loc.
AllSignerPubkeys [][]byte `protobuf:"bytes,1,rep,name=all_signer_pubkeys,json=allSignerPubkeys,proto3" json:"all_signer_pubkeys,omitempty"` AllSignerPubkeys [][]byte `protobuf:"bytes,1,rep,name=all_signer_pubkeys,json=allSignerPubkeys,proto3" json:"all_signer_pubkeys,omitempty"`
// A series of optional generic tweaks to be applied to the the aggregated // A series of optional generic tweaks to be applied to the aggregated
// public key. // public key.
Tweaks []*TweakDesc `protobuf:"bytes,2,rep,name=tweaks,proto3" json:"tweaks,omitempty"` Tweaks []*TweakDesc `protobuf:"bytes,2,rep,name=tweaks,proto3" json:"tweaks,omitempty"`
// An optional taproot specific tweak that must be specified if the MuSig2 // An optional taproot specific tweak that must be specified if the MuSig2
@ -1391,7 +1391,7 @@ type MuSig2SessionRequest struct {
// An optional list of all public nonces of other signing participants that // An optional list of all public nonces of other signing participants that
// might already be known. // might already be known.
OtherSignerPublicNonces [][]byte `protobuf:"bytes,3,rep,name=other_signer_public_nonces,json=otherSignerPublicNonces,proto3" json:"other_signer_public_nonces,omitempty"` OtherSignerPublicNonces [][]byte `protobuf:"bytes,3,rep,name=other_signer_public_nonces,json=otherSignerPublicNonces,proto3" json:"other_signer_public_nonces,omitempty"`
// A series of optional generic tweaks to be applied to the the aggregated // A series of optional generic tweaks to be applied to the aggregated
// public key. // public key.
Tweaks []*TweakDesc `protobuf:"bytes,4,rep,name=tweaks,proto3" json:"tweaks,omitempty"` Tweaks []*TweakDesc `protobuf:"bytes,4,rep,name=tweaks,proto3" json:"tweaks,omitempty"`
// An optional taproot specific tweak that must be specified if the MuSig2 // An optional taproot specific tweak that must be specified if the MuSig2

View File

@ -487,7 +487,7 @@ message MuSig2CombineKeysRequest {
repeated bytes all_signer_pubkeys = 1; repeated bytes all_signer_pubkeys = 1;
/* /*
A series of optional generic tweaks to be applied to the the aggregated A series of optional generic tweaks to be applied to the aggregated
public key. public key.
*/ */
repeated TweakDesc tweaks = 2; repeated TweakDesc tweaks = 2;
@ -551,7 +551,7 @@ message MuSig2SessionRequest {
repeated bytes other_signer_public_nonces = 3; repeated bytes other_signer_public_nonces = 3;
/* /*
A series of optional generic tweaks to be applied to the the aggregated A series of optional generic tweaks to be applied to the aggregated
public key. public key.
*/ */
repeated TweakDesc tweaks = 4; repeated TweakDesc tweaks = 4;

View File

@ -509,7 +509,7 @@
"items": { "items": {
"$ref": "#/definitions/signrpcTweakDesc" "$ref": "#/definitions/signrpcTweakDesc"
}, },
"description": "A series of optional generic tweaks to be applied to the the aggregated\npublic key." "description": "A series of optional generic tweaks to be applied to the aggregated\npublic key."
}, },
"taproot_tweak": { "taproot_tweak": {
"$ref": "#/definitions/signrpcTaprootTweakDesc", "$ref": "#/definitions/signrpcTaprootTweakDesc",
@ -627,7 +627,7 @@
"items": { "items": {
"$ref": "#/definitions/signrpcTweakDesc" "$ref": "#/definitions/signrpcTweakDesc"
}, },
"description": "A series of optional generic tweaks to be applied to the the aggregated\npublic key." "description": "A series of optional generic tweaks to be applied to the aggregated\npublic key."
}, },
"taproot_tweak": { "taproot_tweak": {
"$ref": "#/definitions/signrpcTaprootTweakDesc", "$ref": "#/definitions/signrpcTaprootTweakDesc",

View File

@ -13,7 +13,7 @@ const (
FeePerKwFloor SatPerKWeight = 253 FeePerKwFloor SatPerKWeight = 253
// AbsoluteFeePerKwFloor is the lowest fee rate in sat/kw of a // AbsoluteFeePerKwFloor is the lowest fee rate in sat/kw of a
// transaction that we should ever _create_. This is the the equivalent // transaction that we should ever _create_. This is the equivalent
// of 1 sat/byte in sat/kw. // of 1 sat/byte in sat/kw.
AbsoluteFeePerKwFloor SatPerKWeight = 250 AbsoluteFeePerKwFloor SatPerKWeight = 250
) )

View File

@ -307,7 +307,7 @@ func CoinSelectSubtractFees(feeRate chainfee.SatPerKWeight, amt,
outputAmt := totalSat - requiredFeeNoChange outputAmt := totalSat - requiredFeeNoChange
changeAmt := btcutil.Amount(0) changeAmt := btcutil.Amount(0)
// If the the output is too small after subtracting the fee, the coin // If the output is too small after subtracting the fee, the coin
// selection cannot be performed with an amount this small. // selection cannot be performed with an amount this small.
if outputAmt < dustLimit { if outputAmt < dustLimit {
return nil, 0, 0, fmt.Errorf("output amount(%v) after "+ return nil, 0, 0, fmt.Errorf("output amount(%v) after "+

View File

@ -29,7 +29,7 @@ func (pq priorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i] pq[i], pq[j] = pq[j], pq[i]
} }
// Push adds a new item the the priorityQueue. // Push adds a new item to the priorityQueue.
func (pq *priorityQueue) Push(x interface{}) { func (pq *priorityQueue) Push(x interface{}) {
item := x.(PriorityQueueItem) item := x.(PriorityQueueItem)
*pq = append(*pq, item) *pq = append(*pq, item)

View File

@ -1205,7 +1205,7 @@
; `Payment_In_FLIGHT` will be sent for compatibility concerns. ; `Payment_In_FLIGHT` will be sent for compatibility concerns.
; routerrpc.usestatusinitiated=false ; routerrpc.usestatusinitiated=false
; Defines the the maximum duration that the probing fee estimation is allowed to ; Defines the maximum duration that the probing fee estimation is allowed to
; take. ; take.
; routerrpc.fee-estimation-timeout=1m ; routerrpc.fee-estimation-timeout=1m

View File

@ -292,7 +292,7 @@ type UtxoSweeperConfig struct {
// sweeps, how many blocks to wait before retrying to sweep. // sweeps, how many blocks to wait before retrying to sweep.
NextAttemptDeltaFunc func(int) int32 NextAttemptDeltaFunc func(int) int32
// MaxFeeRate is the the maximum fee rate allowed within the // MaxFeeRate is the maximum fee rate allowed within the
// UtxoSweeper. // UtxoSweeper.
MaxFeeRate chainfee.SatPerVByte MaxFeeRate chainfee.SatPerVByte
@ -794,7 +794,7 @@ func (s *UtxoSweeper) createInputClusters() []inputCluster {
// if the locktime is equal. // if the locktime is equal.
lockTimeClusters, nonLockTimeInputs := s.clusterByLockTime(inputs) lockTimeClusters, nonLockTimeInputs := s.clusterByLockTime(inputs)
// Cluster the the remaining inputs by sweep fee rate. // Cluster the remaining inputs by sweep fee rate.
feeClusters := s.clusterBySweepFeeRate(nonLockTimeInputs) feeClusters := s.clusterBySweepFeeRate(nonLockTimeInputs)
// Since the inputs that we clustered by fee rate don't commit to a // Since the inputs that we clustered by fee rate don't commit to a
@ -1481,7 +1481,7 @@ func DefaultNextAttemptDeltaFunc(attempts int) int32 {
return 1 + rand.Int31n(1<<uint(attempts-1)) return 1 + rand.Int31n(1<<uint(attempts-1))
} }
// ListSweeps returns a list of the the sweeps recorded by the sweep store. // ListSweeps returns a list of the sweeps recorded by the sweep store.
func (s *UtxoSweeper) ListSweeps() ([]chainhash.Hash, error) { func (s *UtxoSweeper) ListSweeps() ([]chainhash.Hash, error) {
return s.cfg.Store.ListSweeps() return s.cfg.Store.ListSweeps()
} }

View File

@ -33,7 +33,7 @@ type Config struct {
// storing state updates. // storing state updates.
DB DB DB DB
// NodeKeyECDH is the the ECDH capable wrapper of the key to be used in // NodeKeyECDH is the ECDH capable wrapper of the key to be used in
// accepting new brontide connections. // accepting new brontide connections.
NodeKeyECDH keychain.SingleKeyECDH NodeKeyECDH keychain.SingleKeyECDH