Merge pull request #9138 from ziggie1984/detail_debug_process

Detail LND's debug procedures.
Yong 2024-10-22 20:26:11 +08:00 committed by GitHub
commit caff5a00d1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 410 additions and 91 deletions

config.go

@ -352,12 +352,12 @@ type Config struct {
DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify <global-level>,<subsystem>=<level>,<subsystem2>=<level>,... to set the log level for individual subsystems -- Use show to list available subsystems"`
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
CPUProfile string `long:"cpuprofile" description:"DEPRECATED: Use 'pprof.cpuprofile' option. Write CPU profile to the specified file" hidden:"true"`
Profile string `long:"profile" description:"DEPRECATED: Use 'pprof.profile' option. Enable HTTP profiling on either a port or host:port" hidden:"true"`
BlockingProfile int `long:"blockingprofile" description:"DEPRECATED: Use 'pprof.blockingprofile' option. Used to enable a blocking profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every blocking event, and 0 including no events." hidden:"true"`
MutexProfile int `long:"mutexprofile" description:"DEPRECATED: Use 'pprof.mutexprofile' option. Used to enable a mutex profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every mutex event, and 0 including no events." hidden:"true"`
Profile string `long:"profile" description:"Enable HTTP profiling on either a port or host:port"`
BlockingProfile int `long:"blockingprofile" description:"Used to enable a blocking profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every blocking event, and 0 including no events."`
MutexProfile int `long:"mutexprofile" description:"Used to Enable a mutex profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every mutex event, and 0 including no events."`
Pprof *lncfg.Pprof `group:"Pprof" namespace:"pprof"`
UnsafeDisconnect bool `long:"unsafe-disconnect" description:"DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with open channels. THIS FLAG WILL BE REMOVED IN 0.10.0" hidden:"true"`
UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."`
@ -821,7 +821,8 @@ func LoadConfig(interceptor signal.Interceptor) (*Config, error) {
cleanCfg, err := ValidateConfig(
cfg, interceptor, fileParser, flagParser,
)
if usageErr, ok := err.(*usageError); ok {
var usageErr *lncfg.UsageError
if errors.As(err, &usageErr) {
// The logging system might not yet be initialized, so we also
// write to stderr to make sure the error appears somewhere.
_, _ = fmt.Fprintln(os.Stderr, usageMessage)
@ -830,9 +831,9 @@ func LoadConfig(interceptor signal.Interceptor) (*Config, error) {
// The log subsystem might not yet be initialized. But we still
// try to log the error there since some packaging solutions
// might only look at the log and not stdout/stderr.
ltndLog.Warnf("Error validating config: %v", usageErr.err)
ltndLog.Warnf("Error validating config: %v", err)
return nil, usageErr.err
return nil, err
}
if err != nil {
// The log subsystem might not yet be initialized. But we still
@ -856,18 +857,6 @@ func LoadConfig(interceptor signal.Interceptor) (*Config, error) {
return cleanCfg, nil
}
// usageError is an error type that signals a problem with the supplied flags.
type usageError struct {
err error
}
// Error returns the error string.
//
// NOTE: This is part of the error interface.
func (u *usageError) Error() string {
return u.err.Error()
}
ValidateConfig checks the given configuration to be sane. This makes sure no
// illegal values or combination of values are set. All file system paths are
// normalized. The cleaned up config is returned on success.
@ -1347,31 +1336,6 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
}
// Validate profile port or host:port.
if cfg.Profile != "" {
str := "%s: The profile port must be between 1024 and 65535"
// Try to parse Profile as a host:port.
_, hostPort, err := net.SplitHostPort(cfg.Profile)
if err == nil {
// Determine if the port is valid.
profilePort, err := strconv.Atoi(hostPort)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, &usageError{mkErr(str)}
}
} else {
// Try to parse Profile as a port.
profilePort, err := strconv.Atoi(cfg.Profile)
if err != nil || profilePort < 1024 || profilePort > 65535 {
return nil, &usageError{mkErr(str)}
}
// Since the user just set a port, we will serve debugging
// information over localhost.
cfg.Profile = net.JoinHostPort("127.0.0.1", cfg.Profile)
}
}
// We'll now construct the network directory which will be where we
// store all the data specific to this chain/network.
cfg.networkDir = filepath.Join(
@ -1442,13 +1406,6 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
return nil, mkErr("log writer missing in config")
}
// Special show command to list supported subsystems and exit.
if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems",
cfg.LogWriter.SupportedSubsystems())
os.Exit(0)
}
if !build.SuportedLogCompressor(cfg.LogCompressor) {
return nil, mkErr("invalid log compressor: %v",
cfg.LogCompressor)
@ -1456,6 +1413,13 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
// Initialize logging at the default logging level.
SetupLoggers(cfg.LogWriter, interceptor)
// Special show command to list supported subsystems and exit.
if cfg.DebugLevel == "show" {
fmt.Println("Supported subsystems",
cfg.LogWriter.SupportedSubsystems())
os.Exit(0)
}
err = cfg.LogWriter.InitLogRotator(
filepath.Join(cfg.LogDir, defaultLogFilename),
cfg.LogCompressor, cfg.MaxLogFileSize, cfg.MaxLogFiles,
@ -1469,7 +1433,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
err = build.ParseAndSetDebugLevels(cfg.DebugLevel, cfg.LogWriter)
if err != nil {
str := "error parsing debug level: %v"
return nil, &usageError{mkErr(str, err)}
return nil, &lncfg.UsageError{Err: mkErr(str, err)}
}
// At least one RPCListener is required. So listen on localhost per
@ -1700,6 +1664,39 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
return nil, mkErr("custom-message: %v", err)
}
// Map old pprof flags to new pprof group flags.
//
// NOTE: This is a temporary measure to ensure compatibility with old
// flags.
if cfg.CPUProfile != "" {
if cfg.Pprof.CPUProfile != "" {
return nil, mkErr("cpuprofile and pprof.cpuprofile " +
"are mutually exclusive")
}
cfg.Pprof.CPUProfile = cfg.CPUProfile
}
if cfg.Profile != "" {
if cfg.Pprof.Profile != "" {
return nil, mkErr("profile and pprof.profile " +
"are mutually exclusive")
}
cfg.Pprof.Profile = cfg.Profile
}
if cfg.BlockingProfile != 0 {
if cfg.Pprof.BlockingProfile != 0 {
return nil, mkErr("blockingprofile and " +
"pprof.blockingprofile are mutually exclusive")
}
cfg.Pprof.BlockingProfile = cfg.BlockingProfile
}
if cfg.MutexProfile != 0 {
if cfg.Pprof.MutexProfile != 0 {
return nil, mkErr("mutexprofile and " +
"pprof.mutexprofile are mutually exclusive")
}
cfg.Pprof.MutexProfile = cfg.MutexProfile
}
// Validate the subconfigs for workers, caches, and the tower client.
err = lncfg.Validate(
cfg.Workers,
@ -1714,6 +1711,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
cfg.Htlcswitch,
cfg.Invoices,
cfg.Routing,
cfg.Pprof,
)
if err != nil {
return nil, err


@ -1,7 +1,7 @@
# Table of Contents
1. [Overview](#overview)
1. [Debug Logging](#debug-logging)
1. [Capturing pprof data with `lnd`](#capturing-pprof-data-with-lnd)
1. [LND's built-in profiler](#built-in-profiler-in-lnd)
## Overview
@ -12,36 +12,192 @@ data ahead of time.
## Debug Logging
LND supports different logging levels, and you can also specify different
levels per subsystem. This makes it easy to focus on a particular subsystem
without clogging up the logs with a lot of noise. The logging level can either
be set in the `lnd.conf` file or passed via the `--debuglevel` flag when
starting lnd.
LND supports the following logging levels (see [log.go](/build/log.go) and
[sample-lnd.conf](/sample-lnd.conf) for more info):
- `trace`
- `debug`
- `info`
- `warn`
- `error`
- `critical`
- `off`
LND is composed of many subsystems; these subsystems can be listed either via
the start flag `--debuglevel=show` or by using the `lncli` program.
Show all subsystems:
```shell
$ lnd --debuglevel=show
$ lncli debuglevel --show
```
You may also specify logging per-subsystem, like this:
```shell
$ lnd --debuglevel=<subsystem>=<level>,<subsystem2>=<level>,...
$ lncli debuglevel --level=<subsystem>=<level>,<subsystem2>=<level>,...
```
The default global logging level is `info`. So if one wants to change the
global logging level and in addition set more detailed logging for a
particular subsystem, the command would look like this (using `HSWC`, the
htlcswitch subsystem, as an example):
```shell
$ lnd --debuglevel=critical,HSWC=debug
$ lncli debuglevel --level=critical,HSWC=debug
```
The subsystem names are case-sensitive and must be all uppercase.
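The same configuration can be made persistent in `lnd.conf`. A minimal sketch,
using the same levels as in the example above:

```text
[Application Options]
debuglevel=critical,HSWC=debug
```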
To identify the subsystem behind an abbreviated name, you can search for the
abbreviation in the [log.go](/log.go) file. Each subsystem declares a
`btclog.Logger` instance locally, which is then assigned via the `UseLogger`
function call in the `SetupLoggers` function.

Example for `HSWC`: a new sublogger is injected into the htlcswitch package via
the `UseLogger` function call in the `SetupLoggers` function, so the `HSWC`
subsystem handles the logging of the htlcswitch package.
```go
AddSubLogger(root, "HSWC", interceptor, htlcswitch.UseLogger)
```
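On the receiving side, the pattern each package follows looks roughly like the
sketch below (the names mirror the common lnd convention; this is a sketch, not
the exact htlcswitch source):

```go
package htlcswitch

import "github.com/btcsuite/btclog"

// log is the package-level logger. It stays disabled until UseLogger
// is called, which happens from SetupLoggers during startup.
var log = btclog.Disabled

// UseLogger assigns the sublogger created for the HSWC subsystem to
// this package, so all log calls here carry the HSWC prefix.
func UseLogger(logger btclog.Logger) {
	log = logger
}
```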
Caution: some logger subsystems are overwritten during instantiation. An
example is the `neutrino/query` package which, instead of using the `BTCN`
prefix, is overwritten by the `LNWL` subsystem.
Moreover, when using the `lncli` command, the return value provides the
updated list of all subsystems and their associated logging levels. This makes
it easy to get an overview of the current logging level for the whole system.
Example:
```shell
$ lncli debuglevel --level=critical,HSWC=debug
{
"sub_systems": "ARPC=INF, ATPL=INF, BLPT=INF, BRAR=INF, BTCN=INF, BTWL=INF, CHAC=INF, CHBU=INF, CHCL=INF, CHDB=INF, CHFD=INF, CHFT=INF, CHNF=INF, CHRE=INF, CLUS=INF, CMGR=INF, CNCT=INF, CNFG=INF, CRTR=INF, DISC=INF, DRPC=INF, FNDG=INF, GRPH=INF, HLCK=INF, HSWC=DBG, INVC=INF, IRPC=INF, LNWL=INF, LTND=INF, NANN=INF, NRPC=INF, NTFN=INF, NTFR=INF, PEER=INF, PRNF=INF, PROM=INF, PRPC=INF, RPCP=INF, RPCS=INF, RPWL=INF, RRPC=INF, SGNR=INF, SPHX=INF, SRVR=INF, SWPR=INF, TORC=INF, UTXN=INF, VRPC=INF, WLKT=INF, WTCL=INF, WTWR=INF"
}
```
## Built-in profiler in LND
`LND` has a built-in feature which allows you to capture profiling data at
runtime using [pprof](https://golang.org/pkg/runtime/pprof/), a profiler for
Go. It is recommended to enable the profiling server so that an analysis can
be triggered during runtime. There is only little overhead in enabling this
feature, because profiling only starts when the server endpoints are called.
However, LND also allows specifying a CPU profile file via the `cpuprofile`
flag, which starts a CPU profile when LND starts and stops it when LND shuts
down. This is only recommended for debugging purposes, because the overhead is
much higher.

To enable the profile server, start `lnd` with the `--profile` option using a
free port. As soon as the server is up, the different profiles can be fetched
from the `debug/pprof` endpoint using either the web interface or, for
example, `curl`.
Example port `9736` is used for the profile server in the following examples.
```shell
$ lnd --profile=9736
```
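If instead a standalone CPU profile over the node's whole lifetime is wanted,
the flag introduced by this PR can be used. A sketch, where the output path is
an arbitrary example:

```shell
$ lnd --pprof.cpuprofile=/tmp/lnd-cpu.prof
```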
NOTE: The `--profile` flag of the `lncli` program is not related to profiling
or the profiling server. It has a different purpose and allows a node operator
to manage different LND daemons without providing all the command flags every
time. For more details see [lncli profile](/cmd/commands/profile.go).
### Different types of profiles
#### CPU profile
A CPU profile can be used to analyze the CPU usage of the program. When
obtaining it via the profiling HTTP endpoint, you can specify the duration of
the capture as a query parameter.
```shell
$ curl http://localhost:9736/debug/pprof/profile?seconds=10 > cpu.prof
```
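Once fetched, a quick textual summary of the hottest functions can be printed
in the terminal. One possible invocation:

```shell
$ go tool pprof -top cpu.prof
```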
#### Goroutine profile
The goroutine profile is very useful when analyzing deadlocks and lock
contention. It can be obtained via the web interface or the following endpoint:
```shell
$ curl http://localhost:9736/debug/pprof/goroutine?debug=2 > goroutine.prof
```
The query parameter `debug=2` is optional but recommended and refers to the
format of the output file. Only this format contains the necessary information
to identify goroutine deadlocks. Otherwise, `go tool pprof` needs to be used
to visualize the data and interpret the results.
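Because the `debug=2` dump is plain text, standard tools can give a first
indication. The Go runtime annotates goroutines that have been blocked for
more than a minute, so a simple filter is a quick (if crude) deadlock
heuristic:

```shell
$ grep "minutes" goroutine.prof
```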
#### Heap profile
The heap profile is useful to analyze memory allocations. It can be obtained
via the following endpoint:
```shell
$ curl http://localhost:9736/debug/pprof/heap > heap.prof
```
The documentation of the pprof package states that a garbage collection can be
triggered before obtaining the heap profile. This can be done by setting the
`gc` query parameter (`gc=1`).
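For example, to force a garbage collection first so the profile only contains
live allocations:

```shell
$ curl "http://localhost:9736/debug/pprof/heap?gc=1" > heap.prof
```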
#### Other profiles
There are several other profiles available, like the mutex profile or the
block profile, which give insights into contention and bottlenecks of your
program. The web interface lists all the available profiles/endpoints which
can be obtained.
However, mutex and block profiling need to be enabled separately by setting a
sampling rate via the config values `BlockingProfile` and `MutexProfile`. They
are off by default (0). These values represent sampling rates, meaning that a
value of `1` records every event, leading to significant overhead, whereas a
rate of `n` only records 1 out of every n events, decreasing the
aggressiveness of the profiler.
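Using the new flag group added in this PR, both profiles can be switched on at
start-up. A sketch with the most aggressive sampling rate:

```shell
$ lnd --pprof.profile=9736 --pprof.blockingprofile=1 --pprof.mutexprofile=1
```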
Fetching the block and mutex profile:
```shell
$ curl http://localhost:9736/debug/pprof/mutex?debug=2
$ curl http://localhost:9736/debug/pprof/block?debug=2
```
The full program command line can also be fetched, which shows how LND was
started and which flags were provided to it.
```shell
$ curl http://localhost:9736/debug/pprof/cmdline > cmdline.prof
```
There are also other endpoints available; see the
[pprof documentation](https://golang.org/pkg/runtime/pprof/) for more details.
#### Visualizing the profile dumps
It can be hard to make sense of the profile dumps by just looking at them,
therefore the Go ecosystem provides tools to analyze them either in the
terminal or by visualizing them. One of these tools is `go tool pprof`.
Assuming the profile was fetched via `curl` as in the examples above, an SVG
visualization of the CPU profile can be generated like this:
```shell
$ go tool pprof -svg cpu.prof > cpu.svg
```
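The same tool also ships an interactive web UI combining call graph, flame
graph and top views; the listen address here is an arbitrary choice:

```shell
$ go tool pprof -http=localhost:8080 cpu.prof
```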
Details on how to interpret these visualizations can be found in the
[pprof documentation](https://github.com/google/pprof/blob/main/doc/README.md#interpreting-the-callgraph).


@ -71,6 +71,10 @@
## Code Health
* [Moved](https://github.com/lightningnetwork/lnd/pull/9138) profile-related
config settings to their own dedicated group. The old ones still work but will
be removed in a future release.
## Breaking Changes
## Performance Improvements


@ -2,6 +2,7 @@ package invoices
import (
"errors"
"fmt"
"sync/atomic"
"github.com/lightningnetwork/lnd/fn"
@ -167,21 +168,31 @@ func (s *HtlcModificationInterceptor) RegisterInterceptor(
// Start starts the service.
func (s *HtlcModificationInterceptor) Start() error {
log.Info("HtlcModificationInterceptor starting...")
if !s.started.CompareAndSwap(false, true) {
return nil
return fmt.Errorf("HtlcModificationInterceptor started more" +
"than once")
}
log.Debugf("HtlcModificationInterceptor started")
return nil
}
// Stop stops the service.
func (s *HtlcModificationInterceptor) Stop() error {
log.Info("HtlcModificationInterceptor stopping...")
if !s.stopped.CompareAndSwap(false, true) {
return nil
return fmt.Errorf("HtlcModificationInterceptor stopped more" +
"than once")
}
close(s.quit)
log.Debug("HtlcModificationInterceptor stopped")
return nil
}

lncfg/error.go

@ -0,0 +1,25 @@
package lncfg
import "fmt"
// UsageError is an error type that signals a problem with the supplied flags.
type UsageError struct {
Err error
}
// Error returns the error string.
//
// NOTE: This is part of the error interface.
func (u *UsageError) Error() string {
return u.Err.Error()
}
// Unwrap returns the underlying error.
func (u *UsageError) Unwrap() error {
return u.Err
}
// mkErr creates a new error from a string.
func mkErr(format string, args ...interface{}) error {
return fmt.Errorf(format, args...)
}

lncfg/pprof.go

@ -0,0 +1,68 @@
package lncfg
import (
"net"
"strconv"
)
// Pprof holds the configuration options for LND's built-in pprof server.
//
//nolint:lll
type Pprof struct {
CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"`
Profile string `long:"profile" description:"Enable HTTP profiling on either a port or host:port"`
BlockingProfile int `long:"blockingprofile" description:"Used to enable a blocking profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every blocking event, and 0 including no events."`
MutexProfile int `long:"mutexprofile" description:"Used to enable a mutex profile to be served on the profiling port. This takes a value from 0 to 1, with 1 including every mutex event, and 0 including no events."`
}
// Validate checks the values configured for the profiler.
func (p *Pprof) Validate() error {
if p.BlockingProfile > 0 {
log.Warn("Blocking profile enabled only useful for " +
"debugging because of significant performance impact")
}
if p.MutexProfile > 0 {
log.Warn("Mutex profile enabled only useful for " +
"debugging because of significant performance impact")
}
if p.CPUProfile != "" {
log.Warn("CPU profile enabled only useful for " +
"debugging because of significant performance impact")
}
if p.Profile != "" {
str := "%v: The profile port must be between 1024 and 65535"
// Try to parse Profile as a host:port.
_, hostPort, err := net.SplitHostPort(p.Profile)
if err == nil {
// Determine if the port is valid.
profilePort, err := strconv.Atoi(hostPort)
if err != nil || profilePort < 1024 ||
profilePort > 65535 {
return &UsageError{Err: mkErr(str, hostPort)}
}
} else {
// Try to parse Profile as a port.
profilePort, err := strconv.Atoi(p.Profile)
if err != nil || profilePort < 1024 ||
profilePort > 65535 {
return &UsageError{Err: mkErr(str, p.Profile)}
}
// Since the user just set a port, we will serve
// debugging information over localhost.
p.Profile = net.JoinHostPort("127.0.0.1", p.Profile)
}
}
return nil
}

lnd.go

@ -193,7 +193,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
defer cancel()
// Enable http profiling server if requested.
if cfg.Profile != "" {
if cfg.Pprof.Profile != "" {
// Create the http handler.
pprofMux := http.NewServeMux()
pprofMux.HandleFunc("/debug/pprof/", pprof.Index)
@ -202,11 +202,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
pprofMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
pprofMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
if cfg.BlockingProfile != 0 {
runtime.SetBlockProfileRate(cfg.BlockingProfile)
if cfg.Pprof.BlockingProfile != 0 {
runtime.SetBlockProfileRate(cfg.Pprof.BlockingProfile)
}
if cfg.MutexProfile != 0 {
runtime.SetMutexProfileFraction(cfg.MutexProfile)
if cfg.Pprof.MutexProfile != 0 {
runtime.SetMutexProfileFraction(cfg.Pprof.MutexProfile)
}
// Redirect all requests to the pprof handler, thus visiting
@ -216,11 +216,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
"/debug/pprof/", http.StatusSeeOther,
))
ltndLog.Infof("Pprof listening on %v", cfg.Profile)
ltndLog.Infof("Pprof listening on %v", cfg.Pprof.Profile)
// Create the pprof server.
pprofServer := &http.Server{
Addr: cfg.Profile,
Addr: cfg.Pprof.Profile,
Handler: pprofMux,
ReadHeaderTimeout: cfg.HTTPHeaderTimeout,
}
@ -245,8 +245,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}
// Write cpu profile if requested.
if cfg.CPUProfile != "" {
f, err := os.Create(cfg.CPUProfile)
if cfg.Pprof.CPUProfile != "" {
f, err := os.Create(cfg.Pprof.CPUProfile)
if err != nil {
return mkErr("unable to create CPU profile: %v", err)
}


@ -7355,10 +7355,28 @@ func (r *rpcServer) DebugLevel(ctx context.Context,
return nil, err
}
// Propagate the new config level to the main config struct.
r.cfg.DebugLevel = req.LevelSpec
subLoggers := r.cfg.LogWriter.SubLoggers()
// Sort alphabetically by subsystem name.
var tags []string
for t := range subLoggers {
tags = append(tags, t)
}
sort.Strings(tags)
return &lnrpc.DebugLevelResponse{}, nil
// Create the log levels string.
var logLevels []string
for _, t := range tags {
logLevels = append(logLevels, fmt.Sprintf("%s=%s", t,
subLoggers[t].Level().String()))
}
logLevelsString := strings.Join(logLevels, ", ")
// Propagate the new config level to the main config struct.
r.cfg.DebugLevel = logLevelsString
return &lnrpc.DebugLevelResponse{
SubSystems: logLevelsString,
}, nil
}
// DecodePayReq takes an encoded payment request string and attempts to decode


@ -266,25 +266,32 @@
; Example:
; debuglevel=debug,PEER=info
; Write CPU profile to the specified file.
; DEPRECATED: Use pprof.cpuprofile instead. Write CPU profile to the specified
; file.
; cpuprofile=
; Enable HTTP profiling on given port -- NOTE port must be between 1024 and
; 65536. The profile can be access at: http://localhost:<PORT>/debug/pprof/.
; DEPRECATED: Use pprof.profile instead. Enable HTTP profiling on given port
; -- NOTE port must be between 1024 and 65535. The profile can be accessed at:
; http://localhost:<PORT>/debug/pprof/. You can also provide it as host:port to
; enable profiling for remote debugging. For example 0.0.0.0:<PORT> to enable
; profiling for all interfaces on the given port.
; profile=
; Enable a blocking profile to be obtained from the profiling port. A blocking
; profile can show where goroutines are blocking (stuck on mutexes, I/O, etc).
; This takes a value from 0 to 1, with 0 turning off the setting, and 1 sampling
; every blocking event (it's a rate value).
; DEPRECATED: Use pprof.blockingprofile instead. Enable a blocking profile to be
; obtained from the profiling port. A blocking profile can show where goroutines
; are blocking (stuck on mutexes, I/O, etc). This takes a value from 0 to 1,
; with 0 turning off the setting, and 1 sampling every blocking event (it's a
; rate value).
; blockingprofile=0
; Enable a mutex profile to be obtained from the profiling port. A mutex
; profile can show where goroutines are blocked on mutexes, and which mutexes
; have high contention. This takes a value from 0 to 1, with 0 turning off the
; setting, and 1 sampling every mutex event (it's a rate value).
; DEPRECATED: Use pprof.mutexprofile instead. Enable a mutex profile to be
; obtained from the profiling port. A mutex profile can show where goroutines
; are blocked on mutexes, and which mutexes have high contention. This takes a
; value from 0 to 1, with 0 turning off the setting, and 1 sampling every mutex
; event (it's a rate value).
; mutexprofile=0
; DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with
; open channels. THIS FLAG WILL BE REMOVED IN 0.10.0.
; unsafe-disconnect=false
@ -1811,3 +1818,35 @@
; no active gRPC streams. This might be useful to keep the underlying HTTP/2
; connection open for future requests.
; grpc.client-allow-ping-without-stream=false
[pprof]
; Enable HTTP profiling on given port -- NOTE port must be between 1024 and
; 65535. The profile can be accessed at: http://localhost:<PORT>/debug/pprof/.
; You can also provide it as host:port to enable profiling for remote debugging.
; For example 0.0.0.0:<PORT> to enable profiling for all interfaces on the given
; port. The built-in profiler has minimal overhead, so it is recommended to
; enable it.
; pprof.profile=
; Write CPU profile to the specified file. This should only be used for
; debugging because, compared to running a pprof server, this will record the
; CPU profile constantly from the start of the program until shutdown.
; pprof.cpuprofile=
; Enable a blocking profile to be obtained from the profiling port. A blocking
; profile can show where goroutines are blocking (stuck on mutexes, I/O, etc).
; This takes a value from 0 to 1, with 0 turning off the setting, and 1 sampling
; every blocking event (it's a rate value). The blocking profile has high
; overhead and is off by default even when running the pprof server. It should
; only be used for debugging.
; pprof.blockingprofile=0
; Enable a mutex profile to be obtained from the profiling port. A mutex
; profile can show where goroutines are blocked on mutexes, and which mutexes
; have high contention. This takes a value from 0 to 1, with 0 turning off the
; setting, and 1 sampling every mutex event (it's a rate value). The mutex
; profile has high overhead and is off by default even when running the pprof
; server. It should only be used for debugging.
; pprof.mutexprofile=0