logging implementation
Logging functionality is powered by Zap core.
// Logging maintains the state of the fabric logging system: the shared
// per-logger level registry plus the encoder/writer configuration, guarded
// by mutex.
type Logging struct {
	*LoggerLevels // embedded level registry shared by all named loggers (the global logger state)

	mutex          sync.RWMutex           // guards the fields below
	encoding       Encoding               // active output encoding
	encoderConfig  zapcore.EncoderConfig  // zap encoder configuration
	multiFormatter *fabenc.MultiFormatter // formatter used when rendering entries — presumably for console output; verify in fabenc
	writer         zapcore.WriteSyncer    // destination that log entries are written to
	observer       Observer               // optional hook notified on Check/WriteEntry (see Check/WriteEntry below)
}
// LoggerLevels tracks the logging level of named loggers.
type LoggerLevels struct {
	mutex        sync.RWMutex             // guards the maps and levels below
	levelCache   map[string]zapcore.Level // memoized logger-name -> effective-level lookups — presumably rebuilt when the spec changes; verify
	specs        map[string]zapcore.Level // explicit levels set via an activated spec, keyed by logger name
	defaultLevel zapcore.Level            // level applied to loggers with no matching spec entry
	minLevel     zapcore.Level            // lowest enabled level — presumably min(defaultLevel, specs) used as a fast pre-check; verify
}
Global logging system
First, the global logging system is created; then a global logger named 'flogging' is created. Finally, a dedicated logger is created for gRPC.
// Global is the process-wide Logging instance; logger is this package's own
// named logger ("flogging").
var Global *Logging
var logger *FabricLogger

// init builds the global logging system from a zero-value Config, creates
// the package logger named "flogging", and wires a dedicated zap logger into
// grpclog so gRPC-internal messages flow through the same logging system.
func init() {
	logging, err := New(Config{})
	if err != nil {
		// The process cannot run without a logging system, so fail hard.
		panic(err)
	}

	Global = logging
	logger = Global.Logger("flogging")

	grpcLogger := Global.ZapLogger("grpc")
	grpclog.SetLogger(NewGRPCLogger(grpcLogger))
}
log initialization in Peer
Logging.level is no longer used. Instead, the FABRIC_LOGGING_SPEC environment variable is used to control the log level. The global level defaults to 'info' if FABRIC_LOGGING_SPEC is not set.
func InitCmd(cmd *cobra.Command, args []string) {
...
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: logOutput,
LogSpec: loggingSpec,
})
...
ActivateSpec is used to modify logging levels
To change the log level of a logger, invoke ActivateSpec like this:
flogging.Global.ActivateSpec("cli.common=warn")
flogging.Global.ActivateSpec("cli.common, mainCmd=warn")
flogging.Global.ActivateSpec("cli.common=info:mainCmd=warn")
// ActivateSpec is used to modify logging levels.
//
// The logging specification has the following form:
// [<logger>[,<logger>...]=]<level>[:[<logger>[,<logger>...]=]<level>...]
func (l *LoggerLevels) ActivateSpec(spec string) error {
l.mutex.Lock()
defer l.mutex.Unlock()
When a log call is made, Logger.Check decides whether a log entry is generated.
Note that the decision is not made by s.base.Core().Enabled alone. Instead, it is made by s.base.Check(lvl, msg).
Log checking call stack
github.com/hyperledger/fabric/common/flogging.(*Core).Check at core.go:86
github.com/hyperledger/fabric/vendor/go.uber.org/zap.(*Logger).check at logger.go:269
github.com/hyperledger/fabric/vendor/go.uber.org/zap.(*Logger).Check at logger.go:172
github.com/hyperledger/fabric/vendor/go.uber.org/zap.(*SugaredLogger).log at sugar.go:233
github.com/hyperledger/fabric/vendor/go.uber.org/zap.(*SugaredLogger).Debugf at sugar.go:133
github.com/hyperledger/fabric/common/flogging.(*FabricLogger).Debug at zap.go:61
github.com/hyperledger/fabric/gossip/election.(*leaderElectionSvcImpl).waitForInterrupt at election.go:262
github.com/hyperledger/fabric/gossip/election.(*leaderElectionSvcImpl).leader at election.go:348
github.com/hyperledger/fabric/gossip/election.(*leaderElectionSvcImpl).run at election.go:279
runtime.goexit at asm_amd64.s:1357
- Async stack trace
github.com/hyperledger/fabric/gossip/election.(*leaderElectionSvcImpl).start at election.go:193
// log builds the final message from template/fmtArgs and hands it to the
// core via Check (vendored zap SugaredLogger source; see the call stack
// above — sugar.go).
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
	// If logging at this level is completely disabled, skip the overhead of
	// string formatting. Levels at DPanic and above are always formatted so
	// the message is available even when the logger panics or exits.
	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
		return
	}

	// Format with Sprint, Sprintf, or neither.
	msg := template
	if msg == "" && len(fmtArgs) > 0 {
		msg = fmt.Sprint(fmtArgs...)
	} else if msg != "" && len(fmtArgs) > 0 {
		msg = fmt.Sprintf(template, fmtArgs...)
	}

	// Check (not Core().Enabled alone) decides whether the entry is
	// generated; only an accepted entry is written, with the loosely-typed
	// context converted into structured fields.
	if ce := s.base.Check(lvl, msg); ce != nil {
		ce.Write(s.sweetenFields(context)...)
	}
}
// Check returns a CheckedEntry when logging a message at the given level is
// enabled, and nil otherwise (vendored zap Logger source; it simply
// delegates to the unexported check — logger.go in the stack above).
func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
	return log.check(lvl, msg)
}
log observer
The Observer gets a chance to observe both the Check and the Write of each log entry.
// Observer is notified by the logging core: Check fires when an entry is
// level-checked, and WriteEntry fires when an accepted entry is written.
type Observer interface {
	Check(e zapcore.Entry, ce *zapcore.CheckedEntry)
	WriteEntry(e zapcore.Entry, fields []zapcore.Field)
}
// Check forwards a level-check event to the configured observer, if any.
// The observer reference is snapshotted under the read lock and the lock is
// released before the callback, so the observer runs outside the critical
// section.
func (l *Logging) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) {
	l.mutex.RLock()
	observer := l.observer
	l.mutex.RUnlock()

	if observer != nil {
		observer.Check(e, ce)
	}
}
// WriteEntry forwards a written log entry to the configured observer, if
// any. Same locking discipline as Check: snapshot under the read lock,
// invoke the observer after releasing it.
func (l *Logging) WriteEntry(e zapcore.Entry, fields []zapcore.Field) {
	l.mutex.RLock()
	observer := l.observer
	l.mutex.RUnlock()

	if observer != nil {
		observer.WriteEntry(e, fields)
	}
}
Example of a log observer in the Orderer: it updates the counters for checks and writes.
// NewObserver creates a metrics-backed Observer whose counters track how
// many log entries were checked and written.
func NewObserver(provider metrics.Provider) *Observer {
	return &Observer{
		CheckedCounter: provider.NewCounter(CheckedCountOpts),
		WrittenCounter: provider.NewCounter(WriteCountOpts),
	}
}
// Check increments the per-level counter of entries that were level-checked.
func (m *Observer) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) {
	m.CheckedCounter.With("level", e.Level.String()).Add(1)
}
// WriteEntry increments the per-level counter of entries actually written.
func (m *Observer) WriteEntry(e zapcore.Entry, fields []zapcore.Field) {
	m.WrittenCounter.With("level", e.Level.String()).Add(1)
}
log in Orderer
Similarly, it uses environment variables to control the log level.
// initializeLogging configures the orderer's logging from the environment:
// FABRIC_LOGGING_SPEC controls the levels and FABRIC_LOGGING_FORMAT the
// output format. Output goes to stderr.
func initializeLogging() {
	loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
	loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")

	flogging.Init(flogging.Config{
		Format:  loggingFormat,
		Writer:  os.Stderr,
		LogSpec: loggingSpec,
	})
}
Log spec managed by the operations system
The operations system is an HTTP service for operational tasks. Both the Peer and the Orderer start it at startup, each listening on a different TCP port.
http://127.0.0.1:9443/logspec
// ServeHTTP implements the /logspec endpoint: PUT decodes a LogSpec from the
// request body and activates it (204 on success, 400 on a decode or
// activation error); GET returns the current spec as JSON; any other method
// is rejected with 400.
func (h *SpecHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case http.MethodPut:
		var logSpec LogSpec
		decoder := json.NewDecoder(req.Body)
		if err := decoder.Decode(&logSpec); err != nil {
			h.sendResponse(resp, http.StatusBadRequest, err)
			return
		}
		// NOTE(review): the body is not closed on the decode-error path
		// above; the net/http server closes request bodies itself, so this
		// is cosmetic rather than a leak.
		req.Body.Close()

		if err := h.Logging.ActivateSpec(logSpec.Spec); err != nil {
			h.sendResponse(resp, http.StatusBadRequest, err)
			return
		}
		resp.WriteHeader(http.StatusNoContent)

	case http.MethodGet:
		h.sendResponse(resp, http.StatusOK, &LogSpec{Spec: h.Logging.Spec()})

	default:
		err := fmt.Errorf("invalid request method: %s", req.Method)
		h.sendResponse(resp, http.StatusBadRequest, err)
	}
}
loggers in Peer
aclmgmt : info
bccsp : info
bccsp_sw : info
cauthdsl : info
cceventmgmt : info
ccprovider : info
certmonitor : info
chaincode : info
chaincode.accesscontrol : info
chaincode.externalbuilder : info
chaincode.persistence : info
chaincode.platform : info
chaincode.platform.java : info
chaincode.platform.metadata : info
chaincode.platform.node : info
chaincode.platform.util : info
chaincodeCmd : info
channelCmd : info
cli.common : info
cli.lifecycle.chaincode : info
comm : info
comm.grpc.server : info
committer : info
committer.txvalidator : info
common.capabilities : info
common.channelconfig : info
common.configtx : info
common.configtx.test : info
common.deliver : info
common.deliverevents : info
common.ledger.blockledger.file : info
common.privdata : info
common.tools.configtxgen.encoder : info
common.tools.configtxgen.localconfig : info
confighistory : info
container : info
core.comm : info
core.handlers : info
couchdb : info
cscc : info
deliveryClient : info
discovery : info
discovery.DiscoverySupport : info
discovery.acl : info
discovery.config : info
discovery.endorsement : info
discovery.lifecycle : info
dockercontroller : info
endorser : info
extcc : info
flogging : info
flogging.httpadmin : info
fsblkstorage : info
gossip.channel : info
gossip.comm : info
gossip.discovery : info
gossip.election : info
gossip.gossip : info
gossip.privdata : info
gossip.pull : info
gossip.service : info
gossip.state : info
history : info
kvledger : info
kvledger.util : info
ledgermgmt : info
ledgerstorage : info
leveldbhelper : info
lifecycle : info
lockbasedtxmgr : info
lscc : info
main : info
msp : info
msp.identity : info
nodeCmd : info
peer : info
peer.blocksprovider : info
peer.gossip.mcs : info
peer.gossip.sa : info
peer.operations : info
peer.orderers : info
policies : info
policies.inquire : info
privacyenabledstate : info
protoutils : info
pvtdatastorage : info
pvtstatepurgemgmt : info
qscc : info
rwsetutil : info
sccapi : info
statebasedval : info
statecouchdb : info
stateleveldb : info
transientstore : info
util : info
valimpl : info
valinternal : info
viperutil : info
vscc : info
#
loggers in Orderer
# log l
bccsp : info
bccsp_sw : info
cauthdsl : info
certmonitor : info
comm : info
comm.grpc.server : info
common.capabilities : info
common.channelconfig : info
common.configtx : info
common.deliver : info
common.ledger.blockledger.file : info
core.comm : info
couchdb : info
flogging : info
flogging.httpadmin : info
fsblkstorage : info
kvledger.util : info
leveldbhelper : info
localconfig : info
msp : info
msp.identity : info
orderer.commmon.multichannel : info
orderer.common.blockcutter : info
orderer.common.broadcast : info
orderer.common.msgprocessor : info
orderer.common.server : info
orderer.consensus.kafka : info
orderer.consensus.kafka.sarama : info
orderer.consensus.solo : info
orderer.operations : info
policies : info
viperutil : info
#