417f30c9ea
I think this will prevent some random test failures when we realize that we are no longer in the peerset and trigger a shutdown, but Raft has not finished fully committing the operation, which then triggers an error and a retry. But the contexts are cancelled in the retry, so it won't find a leader and will finally error with that message. License: MIT Signed-off-by: Hector Sanjuan <hector@protocol.ai>
19 lines
408 B
Go
19 lines
408 B
Go
// +build debug,!silent
|
|
|
|
package ipfscluster
|
|
|
|
func init() {
|
|
l := "DEBUG"
|
|
for _, f := range facilities {
|
|
SetFacilityLogLevel(f, l)
|
|
}
|
|
|
|
//SetFacilityLogLevel("cluster", l)
|
|
//SetFacilityLogLevel("consensus", l)
|
|
//SetFacilityLogLevel("monitor", "INFO")
|
|
//SetFacilityLogLevel("raft", l)
|
|
//SetFacilityLogLevel("p2p-gorpc", l)
|
|
//SetFacilityLogLevel("swarm2", l)
|
|
//SetFacilityLogLevel("libp2p-raft", l)
|
|
}
|