Enable debug logs using a configuration parameter and put all logs in a single file (#379)
This commit is contained in:
parent 8a1f3195ca
commit 58c10c6d54

8 changed files with 310 additions and 185 deletions
@@ -117,10 +117,27 @@ listen:
 tracing:
   # Config for the jaeger opentracing reporter.
   # See https://godoc.org/github.com/uber/jaeger-client-go/config#Configuration
-  # for documtation.
+  # for documentation.
   jaeger:
     disabled: true

 # A list of application service config files to use
 application_services:
   config_files: []
+
+# The configuration for dendrite logs
+logging:
+  # The logging type, only "file" is supported at the moment
+  - type: "file"
+    # The logging level, must be one of debug, info, warn, error, fatal, panic.
+    level: "info"
+    # Parameters for this type of log
+    params:
+      # File logging must be given a path to a directory. Each component will write to a different file. Logs are rotated each day and gzipped
+      path: "/var/log/dendrite"
+  # It is possible to have multiple logging hooks at the same time.
+  # To save only errors in a different directory, uncomment the following.
+  # - type: "file"
+  #   level: "error"
+  #   params:
+  #     path: "/var/log/dendrite/errors"
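
The logging section is a YAML list, so several hooks can be declared side by side. Below is a minimal standalone sketch (not Dendrite's actual loader) of how such a list unmarshals into Go structs with gopkg.in/yaml.v2; the hook type mirrors the LogrusHook struct added later in this commit.

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2"
)

// hook mirrors the shape of one entry under "logging:".
type hook struct {
    Type   string                 `yaml:"type"`
    Level  string                 `yaml:"level"`
    Params map[string]interface{} `yaml:"params"`
}

func main() {
    data := []byte(`
logging:
  - type: "file"
    level: "info"
    params:
      path: "/var/log/dendrite"
`)
    var cfg struct {
        Logging []hook `yaml:"logging"`
    }
    if err := yaml.Unmarshal(data, &cfg); err != nil {
        panic(err)
    }
    // Prints: [{file info map[path:/var/log/dendrite]}]
    fmt.Println(cfg.Logging)
}
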
@@ -18,7 +18,6 @@ import (
     "database/sql"
     "io"
     "net/http"
-    "os"

     "github.com/matrix-org/dendrite/common/keydb"
     "github.com/matrix-org/gomatrixserverlib"
@@ -56,7 +55,8 @@ type BaseDendrite struct {
 // The componentName is used for logging purposes, and should be a friendly name
 // of the compontent running, e.g. "SyncAPI"
 func NewBaseDendrite(cfg *config.Dendrite, componentName string) *BaseDendrite {
-    common.SetupLogging(os.Getenv("LOG_DIR"))
+    common.SetupStdLogging()
+    common.SetupHookLogging(cfg.Logging, componentName)

     closer, err := cfg.SetupTracing("Dendrite" + componentName)
     if err != nil {
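
The call order matters here: SetupStdLogging gives a usable stdout logger before any configuration is parsed, and SetupHookLogging then attaches the hooks the config declares. A hedged sketch of that startup sequence, assuming the repository layout at this commit and the config.Load loader whose doc comment appears further down (the YAML path is hypothetical):

package main

import (
    "github.com/matrix-org/dendrite/common"
    "github.com/matrix-org/dendrite/common/config"
)

func main() {
    // Phase 1: plain stdout logging, available before the config exists.
    common.SetupStdLogging()

    // Phase 2: load the config, then attach the hooks it declares.
    cfg, err := config.Load("dendrite.yaml") // hypothetical path
    if err != nil {
        panic(err)
    }
    common.SetupHookLogging(cfg.Logging, "SyncAPI")
}
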
@@ -154,14 +154,14 @@ func checkErrors(config *Dendrite) error {
     for _, appservice := range config.Derived.ApplicationServices {
         // Check if we've already seen this ID
         if idMap[appservice.ID] {
-            return Error{[]string{fmt.Sprintf(
+            return configErrors([]string{fmt.Sprintf(
                 "Application Service ID %s must be unique", appservice.ID,
-            )}}
+            )})
         }
         if tokenMap[appservice.ASToken] {
-            return Error{[]string{fmt.Sprintf(
+            return configErrors([]string{fmt.Sprintf(
                 "Application Service Token %s must be unique", appservice.ASToken,
-            )}}
+            )})
         }

         // Add the id/token to their respective maps if we haven't already
@@ -175,9 +175,9 @@ func checkErrors(config *Dendrite) error {
     for _, namespaceSlice := range appservice.NamespaceMap {
         for _, namespace := range namespaceSlice {
             if !IsValidRegex(namespace.Regex) {
-                return Error{[]string{fmt.Sprintf(
+                return configErrors([]string{fmt.Sprintf(
                     "Invalid regex string for Application Service %s", appservice.ID,
-                )}}
+                )})
             }
         }
     }
@@ -214,6 +214,9 @@ type Dendrite struct {
         ConfigFiles []string `yaml:"config_files"`
     } `yaml:"application_services"`

+    // The config for logging informations. Each hook will be added to logrus.
+    Logging []LogrusHook `yaml:"logging"`
+
     // Any information derived from the configuration options for later use.
     Derived struct {
         Registration struct {
@@ -268,6 +271,24 @@ type ThumbnailSize struct {
     ResizeMethod string `yaml:"method,omitempty"`
 }

+// LogrusHook represents a single logrus hook. At this point, only parsing and
+// verification of the proper values for type and level are done.
+// Validity/integrity checks on the parameters are done when configuring logrus.
+type LogrusHook struct {
+    // The type of hook, currently only "file" is supported.
+    Type string `yaml:"type"`
+
+    // The level of the logs to produce. Will output only this level and above.
+    Level string `yaml:"level"`
+
+    // The parameters for this hook.
+    Params map[string]interface{} `yaml:"params"`
+}
+
+// configErrors stores problems encountered when parsing a config file.
+// It implements the error interface.
+type configErrors []string
+
 // Load a yaml config file for a server run as multiple processes.
 // Checks the config to ensure that it is valid.
 // The checks are different if the server is run as a monolithic process instead
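
Because Params arrives from YAML as map[string]interface{}, every value has to be type-asserted at runtime, which is what checkFileHookParams does later in this commit. A small self-contained sketch of that pattern (toy code, not part of the commit):

package main

import "fmt"

// pathFromParams extracts and type-checks the "path" value from a hook's params.
func pathFromParams(params map[string]interface{}) (string, error) {
    raw, ok := params["path"]
    if !ok {
        return "", fmt.Errorf("missing \"path\" parameter")
    }
    s, ok := raw.(string)
    if !ok {
        return "", fmt.Errorf("\"path\" should be a string, got %T", raw)
    }
    return s, nil
}

func main() {
    p, err := pathFromParams(map[string]interface{}{"path": "/var/log/dendrite"})
    fmt.Println(p, err) // /var/log/dendrite <nil>
}
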
@@ -306,12 +327,6 @@ func LoadMonolithic(configPath string) (*Dendrite, error) {
     return loadConfig(basePath, configData, ioutil.ReadFile, monolithic)
 }

-// An Error indicates a problem parsing the config.
-type Error struct {
-    // List of problems encountered parsing the config.
-    Problems []string
-}
-
 func loadConfig(
     basePath string,
     configData []byte,
@@ -412,100 +427,158 @@ func (config *Dendrite) setDefaults() {
     }
 }

-// Error returns a string detailing how many errors were contained within an
-// Error type.
-func (e Error) Error() string {
-    if len(e.Problems) == 1 {
-        return e.Problems[0]
+// Error returns a string detailing how many errors were contained within a
+// configErrors type.
+func (errs configErrors) Error() string {
+    if len(errs) == 1 {
+        return errs[0]
     }
     return fmt.Sprintf(
-        "%s (and %d other problems)", e.Problems[0], len(e.Problems)-1,
+        "%s (and %d other problems)", errs[0], len(errs)-1,
     )
 }

+// Add appends an error to the list of errors in this configErrors.
+// It is a wrapper to the builtin append and hides pointers from
+// the client code.
+// This method is safe to use with an uninitialized configErrors because
+// if it is nil, it will be properly allocated.
+func (errs *configErrors) Add(str string) {
+    *errs = append(*errs, str)
+}
+
+// checkNotEmpty verifies the given value is not empty in the configuration.
+// If it is, adds an error to the list.
+func checkNotEmpty(configErrs *configErrors, key, value string) {
+    if value == "" {
+        configErrs.Add(fmt.Sprintf("missing config key %q", key))
+    }
+}
+
+// checkNotZero verifies the given value is not zero in the configuration.
+// If it is, adds an error to the list.
+func checkNotZero(configErrs *configErrors, key string, value int64) {
+    if value == 0 {
+        configErrs.Add(fmt.Sprintf("missing config key %q", key))
+    }
+}
+
+// checkPositive verifies the given value is positive (zero included)
+// in the configuration. If it is not, adds an error to the list.
+func checkPositive(configErrs *configErrors, key string, value int64) {
+    if value < 0 {
+        configErrs.Add(fmt.Sprintf("invalid value for config key %q: %d", key, value))
+    }
+}
+
+// checkTurn verifies the parameters turn.* are valid.
+func (config *Dendrite) checkTurn(configErrs *configErrors) {
+    value := config.TURN.UserLifetime
+    if value != "" {
+        if _, err := time.ParseDuration(value); err != nil {
+            configErrs.Add(fmt.Sprintf("invalid duration for config key %q: %s", "turn.turn_user_lifetime", value))
+        }
+    }
+}
+
+// checkMatrix verifies the parameters matrix.* are valid.
+func (config *Dendrite) checkMatrix(configErrs *configErrors) {
+    checkNotEmpty(configErrs, "matrix.server_name", string(config.Matrix.ServerName))
+    checkNotEmpty(configErrs, "matrix.private_key", string(config.Matrix.PrivateKeyPath))
+    checkNotZero(configErrs, "matrix.federation_certificates", int64(len(config.Matrix.FederationCertificatePaths)))
+}
+
+// checkMedia verifies the parameters media.* are valid.
+func (config *Dendrite) checkMedia(configErrs *configErrors) {
+    checkNotEmpty(configErrs, "media.base_path", string(config.Media.BasePath))
+    checkPositive(configErrs, "media.max_file_size_bytes", int64(*config.Media.MaxFileSizeBytes))
+    checkPositive(configErrs, "media.max_thumbnail_generators", int64(config.Media.MaxThumbnailGenerators))
+
+    for i, size := range config.Media.ThumbnailSizes {
+        checkPositive(configErrs, fmt.Sprintf("media.thumbnail_sizes[%d].width", i), int64(size.Width))
+        checkPositive(configErrs, fmt.Sprintf("media.thumbnail_sizes[%d].height", i), int64(size.Height))
+    }
+}
+
+// checkKafka verifies the parameters kafka.* and the related
+// database.naffka are valid.
+func (config *Dendrite) checkKafka(configErrs *configErrors, monolithic bool) {
+    if config.Kafka.UseNaffka {
+        if !monolithic {
+            configErrs.Add(fmt.Sprintf("naffka can only be used in a monolithic server"))
+        }
+
+        checkNotEmpty(configErrs, "database.naffka", string(config.Database.Naffka))
+    } else {
+        // If we aren't using naffka then we need to have at least one kafka
+        // server to talk to.
+        checkNotZero(configErrs, "kafka.addresses", int64(len(config.Kafka.Addresses)))
+    }
+    checkNotEmpty(configErrs, "kafka.topics.output_room_event", string(config.Kafka.Topics.OutputRoomEvent))
+    checkNotEmpty(configErrs, "kafka.topics.output_client_data", string(config.Kafka.Topics.OutputClientData))
+    checkNotEmpty(configErrs, "kafka.topics.user_updates", string(config.Kafka.Topics.UserUpdates))
+}
+
+// checkDatabase verifies the parameters database.* are valid.
+func (config *Dendrite) checkDatabase(configErrs *configErrors) {
+    checkNotEmpty(configErrs, "database.account", string(config.Database.Account))
+    checkNotEmpty(configErrs, "database.device", string(config.Database.Device))
+    checkNotEmpty(configErrs, "database.server_key", string(config.Database.ServerKey))
+    checkNotEmpty(configErrs, "database.media_api", string(config.Database.MediaAPI))
+    checkNotEmpty(configErrs, "database.sync_api", string(config.Database.SyncAPI))
+    checkNotEmpty(configErrs, "database.room_server", string(config.Database.RoomServer))
+}
+
+// checkListen verifies the parameters listen.* are valid.
+func (config *Dendrite) checkListen(configErrs *configErrors) {
+    checkNotEmpty(configErrs, "listen.media_api", string(config.Listen.MediaAPI))
+    checkNotEmpty(configErrs, "listen.client_api", string(config.Listen.ClientAPI))
+    checkNotEmpty(configErrs, "listen.federation_api", string(config.Listen.FederationAPI))
+    checkNotEmpty(configErrs, "listen.sync_api", string(config.Listen.SyncAPI))
+    checkNotEmpty(configErrs, "listen.room_server", string(config.Listen.RoomServer))
+}
+
+// checkLogging verifies the parameters logging.* are valid.
+func (config *Dendrite) checkLogging(configErrs *configErrors) {
+    for _, logrusHook := range config.Logging {
+        checkNotEmpty(configErrs, "logging.type", string(logrusHook.Type))
+        checkNotEmpty(configErrs, "logging.level", string(logrusHook.Level))
+    }
+}
+
 // check returns an error type containing all errors found within the config
 // file.
 func (config *Dendrite) check(monolithic bool) error {
-    var problems []string
+    var configErrs configErrors

     if config.Version != Version {
-        return Error{[]string{fmt.Sprintf(
+        configErrs.Add(fmt.Sprintf(
             "unknown config version %q, expected %q", config.Version, Version,
-        )}}
+        ))
+        return configErrs
     }

-    checkNotEmpty := func(key string, value string) {
-        if value == "" {
-            problems = append(problems, fmt.Sprintf("missing config key %q", key))
-        }
-    }
-
-    checkNotZero := func(key string, value int64) {
-        if value == 0 {
-            problems = append(problems, fmt.Sprintf("missing config key %q", key))
-        }
-    }
-
-    checkPositive := func(key string, value int64) {
-        if value < 0 {
-            problems = append(problems, fmt.Sprintf("invalid value for config key %q: %d", key, value))
-        }
-    }
-
-    checkValidDuration := func(key, value string) {
-        if _, err := time.ParseDuration(config.TURN.UserLifetime); err != nil {
-            problems = append(problems, fmt.Sprintf("invalid duration for config key %q: %s", key, value))
-        }
-    }
-
-    checkNotEmpty("matrix.server_name", string(config.Matrix.ServerName))
-    checkNotEmpty("matrix.private_key", string(config.Matrix.PrivateKeyPath))
-    checkNotZero("matrix.federation_certificates", int64(len(config.Matrix.FederationCertificatePaths)))
-
-    if config.TURN.UserLifetime != "" {
-        checkValidDuration("turn.turn_user_lifetime", config.TURN.UserLifetime)
-    }
-
-    checkNotEmpty("media.base_path", string(config.Media.BasePath))
-    checkPositive("media.max_file_size_bytes", int64(*config.Media.MaxFileSizeBytes))
-    checkPositive("media.max_thumbnail_generators", int64(config.Media.MaxThumbnailGenerators))
-    for i, size := range config.Media.ThumbnailSizes {
-        checkPositive(fmt.Sprintf("media.thumbnail_sizes[%d].width", i), int64(size.Width))
-        checkPositive(fmt.Sprintf("media.thumbnail_sizes[%d].height", i), int64(size.Height))
-    }
-    if config.Kafka.UseNaffka {
-        if !monolithic {
-            problems = append(problems, fmt.Sprintf("naffka can only be used in a monolithic server"))
-        }
-
-        checkNotEmpty("database.naffka", string(config.Database.Naffka))
-    } else {
-        // If we aren't using naffka then we need to have at least one kafka
-        // server to talk to.
-        checkNotZero("kafka.addresses", int64(len(config.Kafka.Addresses)))
-    }
-    checkNotEmpty("kafka.topics.output_room_event", string(config.Kafka.Topics.OutputRoomEvent))
-    checkNotEmpty("kafka.topics.output_client_data", string(config.Kafka.Topics.OutputClientData))
-    checkNotEmpty("kafka.topics.user_updates", string(config.Kafka.Topics.UserUpdates))
-    checkNotEmpty("database.account", string(config.Database.Account))
-    checkNotEmpty("database.device", string(config.Database.Device))
-    checkNotEmpty("database.server_key", string(config.Database.ServerKey))
-    checkNotEmpty("database.media_api", string(config.Database.MediaAPI))
-    checkNotEmpty("database.sync_api", string(config.Database.SyncAPI))
-    checkNotEmpty("database.room_server", string(config.Database.RoomServer))
-
+    config.checkMatrix(&configErrs)
+    config.checkMedia(&configErrs)
+    config.checkTurn(&configErrs)
+    config.checkKafka(&configErrs, monolithic)
+    config.checkDatabase(&configErrs)
+    config.checkLogging(&configErrs)
+
     if !monolithic {
-        checkNotEmpty("listen.media_api", string(config.Listen.MediaAPI))
-        checkNotEmpty("listen.client_api", string(config.Listen.ClientAPI))
-        checkNotEmpty("listen.federation_api", string(config.Listen.FederationAPI))
-        checkNotEmpty("listen.sync_api", string(config.Listen.SyncAPI))
-        checkNotEmpty("listen.room_server", string(config.Listen.RoomServer))
+        config.checkListen(&configErrs)
     }

-    if problems != nil {
-        return Error{problems}
+    // Due to how Golang manages its interface types, this condition is not redundant.
+    // In order to get the proper behavior, it is necessary to return an explicit nil
+    // and not a nil configErrors.
+    // This is because the following equalities hold:
+    // error(nil) == nil
+    // error(configErrors(nil)) != nil
+    if configErrs != nil {
+        return configErrs
     }

     return nil
 }
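
The comment block closing out check is the subtle part of this refactor: in Go, an interface value holding a typed nil is itself non-nil, so returning a nil configErrors directly would look like a validation failure to the caller. A self-contained demonstration of that pitfall (toy code, not part of the commit):

package main

import "fmt"

type configErrors []string

func (errs configErrors) Error() string {
    return fmt.Sprintf("%d problems", len(errs))
}

// bad returns its nil slice directly; the error interface then holds a
// (type=configErrors, value=nil) pair, which is not equal to nil.
func bad() error {
    var errs configErrors
    return errs
}

// good returns an explicit nil when no problems were recorded.
func good() error {
    var errs configErrors
    if errs != nil {
        return errs
    }
    return nil
}

func main() {
    fmt.Println(bad() == nil)  // false — caller would think validation failed
    fmt.Println(good() == nil) // true
}
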
@@ -59,6 +59,11 @@ listen:
   federation_api: "localhost:7772"
   sync_api: "localhost:7773"
   media_api: "localhost:7774"
+logging:
+  - type: "file"
+    level: "info"
+    params:
+      path: "/my/log/dir"
 `

 type mockReadFile map[string]string
@@ -16,8 +16,10 @@ package common

 import (
     "os"
+    "path"
     "path/filepath"

+    "github.com/matrix-org/dendrite/common/config"
     "github.com/matrix-org/dugong"
     "github.com/sirupsen/logrus"
 )
@@ -31,8 +33,29 @@ func (f utcFormatter) Format(entry *logrus.Entry) ([]byte, error) {
     return f.Formatter.Format(entry)
 }

-// SetupLogging configures the logging format and destination(s).
-func SetupLogging(logDir string) {
+// Logrus hook which wraps another hook and filters log entries according to their level.
+// (Note that we cannot use solely logrus.SetLevel, because Dendrite supports multiple
+// levels of logging at the same time.)
+type logLevelHook struct {
+    level logrus.Level
+    logrus.Hook
+}
+
+// Levels returns all the levels supported by this hook.
+func (h *logLevelHook) Levels() []logrus.Level {
+    levels := make([]logrus.Level, 0)
+
+    for _, level := range logrus.AllLevels {
+        if level <= h.level {
+            levels = append(levels, level)
+        }
+    }
+
+    return levels
+}
+
+// SetupStdLogging configures the logging format to standard output. Typically, it is called when the config is not yet loaded.
+func SetupStdLogging() {
     logrus.SetFormatter(&utcFormatter{
         &logrus.TextFormatter{
             TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
@@ -42,12 +65,61 @@ func SetupLogging(logDir string) {
             DisableSorting: false,
         },
     })
-    if logDir != "" {
-        _ = os.Mkdir(logDir, os.ModePerm)
-        logrus.AddHook(dugong.NewFSHook(
-            filepath.Join(logDir, "info.log"),
-            filepath.Join(logDir, "warn.log"),
-            filepath.Join(logDir, "error.log"),
+}
+
+// SetupHookLogging configures the logging hooks defined in the configuration.
+// If something fails here it means that the logging was improperly configured,
+// so we just exit with the error
+func SetupHookLogging(hooks []config.LogrusHook, componentName string) {
+    for _, hook := range hooks {
+        // Check we received a proper logging level
+        level, err := logrus.ParseLevel(hook.Level)
+        if err != nil {
+            logrus.Fatalf("Unrecognized logging level %s: %q", hook.Level, err)
+        }
+
+        // Perform a first filter on the logs according to the lowest level of all
+        // (Eg: If we have hook for info and above, prevent logrus from processing debug logs)
+        if logrus.GetLevel() < level {
+            logrus.SetLevel(level)
+        }
+
+        switch hook.Type {
+        case "file":
+            checkFileHookParams(hook.Params)
+            setupFileHook(hook, level, componentName)
+        default:
+            logrus.Fatalf("Unrecognized logging hook type: %s", hook.Type)
+        }
+    }
+}
+
+// File type hooks should be provided a path to a directory to store log files
+func checkFileHookParams(params map[string]interface{}) {
+    path, ok := params["path"]
+    if !ok {
+        logrus.Fatalf("Expecting a parameter \"path\" for logging hook of type \"file\"")
+    }
+
+    if _, ok := path.(string); !ok {
+        logrus.Fatalf("Parameter \"path\" for logging hook of type \"file\" should be a string")
+    }
+}
+
+// Add a new FSHook to the logger. Each component will log in its own file
+func setupFileHook(hook config.LogrusHook, level logrus.Level, componentName string) {
+    dirPath := (hook.Params["path"]).(string)
+    fullPath := filepath.Join(dirPath, componentName+".log")
+
+    if err := os.MkdirAll(path.Dir(fullPath), os.ModePerm); err != nil {
+        logrus.Fatalf("Couldn't create directory %s: %q", path.Dir(fullPath), err)
+    }
+
+    logrus.AddHook(&logLevelHook{
+        level,
+        dugong.NewFSHook(
+            fullPath,
             &utcFormatter{
                 &logrus.TextFormatter{
                     TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
@@ -57,6 +129,6 @@ func SetupLogging(logDir string) {
                 },
             },
             &dugong.DailyRotationSchedule{GZip: true},
-        ))
-    }
+        ),
+    })
 }
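
The logLevelHook wrapper is what makes multiple hooks with different levels work: each wrapped hook advertises only the levels at or above its threshold, so logrus fires it for those entries alone. A self-contained sketch of the same pattern, using a toy stderr hook where Dendrite uses the file hook from the diff:

package main

import (
    "os"

    "github.com/sirupsen/logrus"
)

// logLevelHook advertises only levels at or above a threshold.
type logLevelHook struct {
    level logrus.Level
    logrus.Hook
}

func (h *logLevelHook) Levels() []logrus.Level {
    levels := make([]logrus.Level, 0)
    for _, level := range logrus.AllLevels {
        // In logrus, more severe levels have smaller numeric values.
        if level <= h.level {
            levels = append(levels, level)
        }
    }
    return levels
}

// stderrHook stands in for the file hook: it writes every entry it is fired for.
type stderrHook struct{}

func (stderrHook) Levels() []logrus.Level { return logrus.AllLevels }

func (stderrHook) Fire(entry *logrus.Entry) error {
    line, err := entry.String()
    if err != nil {
        return err
    }
    _, err = os.Stderr.WriteString(line)
    return err
}

func main() {
    logrus.AddHook(&logLevelHook{logrus.WarnLevel, stderrHook{}})
    logrus.Info("not fired on the hook") // below the Warn threshold
    logrus.Warn("fired on the hook")
}
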
@@ -3,12 +3,13 @@ package dugong

 import (
     "compress/gzip"
     "fmt"
-    log "github.com/sirupsen/logrus"
     "io"
     "os"
     "path/filepath"
     "sync/atomic"
     "time"
+
+    log "github.com/sirupsen/logrus"
 )
@@ -41,6 +42,9 @@ func dayOffset(t time.Time, offsetDays int) time.Time {
     )
 }

+// ShouldRotate compares the current time with the rotation schedule.
+// If the rotation should occur, returns (true, suffix) where suffix is the
+// suffix for the rotated file. Else, returns (false, "")
 func (rs *DailyRotationSchedule) ShouldRotate() (bool, string) {
     now := currentTime()
     if rs.rotateAfter == nil {
@@ -68,15 +72,13 @@ func (rs *DailyRotationSchedule) ShouldGZip() bool {
 // contains the messages with that severity or higher. If a formatter is
 // not specified, they will be logged using a JSON formatter. If a
 // RotationScheduler is set, the files will be cycled according to its rules.
-func NewFSHook(infoPath, warnPath, errorPath string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
+func NewFSHook(path string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
     if formatter == nil {
         formatter = &log.JSONFormatter{}
     }
     hook := &fsHook{
         entries:   make(chan log.Entry, 1024),
-        infoPath:  infoPath,
-        warnPath:  warnPath,
-        errorPath: errorPath,
+        path:      path,
         formatter: formatter,
         scheduler: rotSched,
     }
@@ -96,9 +98,7 @@ func NewFSHook(infoPath, warnPath, errorPath string, formatter log.Formatter, ro
 type fsHook struct {
     entries   chan log.Entry
     queueSize int32
-    infoPath  string
-    warnPath  string
-    errorPath string
+    path      string
     formatter log.Formatter
     scheduler RotationScheduler
 }
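
With this signature change, callers of the vendored dugong fork pass a single path; one file carries every level and is rotated on the configured schedule. A hedged usage sketch against the vendored copy in this commit (upstream matrix-org/dugong still takes the three-path form):

package main

import (
    "github.com/matrix-org/dugong"
    log "github.com/sirupsen/logrus"
)

func main() {
    logger := log.New()
    logger.Hooks.Add(dugong.NewFSHook(
        "/var/log/dendrite/SyncAPI.log",           // one file for all levels
        nil,                                       // nil formatter falls back to JSON
        &dugong.DailyRotationSchedule{GZip: true}, // rotate daily, gzip the old file
    ))
    logger.Info("goes to SyncAPI.log")
}
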
@@ -123,23 +123,9 @@ func (hook *fsHook) writeEntry(entry *log.Entry) error {
         }
     }

-    if entry.Level <= log.ErrorLevel {
-        if err := logToFile(hook.errorPath, msg); err != nil {
-            return err
-        }
-    }
-
-    if entry.Level <= log.WarnLevel {
-        if err := logToFile(hook.warnPath, msg); err != nil {
-            return err
-        }
-    }
-
-    if entry.Level <= log.InfoLevel {
-        if err := logToFile(hook.infoPath, msg); err != nil {
-            return err
-        }
-    }
+    if err := logToFile(hook.path, msg); err != nil {
+        return err
+    }

     return nil
 }
@@ -151,6 +137,7 @@ func (hook *fsHook) Levels() []log.Level {
         log.ErrorLevel,
         log.WarnLevel,
         log.InfoLevel,
+        log.DebugLevel,
     }
 }
@@ -161,19 +148,16 @@ func (hook *fsHook) Levels() []log.Level {
 // one which does the logging. Since we don't hold open a handle to the
 // file when writing, a simple Rename is all that is required.
 func (hook *fsHook) rotate(suffix string, gzip bool) error {
-    for _, fpath := range []string{hook.errorPath, hook.warnPath, hook.infoPath} {
-        logFilePath := fpath + suffix
-        if err := os.Rename(fpath, logFilePath); err != nil {
-            // e.g. because there were no errors in error.log for this day
-            fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", fpath, err)
-            continue // don't try to gzip if we failed to rotate
-        }
-        if gzip {
-            if err := gzipFile(logFilePath); err != nil {
-                fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
-            }
-        }
-    }
+    logFilePath := hook.path + suffix
+    if err := os.Rename(hook.path, logFilePath); err != nil {
+        // e.g. because there were no errors in error.log for this day
+        fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", hook.path, err)
+    } else if gzip {
+        // Don't try to gzip if we failed to rotate
+        if err := gzipFile(logFilePath); err != nil {
+            fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
+        }
+    }
     return nil
 }
@@ -3,7 +3,6 @@ package dugong

 import (
     "bufio"
     "encoding/json"
-    log "github.com/sirupsen/logrus"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -12,6 +11,8 @@ import (
     "sync/atomic"
     "testing"
     "time"
+
+    log "github.com/sirupsen/logrus"
 )

 const (
@@ -19,7 +20,7 @@ const (
     fieldValue = "my_value"
 )

-func TestFSHookInfo(t *testing.T) {
+func TestFSHook(t *testing.T) {
     logger, hook, wait, teardown := setupLogHook(t)
     defer teardown()
@@ -27,32 +28,7 @@ func TestFSHookInfo(t *testing.T) {

     wait()

-    checkLogFile(t, hook.infoPath, "info")
-}
-
-func TestFSHookWarn(t *testing.T) {
-    logger, hook, wait, teardown := setupLogHook(t)
-    defer teardown()
-
-    logger.WithField(fieldName, fieldValue).Warn("Warn message")
-
-    wait()
-
-    checkLogFile(t, hook.infoPath, "warning")
-    checkLogFile(t, hook.warnPath, "warning")
-}
-
-func TestFSHookError(t *testing.T) {
-    logger, hook, wait, teardown := setupLogHook(t)
-    defer teardown()
-
-    logger.WithField(fieldName, fieldValue).Error("Error message")
-
-    wait()
-
-    checkLogFile(t, hook.infoPath, "error")
-    checkLogFile(t, hook.warnPath, "error")
-    checkLogFile(t, hook.errorPath, "error")
+    checkLogFile(t, hook.path, "info")
 }

 func TestFsHookInterleaved(t *testing.T) {
|
||||||
|
|
||||||
wait()
|
wait()
|
||||||
|
|
||||||
file, err := os.Open(hook.infoPath)
|
file, err := os.Open(hook.path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to open file: %v", err)
|
t.Fatalf("Failed to open file: %v", err)
|
||||||
}
|
}
|
||||||
|
@@ -101,7 +77,7 @@ func TestFSHookMultiple(t *testing.T) {

     wait()

-    file, err := os.Open(hook.infoPath)
+    file, err := os.Open(hook.path)
     if err != nil {
         t.Fatalf("Failed to open file: %v", err)
     }
@@ -143,7 +119,7 @@ func TestFSHookConcurrent(t *testing.T) {
     wg.Wait()
     wait()

-    file, err := os.Open(hook.infoPath)
+    file, err := os.Open(hook.path)
     if err != nil {
         t.Fatalf("Failed to open file: %v", err)
     }
@@ -176,7 +152,7 @@ func TestDailySchedule(t *testing.T) {
     // Time ticks from 23:50 to 00:10 in 1 minute increments. Log each tick as 'counter'.
     minutesGoneBy := 0
     currentTime = func() time.Time {
-        minutesGoneBy += 1
+        minutesGoneBy++
         return time.Date(2016, 10, 26, 23, 50+minutesGoneBy, 00, 0, loc)
     }
     for i := 0; i < 20; i++ {
@@ -186,11 +162,11 @@ func TestDailySchedule(t *testing.T) {

     wait()

-    // info.log.2016-10-26 should have 0 -> 9
-    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-26", 0, 9)
+    // fshook.log.2016-10-26 should have 0 -> 9
+    checkFileHasSequentialCounts(t, hook.path+".2016-10-26", 0, 9)

-    // info.log should have 10 -> 19 inclusive
-    checkFileHasSequentialCounts(t, hook.infoPath, 10, 19)
+    // fshook.log should have 10 -> 19 inclusive
+    checkFileHasSequentialCounts(t, hook.path, 10, 19)
 }

 func TestDailyScheduleMultipleRotations(t *testing.T) {
@@ -210,7 +186,7 @@ func TestDailyScheduleMultipleRotations(t *testing.T) {
         // Start from 10/29 01:37
         return time.Date(2016, 10, 28, 13+hoursGoneBy, 37, 00, 0, loc)
     }
-    // log 2 lines per file, to 4 files (so 8 log lines)
+    // log 8 lines
     for i := 0; i < 8; i++ {
         ts := time.Date(2016, 10, 28, 13+((i+1)*12), 37, 00, 0, loc)
         logger.WithField("counter", i).Infof("The time is now %s", ts)
@@ -218,17 +194,17 @@ func TestDailyScheduleMultipleRotations(t *testing.T) {

     wait()

-    // info.log.2016-10-29 should have 0-1
-    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-29", 0, 1)
+    // fshook.log.2016-10-29 should have 0-1
+    checkFileHasSequentialCounts(t, hook.path+".2016-10-29", 0, 1)

-    // info.log.2016-10-30 should have 2-3
-    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-30", 2, 3)
+    // fshook.log.2016-10-30 should have 2-3
+    checkFileHasSequentialCounts(t, hook.path+".2016-10-30", 2, 3)

-    // info.log.2016-10-31 should have 4-5
-    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-31", 4, 5)
+    // fshook.log.2016-10-31 should have 4-5
+    checkFileHasSequentialCounts(t, hook.path+".2016-10-31", 4, 5)

-    // info.log should have 6-7 (current day is 11/01)
-    checkFileHasSequentialCounts(t, hook.infoPath, 6, 7)
+    // fshook.log should have 6-7 (current day is 11/01)
+    checkFileHasSequentialCounts(t, hook.path, 6, 7)
 }

 // checkFileHasSequentialCounts based on a JSON "counter" key being a monotonically
@@ -271,11 +247,9 @@ func setupLogHook(t *testing.T) (logger *log.Logger, hook *fsHook, wait func(),
         t.Fatalf("Failed to make temporary directory: %v", err)
     }

-    infoPath := filepath.Join(dir, "info.log")
-    warnPath := filepath.Join(dir, "warn.log")
-    errorPath := filepath.Join(dir, "error.log")
-
-    hook = NewFSHook(infoPath, warnPath, errorPath, nil, nil).(*fsHook)
+    path := filepath.Join(dir, "fshook.log")
+
+    hook = NewFSHook(path, nil, nil).(*fsHook)

     logger = log.New()
     logger.Hooks.Add(hook)