mirror of
https://github.com/hoernschen/dendrite.git
synced 2024-12-27 07:28:27 +00:00
Add dugong to support writing to disk/rotations
This commit is contained in:
parent
f1bb59d24a
commit
0bd3af8115
5 changed files with 741 additions and 0 deletions
6
vendor/manifest
vendored
6
vendor/manifest
vendored
|
@ -77,6 +77,12 @@
|
||||||
"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
|
"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
|
||||||
"branch": "master"
|
"branch": "master"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"importpath": "github.com/matrix-org/dugong",
|
||||||
|
"repository": "https://github.com/matrix-org/dugong",
|
||||||
|
"revision": "193b8f88e381d12f2d53023fba25e43fc81dc5ac",
|
||||||
|
"branch": "master"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"importpath": "github.com/matrix-org/gomatrixserverlib",
|
"importpath": "github.com/matrix-org/gomatrixserverlib",
|
||||||
"repository": "https://github.com/matrix-org/gomatrixserverlib",
|
"repository": "https://github.com/matrix-org/gomatrixserverlib",
|
||||||
|
|
201
vendor/src/github.com/matrix-org/dugong/LICENSE
vendored
Normal file
201
vendor/src/github.com/matrix-org/dugong/LICENSE
vendored
Normal file
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
10
vendor/src/github.com/matrix-org/dugong/README.md
vendored
Normal file
10
vendor/src/github.com/matrix-org/dugong/README.md
vendored
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
# dugong
|
||||||
|
Logging utilities for [logrus](https://github.com/Sirupsen/logrus).
|
||||||
|
|
||||||
|
To develop on this library, you need logrus on your GOPATH:
|
||||||
|
|
||||||
|
``go get github.com/Sirupsen/logrus``
|
||||||
|
|
||||||
|
You can then run its tests by running
|
||||||
|
|
||||||
|
``go test``
|
210
vendor/src/github.com/matrix-org/dugong/fshook.go
vendored
Normal file
210
vendor/src/github.com/matrix-org/dugong/fshook.go
vendored
Normal file
|
@ -0,0 +1,210 @@
|
||||||
|
package dugong
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RotationScheduler determines when files should be rotated.
//
// Implementations are called from the hook's single writer goroutine
// (see NewFSHook), so they do not need to be safe for concurrent use.
type RotationScheduler interface {
	// ShouldRotate returns true if the file should be rotated. The suffix to apply
	// to the filename is returned as the 2nd arg.
	ShouldRotate() (bool, string)
	// ShouldGZip returns true if the file should be gzipped when it is rotated.
	ShouldGZip() bool
}
|
||||||
|
|
||||||
|
// DailyRotationSchedule rotates log files daily. Logs are only rotated
// when midnight passes *whilst the process is running*. E.g: if you run
// the process on Day 4 then stop it and start it on Day 7, no rotation will
// occur when the process starts.
type DailyRotationSchedule struct {
	// GZip controls whether rotated files are compressed (see ShouldGZip).
	GZip bool
	// rotateAfter is the next midnight boundary after which rotation is due.
	// It is nil until the first ShouldRotate call arms the schedule.
	rotateAfter *time.Time
}
|
||||||
|
|
||||||
|
var currentTime = time.Now // exclusively for testing
|
||||||
|
|
||||||
|
func dayOffset(t time.Time, offsetDays int) time.Time {
|
||||||
|
// GoDoc:
|
||||||
|
// The month, day, hour, min, sec, and nsec values may be outside their
|
||||||
|
// usual ranges and will be normalized during the conversion.
|
||||||
|
// For example, October 32 converts to November 1.
|
||||||
|
return time.Date(
|
||||||
|
t.Year(), t.Month(), t.Day()+offsetDays, 0, 0, 0, 0, t.Location(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rs *DailyRotationSchedule) ShouldRotate() (bool, string) {
|
||||||
|
now := currentTime()
|
||||||
|
if rs.rotateAfter == nil {
|
||||||
|
nextRotate := dayOffset(now, 1)
|
||||||
|
rs.rotateAfter = &nextRotate
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
if now.After(*rs.rotateAfter) {
|
||||||
|
// the suffix should be actually the date of the complete day being logged
|
||||||
|
actualDay := dayOffset(*rs.rotateAfter, -1)
|
||||||
|
suffix := "." + actualDay.Format("2006-01-02") // YYYY-MM-DD
|
||||||
|
nextRotate := dayOffset(now, 1)
|
||||||
|
rs.rotateAfter = &nextRotate
|
||||||
|
return true, suffix
|
||||||
|
}
|
||||||
|
return false, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// ShouldGZip reports whether rotated files should be gzip-compressed,
// as configured via the GZip field.
func (rs *DailyRotationSchedule) ShouldGZip() bool {
	return rs.GZip
}
|
||||||
|
|
||||||
|
// NewFSHook makes a logging hook that writes formatted
// log entries to info, warn and error log files. Each log file
// contains the messages with that severity or higher. If a formatter is
// not specified, they will be logged using a JSON formatter. If a
// RotationScheduler is set, the files will be cycled according to its rules.
//
// All disk writes happen on a single background goroutine fed by a
// buffered channel, so Fire never blocks on I/O (unless the 1024-entry
// buffer fills). NOTE(review): the entries channel is never closed, so
// the goroutine lives for the remainder of the process.
func NewFSHook(infoPath, warnPath, errorPath string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
	if formatter == nil {
		formatter = &log.JSONFormatter{}
	}
	hook := &fsHook{
		entries:   make(chan log.Entry, 1024),
		infoPath:  infoPath,
		warnPath:  warnPath,
		errorPath: errorPath,
		formatter: formatter,
		scheduler: rotSched,
	}

	go func() {
		// Single consumer: serialises all writes and rotations, so no
		// further locking is needed anywhere in this package.
		for entry := range hook.entries {
			if err := hook.writeEntry(&entry); err != nil {
				fmt.Fprintf(os.Stderr, "Error writing to logfile: %v\n", err)
			}
			// Decrement after the write completes; tests poll queueSize
			// to know when all queued entries have been flushed.
			atomic.AddInt32(&hook.queueSize, -1)
		}
	}()

	return hook
}
|
||||||
|
|
||||||
|
// fsHook buffers log entries on a channel and writes them to up to three
// files (info/warn/error) from the single goroutine started by NewFSHook.
type fsHook struct {
	entries   chan log.Entry
	queueSize int32 // accessed atomically: entries queued but not yet written
	infoPath  string
	warnPath  string
	errorPath string
	formatter log.Formatter
	scheduler RotationScheduler // may be nil, meaning no rotation
}
|
||||||
|
|
||||||
|
// Fire queues entry for asynchronous writing and always returns nil.
// It blocks only if the 1024-entry buffer is full.
func (hook *fsHook) Fire(entry *log.Entry) error {
	// Increment before sending so a poller of queueSize can never observe
	// zero while an entry is still buffered but unwritten.
	atomic.AddInt32(&hook.queueSize, 1)
	hook.entries <- *entry
	return nil
}
|
||||||
|
|
||||||
|
func (hook *fsHook) writeEntry(entry *log.Entry) error {
|
||||||
|
msg, err := hook.formatter.Format(entry)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if hook.scheduler != nil {
|
||||||
|
if should, suffix := hook.scheduler.ShouldRotate(); should {
|
||||||
|
if err := hook.rotate(suffix, hook.scheduler.ShouldGZip()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.Level <= log.ErrorLevel {
|
||||||
|
if err := logToFile(hook.errorPath, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.Level <= log.WarnLevel {
|
||||||
|
if err := logToFile(hook.warnPath, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.Level <= log.InfoLevel {
|
||||||
|
if err := logToFile(hook.infoPath, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Levels reports the severities this hook handles: Info and above.
// Debug entries never reach Fire and are not written to disk.
func (hook *fsHook) Levels() []log.Level {
	return []log.Level{
		log.PanicLevel,
		log.FatalLevel,
		log.ErrorLevel,
		log.WarnLevel,
		log.InfoLevel,
	}
}
|
||||||
|
|
||||||
|
// rotate all the log files to the given suffix.
// If error path is "err.log" and suffix is "1" then move
// the contents to "err.log1".
// This requires no locking as the goroutine calling this is the same
// one which does the logging. Since we don't hold open a handle to the
// file when writing, a simple Rename is all that is required.
//
// Rename failures are reported to stderr and skipped rather than
// returned, so one missing file does not block rotating the others.
// Always returns nil.
func (hook *fsHook) rotate(suffix string, gzip bool) error {
	for _, fpath := range []string{hook.errorPath, hook.warnPath, hook.infoPath} {
		logFilePath := fpath + suffix
		if err := os.Rename(fpath, logFilePath); err != nil {
			// e.g. because there were no errors in error.log for this day
			fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", fpath, err)
			continue // don't try to gzip if we failed to rotate
		}
		if gzip {
			// NOTE(review): gzipFile leaves the uncompressed rotated file
			// in place next to the .gz — confirm whether it should be
			// removed after successful compression.
			if err := gzipFile(logFilePath); err != nil {
				fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func logToFile(path string, msg []byte) error {
|
||||||
|
fd, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fd.Close()
|
||||||
|
_, err = fd.Write(msg)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func gzipFile(fpath string) error {
|
||||||
|
reader, err := os.Open(fpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := filepath.Base(fpath)
|
||||||
|
target := filepath.Join(filepath.Dir(fpath), filename+".gz")
|
||||||
|
writer, err := os.Create(target)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer writer.Close()
|
||||||
|
|
||||||
|
archiver := gzip.NewWriter(writer)
|
||||||
|
archiver.Name = filename
|
||||||
|
defer archiver.Close()
|
||||||
|
|
||||||
|
_, err = io.Copy(archiver, reader)
|
||||||
|
return err
|
||||||
|
}
|
314
vendor/src/github.com/matrix-org/dugong/fshook_test.go
vendored
Normal file
314
vendor/src/github.com/matrix-org/dugong/fshook_test.go
vendored
Normal file
|
@ -0,0 +1,314 @@
|
||||||
|
package dugong
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
log "github.com/Sirupsen/logrus"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Field name and value attached to every entry logged by the simple
// severity tests and asserted on by checkLogFile.
const (
	fieldName  = "my_field"
	fieldValue = "my_value"
)
|
||||||
|
|
||||||
|
// TestFSHookInfo checks that a single Info entry reaches the info log file.
func TestFSHookInfo(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	logger.WithField(fieldName, fieldValue).Info("Info message")

	// Block until the background writer has drained the queue.
	wait()

	checkLogFile(t, hook.infoPath, "info")
}
|
||||||
|
|
||||||
|
// TestFSHookWarn checks that a Warn entry is written to both the info and
// warn files (each file receives its severity and above).
func TestFSHookWarn(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	logger.WithField(fieldName, fieldValue).Warn("Warn message")

	wait()

	checkLogFile(t, hook.infoPath, "warning")
	checkLogFile(t, hook.warnPath, "warning")
}
|
||||||
|
|
||||||
|
// TestFSHookError checks that an Error entry is written to all three
// files: info, warn and error.
func TestFSHookError(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	logger.WithField(fieldName, fieldValue).Error("Error message")

	wait()

	checkLogFile(t, hook.infoPath, "error")
	checkLogFile(t, hook.warnPath, "error")
	checkLogFile(t, hook.errorPath, "error")
}
|
||||||
|
|
||||||
|
// TestFsHookInterleaved logs at mixed severities and verifies that the
// info file (which receives every entry) preserves submission order.
func TestFsHookInterleaved(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	logger.WithField("counter", 0).Info("message")
	logger.WithField("counter", 1).Warn("message")
	logger.WithField("counter", 2).Error("message")
	logger.WithField("counter", 3).Warn("message")
	logger.WithField("counter", 4).Info("message")

	wait()

	file, err := os.Open(hook.infoPath)
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}

	// Each line is one JSON log entry; counters must appear in order.
	scanner := bufio.NewScanner(file)
	count := 0
	for scanner.Scan() {
		data := make(map[string]interface{})
		if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
			t.Fatalf("Failed to parse JSON: %v", err)
		}
		// JSON numbers decode as float64; convert for comparison.
		dataCounter := int(data["counter"].(float64))
		if count != dataCounter {
			t.Fatalf("Counter: want %d got %d", count, dataCounter)
		}
		count++
	}

	if count != 5 {
		t.Fatalf("Lines: want 5 got %d", count)
	}
}
|
||||||
|
|
||||||
|
// TestFSHookMultiple logs 100 sequential entries from one goroutine and
// verifies all of them arrive in the info file in order.
func TestFSHookMultiple(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	for i := 0; i < 100; i++ {
		logger.WithField("counter", i).Info("message")
	}

	wait()

	file, err := os.Open(hook.infoPath)
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}

	scanner := bufio.NewScanner(file)
	count := 0
	for scanner.Scan() {
		data := make(map[string]interface{})
		if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
			t.Fatalf("Failed to parse JSON: %v", err)
		}
		// JSON numbers decode as float64; convert for comparison.
		dataCounter := int(data["counter"].(float64))
		if count != dataCounter {
			t.Fatalf("Counter: want %d got %d", count, dataCounter)
		}
		count++
	}

	if count != 100 {
		t.Fatalf("Lines: want 100 got %d", count)
	}
}
|
||||||
|
|
||||||
|
// TestFSHookConcurrent logs 100 entries from 100 goroutines and verifies
// that all lines arrive as valid JSON. Ordering is NOT asserted here —
// concurrent Fire calls may interleave arbitrarily.
func TestFSHookConcurrent(t *testing.T) {
	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()

	var wg sync.WaitGroup

	for i := 0; i < 100; i++ {
		wg.Add(1)

		go func(counter int) {
			defer wg.Done()
			logger.WithField("counter", counter).Info("message")
		}(i)
	}

	// First wait for every goroutine to have fired, then for the hook's
	// queue to drain.
	wg.Wait()
	wait()

	file, err := os.Open(hook.infoPath)
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
	}

	scanner := bufio.NewScanner(file)
	count := 0
	for scanner.Scan() {
		data := make(map[string]interface{})
		if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
			t.Fatalf("Failed to parse JSON: %v", err)
		}
		count++
	}

	if count != 100 {
		t.Fatalf("Lines: want 100 got %d", count)
	}
}
|
||||||
|
|
||||||
|
// TestDailySchedule stubs the package-level currentTime so a simulated
// midnight passes mid-test, then verifies entries are split between the
// rotated file (named for the completed day) and the live info file.
// NOTE(review): mutates global currentTime without restoring it, so this
// test is not safe to run in parallel with others.
func TestDailySchedule(t *testing.T) {
	loc, err := time.LoadLocation("UTC")
	if err != nil {
		t.Fatalf("Failed to load location UTC: %s", err)
	}

	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()
	hook.scheduler = &DailyRotationSchedule{}

	// Time ticks from 23:50 to 00:10 in 1 minute increments. Log each tick as 'counter'.
	minutesGoneBy := 0
	currentTime = func() time.Time {
		minutesGoneBy += 1
		return time.Date(2016, 10, 26, 23, 50+minutesGoneBy, 00, 0, loc)
	}
	for i := 0; i < 20; i++ {
		// NOTE(review): this `t` shadows the *testing.T parameter.
		t := time.Date(2016, 10, 26, 23, 50+i, 00, 0, loc)
		logger.WithField("counter", i).Info("BASE " + t.Format(time.ANSIC))
	}

	wait()

	// info.log.2016-10-26 should have 0 -> 9
	checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-26", 0, 9)

	// info.log should have 10 -> 19 inclusive
	checkFileHasSequentialCounts(t, hook.infoPath, 10, 19)
}
|
||||||
|
|
||||||
|
// TestDailyScheduleMultipleRotations advances the stubbed clock by 12
// hours per log call so several midnights pass, then verifies each
// rotated file holds exactly the entries logged on its day.
// NOTE(review): mutates global currentTime without restoring it.
func TestDailyScheduleMultipleRotations(t *testing.T) {
	loc, err := time.LoadLocation("UTC")
	if err != nil {
		t.Fatalf("Failed to load location UTC: %s", err)
	}

	logger, hook, wait, teardown := setupLogHook(t)
	defer teardown()
	hook.scheduler = &DailyRotationSchedule{}

	// Time ticks every 12 hours from 13:37 -> 01:37 -> 13:37 -> ...
	hoursGoneBy := 0
	currentTime = func() time.Time {
		hoursGoneBy += 12
		// Start from 10/29 01:37
		return time.Date(2016, 10, 28, 13+hoursGoneBy, 37, 00, 0, loc)
	}
	// log 2 lines per file, to 4 files (so 8 log lines)
	for i := 0; i < 8; i++ {
		ts := time.Date(2016, 10, 28, 13+((i+1)*12), 37, 00, 0, loc)
		logger.WithField("counter", i).Infof("The time is now %s", ts)
	}

	wait()

	// info.log.2016-10-29 should have 0-1
	checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-29", 0, 1)

	// info.log.2016-10-30 should have 2-3
	checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-30", 2, 3)

	// info.log.2016-10-31 should have 4-5
	checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-31", 4, 5)

	// info.log should have 6-7 (current day is 11/01)
	checkFileHasSequentialCounts(t, hook.infoPath, 6, 7)
}
|
||||||
|
|
||||||
|
// checkFileHasSequentialCounts based on a JSON "counter" key being a monotonically
// incrementing integer. from and to are both inclusive.
// NOTE(review): the `filepath` parameter shadows the path/filepath package
// within this function.
func checkFileHasSequentialCounts(t *testing.T, filepath string, from, to int) {
	t.Logf("checkFileHasSequentialCounts(%s,%d,%d)", filepath, from, to)

	file, err := os.Open(filepath)
	if err != nil {
		t.Fatalf("Failed to open file: %v", err)
		return
	}

	defer file.Close()
	scanner := bufio.NewScanner(file)
	count := from
	for scanner.Scan() {
		data := make(map[string]interface{})
		if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
			t.Fatalf("%s : Failed to parse JSON: %v", file.Name(), err)
		}
		// JSON numbers decode as float64; convert for comparison.
		dataCounter := int(data["counter"].(float64))
		t.Logf("%s want %d got %d", file.Name(), count, dataCounter)
		if count != dataCounter {
			t.Fatalf("%s : Counter: want %d got %d", file.Name(), count, dataCounter)
		}

		count++
	}
	count-- // never hit the next value

	if count != to {
		t.Fatalf("%s EOF: Want count %d got %d", file.Name(), to, count)
	}
}
|
||||||
|
|
||||||
|
// setupLogHook builds a logger wired to an fsHook writing into a fresh
// temp directory. It returns the logger, the hook (for direct access to
// its paths/scheduler), a wait func that spins until the hook's write
// queue is empty, and a teardown func that deletes the temp directory.
func setupLogHook(t *testing.T) (logger *log.Logger, hook *fsHook, wait func(), teardown func()) {
	dir, err := ioutil.TempDir("", "TestFSHook")
	if err != nil {
		t.Fatalf("Failed to make temporary directory: %v", err)
	}

	infoPath := filepath.Join(dir, "info.log")
	warnPath := filepath.Join(dir, "warn.log")
	errorPath := filepath.Join(dir, "error.log")

	// nil formatter => JSON output; nil scheduler => no rotation
	// (tests that need rotation assign hook.scheduler afterwards).
	hook = NewFSHook(infoPath, warnPath, errorPath, nil, nil).(*fsHook)

	logger = log.New()
	logger.Hooks.Add(hook)

	wait = func() {
		// Busy-wait until the background writer has drained the queue.
		for atomic.LoadInt32(&hook.queueSize) != 0 {
			runtime.Gosched()
		}
	}

	teardown = func() {
		os.RemoveAll(dir)
	}

	return
}
|
||||||
|
|
||||||
|
func checkLogFile(t *testing.T, path, expectedLevel string) {
|
||||||
|
contents, err := ioutil.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to read file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
data := make(map[string]interface{})
|
||||||
|
if err := json.Unmarshal(contents, &data); err != nil {
|
||||||
|
t.Fatalf("Failed to parse JSON: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if data["level"] != expectedLevel {
|
||||||
|
t.Fatalf("level: want %q got %q", expectedLevel, data["level"])
|
||||||
|
}
|
||||||
|
|
||||||
|
if data[fieldName] != fieldValue {
|
||||||
|
t.Fatalf("%s: want %q got %q", fieldName, fieldValue, data[fieldName])
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in a new issue