infra/goutils (archived)

Replace logrotator

Sangbum Kim 2018-06-12 08:45:20 +09:00
parent b1f49e40e7
commit fba6751e05
175 changed files with 24446 additions and 53 deletions

Gopkg.lock generated Normal file (+92 lines)

@@ -0,0 +1,92 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "github.com/NebulousLabs/fastrand"
packages = ["."]
revision = "3cf7173006a0b7d2371fa1a220da7f9d48c7827c"
[[projects]]
branch = "master"
name = "github.com/benburkert/dns"
packages = ["."]
revision = "737ce0376169e5410fb2a4ae066d5aabbb71648c"
[[projects]]
name = "github.com/lestrrat-go/file-rotatelogs"
packages = [
".",
"internal/option"
]
revision = "3b4f34a036f374c18d604e1b8006d9fbb9593c06"
version = "v2.2.0"
[[projects]]
branch = "master"
name = "github.com/lestrrat-go/strftime"
packages = ["."]
revision = "59966ecb6d84ec0010de6a5b8deae0299ce5b549"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "go.uber.org/atomic"
packages = ["."]
revision = "4e336646b2ef9fc6e47be8e21594178f98e5ebcf"
version = "v1.2.0"
[[projects]]
name = "go.uber.org/multierr"
packages = ["."]
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
version = "v1.1.0"
[[projects]]
name = "go.uber.org/zap"
packages = [
".",
"buffer",
"internal/bufferpool",
"internal/color",
"internal/exit",
"zapcore"
]
revision = "eeedf312bc6c57391d84767a4cd413f02a917974"
version = "v1.8.0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["blake2b"]
revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["cpu"]
revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb"
[[projects]]
name = "golang.org/x/text"
packages = [
"internal/gen",
"internal/triegen",
"internal/ucd",
"transform",
"unicode/cldr",
"width"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "6efd7212bdd402a83d3371c761b44b92ba0a2dfb54708f3fe576e5fd1ed5bade"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml Normal file (+50 lines)

@@ -0,0 +1,50 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
branch = "master"
name = "github.com/NebulousLabs/fastrand"
[[constraint]]
branch = "master"
name = "github.com/benburkert/dns"
[[constraint]]
name = "go.uber.org/zap"
version = "1.8.0"
[[constraint]]
name = "golang.org/x/text"
version = "0.3.0"
[[constraint]]
name = "gopkg.in/natefinch/lumberjack.v2"
version = "2.1.0"
[prune]
go-tests = true
unused-packages = true

(modified file)

@@ -9,29 +9,25 @@ import (
var loggers RotateSyncerSet
func NewLogWriter(FileName string, MaxSizeMb, MaxBackup, MaxDay int, logDir string) logger.RotateSyncer {
func NewLogWriter(FileName string, logDir string, options ...Option) (logger.RotateSyncer, error) {
switch FileName {
case "Stdout":
return NewLocked(os.Stdout)
return NewLocked(os.Stdout), nil
case "Stderr":
return NewLocked(os.Stderr)
return NewLocked(os.Stderr), nil
default:
logpath := FileName
if logDir != "" {
logpath = path.Join(logDir, FileName)
}
log.Println(" Attention!! log writes to ", logpath)
logWriter := NewRotater(
logpath,
MaxSizeMb, // megabytes
MaxBackup,
MaxDay, //days
)
loggers.Store(logWriter)
logWriter.SetOnClose(func() { loggers.Delete(logWriter) })
return logWriter
if logWriter, err := NewRotater(logpath, options...); err != nil {
return nil, err
} else {
loggers.Store(logWriter)
logWriter.SetOnClose(func() { loggers.Delete(logWriter) })
return logWriter, nil
}
}
}

(modified file)

@@ -34,6 +34,6 @@ func (s *LockedWriteSyncer) Sync() error {
return err
}
func (r *LockedWriteSyncer) SetOnClose(closeFunc logger.CloseFunc) {}
func (r *LockedWriteSyncer) Rotate() (err error) { return }
func (r *LockedWriteSyncer) Close() (err error) { return }
func (r *LockedWriteSyncer) SetOnClose(closeFunc func()) {}
func (r *LockedWriteSyncer) Rotate() (err error) { return }
func (r *LockedWriteSyncer) Close() (err error) { return }

(modified file)

@@ -3,26 +3,25 @@ package rotater
import (
"sync"
"amuz.es/src/infra/goutils/logger"
lumberjack "gopkg.in/natefinch/lumberjack.v2"
"github.com/lestrrat-go/file-rotatelogs"
)
type Option = rotatelogs.Option
type rotateSyncer struct {
setOnceOnclose *sync.Once
onClose func()
lumberjack.Logger
*rotatelogs.RotateLogs
}
func NewRotater(filename string, maxSize, maxBackup, maxDay int) logger.RotateSyncer {
return &rotateSyncer{
setOnceOnclose: &sync.Once{},
Logger: lumberjack.Logger{
Filename: filename,
MaxSize: maxSize, // megabytes
MaxBackups: maxBackup,
MaxAge: maxDay, //days
LocalTime: false,
Compress: false,
},
func NewRotater(filename string, options ...Option) (logger.RotateSyncer, error) {
if rotateLogger, err := rotatelogs.New(filename, options...); err != nil {
return nil, err
} else {
return &rotateSyncer{
setOnceOnclose: &sync.Once{},
RotateLogs: rotateLogger,
}, nil
}
}
func (r *rotateSyncer) SetOnClose(closeFunc func()) {
@@ -32,7 +31,7 @@ func (r *rotateSyncer) SetOnClose(closeFunc func()) {
}
func (r *rotateSyncer) Rotate() error {
return r.Logger.Rotate()
return r.RotateLogs.Rotate()
}
func (r *rotateSyncer) Close() error {
defer func() {
@@ -40,7 +39,7 @@ func (r *rotateSyncer) Close() error {
r.onClose()
}
}()
return r.Logger.Close()
return r.RotateLogs.Close()
}
func (r *rotateSyncer) Sync() error {
@@ -48,5 +47,5 @@ func (r *rotateSyncer) Sync() error {
}
func (s *rotateSyncer) Write(bs []byte) (int, error) {
return s.Logger.Write(bs)
return s.RotateLogs.Write(bs)
}
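For reference, a minimal sketch of how the new rotatelogs-backed rotater might be used. The option names come from github.com/lestrrat-go/file-rotatelogs (aliased here as rotater.Option); the file pattern and durations are illustrative, not taken from this repository:

```go
package main

import (
	"time"

	rotatelogs "github.com/lestrrat-go/file-rotatelogs"

	"amuz.es/src/infra/goutils/logger/rotater"
)

func main() {
	// Hypothetical rotation policy: time-based rotation replaces the old
	// lumberjack MaxSizeMb/MaxBackup/MaxDay knobs removed by this commit.
	opts := []rotater.Option{
		rotatelogs.WithRotationTime(24 * time.Hour), // start a new file daily
		rotatelogs.WithMaxAge(7 * 24 * time.Hour),   // prune files older than a week
	}

	// NewRotater now returns an error because rotatelogs.New can fail
	// (for example on an invalid filename pattern).
	w, err := rotater.NewRotater("/var/log/app/app.%Y%m%d.log", opts...)
	if err != nil {
		panic(err)
	}
	defer w.Close()

	// The returned RotateSyncer is still a plain WriteSyncer.
	w.Write([]byte("hello\n"))
}
```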

(modified file)

@@ -5,6 +5,7 @@ import (
"go.uber.org/zap/zapcore"
"amuz.es/src/infra/goutils/logger"
"amuz.es/src/infra/goutils/logger/rotater"
)
var (
@@ -26,25 +27,27 @@ func Init(
verbose bool,
formatter zapcore.Encoder,
mainLogName, logFilename, logDir string,
maxSizeMb, maxBackup, maxDay int,
rotateOption []rotater.Option,
logLevel zapcore.Level,
additionalOptions ...zap.Option,
) *zap.SugaredLogger {
) (*zap.SugaredLogger, error) {
level := zap.NewAtomicLevelAt(logLevel)
defaultWriter = rotater.NewLogWriter(logFilename, maxSizeMb, maxBackup, maxDay, logDir)
if defaultWriter, err := rotater.NewLogWriter(logFilename, logDir, rotateOption...); err != nil {
return nil, err
} else {
defaultErrorOutputOptions = []zap.Option{zap.ErrorOutput(defaultWriter)}
options := defaultErrorOutputOptions
if verbose {
options = append(options, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.PanicLevel)))
}
// reset log option slice
options = append(options, additionalOptions...)
defaultErrorOutputOptions = []zap.Option{zap.ErrorOutput(defaultWriter)}
options := defaultErrorOutputOptions
if verbose {
options = append(options, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.PanicLevel)))
log := initLogger(defaultWriter, mainLogName, formatter, level, options...)
replaceGlobalLogger(log)
return log.Sugar(), nil
}
// reset log option slice
options = append(options, additionalOptions...)
log := initLogger(defaultWriter, mainLogName, formatter, level, options...)
replaceGlobalLogger(log)
return log.Sugar()
}
func New(parent *zap.SugaredLogger, moduleName string, options ...zap.Option) *zap.SugaredLogger {
@@ -63,12 +66,15 @@ func New(parent *zap.SugaredLogger, moduleName string, options ...zap.Option) *zap.SugaredLogger {
func NewOtherLogger(
formatter zapcore.Encoder,
moduleName, logFilename, logDir string,
maxSizeMb, maxBackup, maxDay int,
rotateOption []rotater.Option,
logLevel zapcore.Level,
fields ...zapcore.Field,
) (logger *zap.SugaredLogger, closer func() error) {
) (logger *zap.SugaredLogger, closer func() error, err error) {
loglevel := zap.NewAtomicLevelAt(logLevel)
logWriter := rotater.NewLogWriter(logFilename, maxSizeMb, maxBackup, maxDay, logDir)
logWriter, err := rotater.NewLogWriter(logFilename, logDir, rotateOption...)
if err != nil {
return
}
core := zapcore.NewCore(formatter, logWriter, loglevel)
closer = logWriter.Close
logger = zap.New(core, defaultErrorOutputOptions...).
@@ -79,13 +85,16 @@ func NewOtherLogger(
func NewOtherLoggerWithOption(
formatter zapcore.Encoder,
moduleName, logFilename, logDir string,
maxSizeMb, maxBackup, maxDay int,
rotateOption []rotater.Option,
logLevel zapcore.Level,
options []zap.Option,
fields ...zapcore.Field,
) (logger *zap.SugaredLogger, closer func() error) {
) (logger *zap.SugaredLogger, closer func() error, err error) {
loglevel := zap.NewAtomicLevelAt(logLevel)
logWriter := rotater.NewLogWriter(logFilename, maxSizeMb, maxBackup, maxDay, logDir)
logWriter, err := rotater.NewLogWriter(logFilename, logDir, rotateOption...)
if err != nil {
return
}
core := zapcore.NewCore(formatter, logWriter, loglevel)
closer = logWriter.Close
options = append(defaultErrorOutputOptions, options...)
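With the new signature, Init takes a slice of rotater options and returns an error alongside the sugared logger. A minimal sketch of a caller, assuming the package imports as logger and using an illustrative encoder, file pattern, and directory (none of which come from this commit):

```go
package main

import (
	"time"

	rotatelogs "github.com/lestrrat-go/file-rotatelogs"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"amuz.es/src/infra/goutils/logger"
	"amuz.es/src/infra/goutils/logger/rotater"
)

func main() {
	// Hypothetical wiring: rotation policy is now a []rotater.Option instead
	// of the old maxSizeMb/maxBackup/maxDay integers.
	encoder := zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig())

	log, err := logger.Init(
		false,            // verbose
		encoder,          // formatter
		"main",           // mainLogName
		"app.%Y%m%d.log", // logFilename (strftime pattern for rotatelogs)
		"/var/log/app",   // logDir
		[]rotater.Option{rotatelogs.WithMaxAge(7 * 24 * time.Hour)},
		zapcore.InfoLevel,
	)
	if err != nil {
		panic(err)
	}
	log.Infow("logger initialized")
}
```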

vendor/github.com/NebulousLabs/fastrand/LICENSE generated vendored Normal file (+21 lines)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2017 Nebulous Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/NebulousLabs/fastrand/README.md generated vendored Normal file (+55 lines)

@@ -0,0 +1,55 @@
fastrand
--------
[![GoDoc](https://godoc.org/github.com/NebulousLabs/fastrand?status.svg)](https://godoc.org/github.com/NebulousLabs/fastrand)
[![Go Report Card](http://goreportcard.com/badge/github.com/NebulousLabs/fastrand)](https://goreportcard.com/report/github.com/NebulousLabs/fastrand)
```
go get github.com/NebulousLabs/fastrand
```
`fastrand` implements a cryptographically secure pseudorandom number generator.
The generator is seeded using the system's default entropy source, and
thereafter produces random values via repeated hashing. As a result, `fastrand`
can generate randomness much faster than `crypto/rand`, and generation cannot
fail beyond a potential panic during `init()`.
`fastrand` also scales better than `crypto/rand` and `math/rand` when called in
parallel. In fact, `fastrand` can even outperform `math/rand` when using enough threads.
## Benchmarks ##
```
// 32 byte reads
BenchmarkRead32 10000000 175 ns/op 181.86 MB/s
BenchmarkReadCrypto32 500000 2733 ns/op 11.71 MB/s
// 512 kb reads
BenchmarkRead512kb 1000 1336217 ns/op 383.17 MB/s
BenchmarkReadCrypto512kb 50 33423693 ns/op 15.32 MB/s
// 32 byte reads using 4 threads
BenchmarkRead4Threads32 3000000 392 ns/op 326.46 MB/s
BenchmarkReadCrypto4Threads32 200000 7579 ns/op 16.89 MB/s
// 512 kb reads using 4 threads
BenchmarkRead4Threads512kb 1000 1899048 ns/op 1078.43 MB/s
BenchmarkReadCrypto4Threads512kb 20 97423380 ns/op 21.02 MB/s
```
## Security ##
`fastrand` uses an algorithm similar to Fortuna, which is the basis for the
`/dev/random` device in FreeBSD. However, although the techniques used by
`fastrand` are known to be secure, the specific implementation has not been
reviewed by a security professional. Use with caution.
The general strategy is to use `crypto/rand` at init to get 32 bytes of strong
entropy. From there, the entropy is concatenated to a counter and hashed
repeatedly, providing 64 bytes of random output each time the counter is
incremented. The counter is 16 bytes, which provides strong guarantees that a
cycle will not be seen throughout the lifetime of the program.
The `sync/atomic` package is used to ensure that multiple threads calling
`fastrand` concurrently are always guaranteed to end up with unique counters.
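The vendored package exposes a small drop-in API (see fastrand.go below). A short sketch of typical use, with illustrative buffer sizes and values:

```go
package main

import (
	"fmt"

	"github.com/NebulousLabs/fastrand"
)

func main() {
	// Fill a buffer with random bytes; Read always fills b completely.
	buf := make([]byte, 32)
	fastrand.Read(buf)

	// Convenience helpers built on Read.
	token := fastrand.Bytes(16) // 16 random bytes
	die := fastrand.Intn(6) + 1 // uniform int in [1,6]
	order := fastrand.Perm(5)   // random permutation of 0..4

	fmt.Println(buf, token, die, order)
}
```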

vendor/github.com/NebulousLabs/fastrand/fastrand.go generated vendored Normal file (+174 lines)

@@ -0,0 +1,174 @@
// Package fastrand implements a cryptographically secure pseudorandom number
// generator. The generator is seeded using the system's default entropy source,
// and thereafter produces random values via repeated hashing. As a result,
// fastrand can generate randomness much faster than crypto/rand, and generation
// cannot fail beyond a potential panic at init.
//
// The method used in this package is similar to the Fortuna algorithm, which is
// used in FreeBSD for /dev/urandom. This package uses techniques that
// are known to be secure, however the exact implementation has not been heavily
// reviewed by cryptographers.
package fastrand
import (
"crypto/rand"
"encoding/binary"
"io"
"math"
"math/big"
"strconv"
"sync/atomic"
"unsafe"
"golang.org/x/crypto/blake2b"
)
// A randReader produces random values via repeated hashing. The entropy field
// is the concatenation of an initial seed and a 128-bit counter. Each time
// the entropy is hashed, the counter is incremented.
type randReader struct {
counter uint64 // First 64 bits of the counter.
counterExtra uint64 // Second 64 bits of the counter.
entropy [32]byte
}
// Reader is a global, shared instance of a cryptographically strong pseudo-
// random generator. It uses blake2b as its hashing function. Reader is safe
// for concurrent use by multiple goroutines.
var Reader io.Reader
// init provides the initial entropy for the reader that will seed all numbers
// coming out of fastrand.
func init() {
r := &randReader{}
n, err := rand.Read(r.entropy[:])
if err != nil || n != len(r.entropy) {
panic("not enough entropy to fill fastrand reader at startup")
}
Reader = r
}
// Read fills b with random data. It always returns len(b), nil.
func (r *randReader) Read(b []byte) (int, error) {
if len(b) == 0 {
return 0, nil
}
// Grab a unique counter from the reader, while atomically updating the
// counter so that concurrent callers also end up with unique values.
counter := atomic.AddUint64(&r.counter, 1)
counterExtra := atomic.LoadUint64(&r.counterExtra)
// Increment counterExtra when counter is close to overflowing. We cannot
// wait until counter == math.MaxUint64 to increment counterExtra, because
// another goroutine could call Read, overflowing counter to 0 before the
// first goroutine increments counterExtra. The second goroutine would then
// be reusing the counter pair (0, 0). Instead, we increment at 1<<63 so
// that there is little risk of an overflow.
//
// There is still a potential overlap near 1<<63, though, because another
// goroutine could see counter == 1<<63+1 before the first goroutine
// increments counterExtra. The counter pair (1<<63+1, 1) would then be
// reused. To prevent this, we also increment at math.MaxUint64. This means
// that in order for an overlap to occur, 1<<63 goroutines would need to
// increment counter before the first goroutine increments counterExtra.
//
// This strategy means that many counters will be omitted, and that the
// total space cycle time is potentially as low as 2^126. This is fine
// however, as the security model merely mandates that no counter is ever
// used twice.
if counter == 1<<63 || counter == math.MaxUint64 {
atomic.AddUint64(&r.counterExtra, 1)
}
// Copy the counter and entropy into a separate slice, so that the result
// may be used in isolation of the other threads. The counter ensures that
// the result is unique to this thread.
seed := make([]byte, 64)
binary.LittleEndian.PutUint64(seed[0:8], counter)
binary.LittleEndian.PutUint64(seed[8:16], counterExtra)
// Leave 16 bytes for the inner counter.
copy(seed[32:], r.entropy[:])
// Set up an inner counter, that can be incremented to produce unique
// entropy within this thread.
n := 0
innerCounter := uint64(0)
innerCounterExtra := uint64(0)
for n < len(b) {
// Copy in the inner counter values.
binary.LittleEndian.PutUint64(seed[16:24], innerCounter)
binary.LittleEndian.PutUint64(seed[24:32], innerCounterExtra)
// Hash the seed to produce the next set of entropy.
result := blake2b.Sum512(seed)
n += copy(b[n:], result[:])
// Increment the inner counter. Because we are the only thread accessing
// the counter, we can wait until the first 64 bits have reached their
// maximum value before incrementing the next 64 bits.
innerCounter++
if innerCounter == math.MaxUint64 {
innerCounterExtra++
}
}
return n, nil
}
// Read is a helper function that calls Reader.Read on b. It always fills b
// completely.
func Read(b []byte) { Reader.Read(b) }
// Bytes is a helper function that returns n bytes of random data.
func Bytes(n int) []byte {
b := make([]byte, n)
Read(b)
return b
}
// Uint64n returns a uniform random uint64 in [0,n). It panics if n == 0.
func Uint64n(n uint64) uint64 {
if n == 0 {
panic("fastrand: argument to Uint64n is 0")
}
// To eliminate modulo bias, keep selecting at random until we fall within
// a range that is evenly divisible by n.
// NOTE: since n is at most math.MaxUint64, max is minimized when:
// n = math.MaxUint64/2 + 1 -> max = math.MaxUint64 - math.MaxUint64/2
// This gives an expected 2 tries before choosing a value < max.
max := math.MaxUint64 - math.MaxUint64%n
b := Bytes(8)
r := *(*uint64)(unsafe.Pointer(&b[0]))
for r >= max {
Read(b)
r = *(*uint64)(unsafe.Pointer(&b[0]))
}
return r % n
}
// Intn returns a uniform random int in [0,n). It panics if n <= 0.
func Intn(n int) int {
if n <= 0 {
panic("fastrand: argument to Intn is <= 0: " + strconv.Itoa(n))
}
// NOTE: since n is at most math.MaxUint64/2, max is minimized when:
// n = math.MaxUint64/4 + 1 -> max = math.MaxUint64 - math.MaxUint64/4
// This gives an expected 1.333 tries before choosing a value < max.
return int(Uint64n(uint64(n)))
}
// BigIntn returns a uniform random *big.Int in [0,n). It panics if n <= 0.
func BigIntn(n *big.Int) *big.Int {
i, _ := rand.Int(Reader, n)
return i
}
// Perm returns a random permutation of the integers [0,n).
func Perm(n int) []int {
m := make([]int, n)
for i := 1; i < n; i++ {
j := Intn(i + 1)
m[i] = m[j]
m[j] = i
}
return m
}

vendor/github.com/benburkert/dns/.travis.yml generated vendored Normal file (+6 lines)

@@ -0,0 +1,6 @@
language: go
sudo: false
go:
- 1.9
- tip
script: "go test -v -race ./..."

vendor/github.com/benburkert/dns/LICENSE generated vendored Normal file (+21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Ben Burkert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/benburkert/dns/README.md generated vendored Normal file (+3 lines)

@@ -0,0 +1,3 @@
# dns [![GoDoc](https://godoc.org/github.com/benburkert/dns?status.svg)](https://godoc.org/github.com/benburkert/dns) [![Build Status](https://travis-ci.org/benburkert/dns.svg)](https://travis-ci.org/benburkert/dns) [![Go Report Card](https://goreportcard.com/badge/github.com/benburkert/dns)](https://goreportcard.com/report/github.com/benburkert/dns)
DNS client and server package. [See godoc for details & examples.](https://godoc.org/github.com/benburkert/dns)

vendor/github.com/benburkert/dns/cache.go generated vendored Normal file (+155 lines)

@@ -0,0 +1,155 @@
package dns
import (
"context"
"math/rand"
"sync"
"time"
)
// Cache is a DNS query cache handler.
type Cache struct {
mu sync.RWMutex
cache map[Question]*Message
}
// ServeDNS answers query questions from a local cache, and forwards unanswered
// questions upstream, then caches the answers from the response.
func (c *Cache) ServeDNS(ctx context.Context, w MessageWriter, r *Query) {
var (
miss bool
now = time.Now()
)
c.mu.RLock()
for _, q := range r.Questions {
if hit := c.lookup(q, w, now); !hit {
miss = true
}
}
c.mu.RUnlock()
if !miss {
return
}
msg, err := w.Recur(ctx)
if err != nil || msg == nil {
w.Status(ServFail)
return
}
if msg.RCode == NoError {
c.insert(msg, now)
}
writeMessage(w, msg)
}
// c.mu.RLock held
func (c *Cache) lookup(q Question, w MessageWriter, now time.Time) bool {
msg, ok := c.cache[q]
if !ok {
return false
}
var answers, authorities, additionals []Resource
for _, res := range msg.Answers {
if res.TTL = cacheTTL(res.TTL, now); res.TTL <= 0 {
return false
}
answers = append(answers, res)
}
for _, res := range msg.Authorities {
if res.TTL = cacheTTL(res.TTL, now); res.TTL <= 0 {
return false
}
authorities = append(authorities, res)
}
for _, res := range msg.Additionals {
if res.TTL = cacheTTL(res.TTL, now); res.TTL <= 0 {
return false
}
additionals = append(additionals, res)
}
randomize(answers)
for _, res := range answers {
w.Answer(res.Name, res.TTL, res.Record)
}
for _, res := range authorities {
w.Authority(res.Name, res.TTL, res.Record)
}
for _, res := range additionals {
w.Additional(res.Name, res.TTL, res.Record)
}
return true
}
func (c *Cache) insert(msg *Message, now time.Time) {
cache := make(map[Question]*Message, len(msg.Questions))
for _, q := range msg.Questions {
m := new(Message)
for _, res := range msg.Answers {
res.TTL = cacheEpoch(res.TTL, now)
m.Answers = append(m.Answers, res)
}
for _, res := range msg.Authorities {
res.TTL = cacheEpoch(res.TTL, now)
m.Authorities = append(m.Authorities, res)
}
for _, res := range msg.Additionals {
res.TTL = cacheEpoch(res.TTL, now)
m.Additionals = append(m.Additionals, res)
}
cache[q] = m
}
c.mu.Lock()
defer c.mu.Unlock()
if c.cache == nil {
c.cache = cache
return
}
for q, m := range cache {
c.cache[q] = m
}
}
func cacheEpoch(ttl time.Duration, now time.Time) time.Duration {
return time.Duration(now.Add(ttl).UnixNano())
}
func cacheTTL(epoch time.Duration, now time.Time) time.Duration {
return time.Unix(0, int64(epoch)).Sub(now)
}
// randomize shuffles contiguous groups of resources for the same name.
func randomize(s []Resource) {
var low, high int
for low = 0; low < len(s)-1; low++ {
for high = low + 1; high < len(s) && s[low].Name == s[high].Name; high++ {
}
shuffle(s[low:high])
low = high
}
}
func shuffle(s []Resource) {
if len(s) < 2 {
return
}
for i := len(s) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
s[i], s[j] = s[j], s[i]
}
}

vendor/github.com/benburkert/dns/client.go generated vendored Normal file (+219 lines)

@@ -0,0 +1,219 @@
package dns
import (
"context"
"net"
"sync/atomic"
)
// Client is a DNS client.
type Client struct {
// Transport manages connections to DNS servers.
Transport AddrDialer
// Resolver is a handler that may answer all or portions of a query.
// Any questions answered by the handler are not sent to the upstream
// server.
Resolver Handler
id uint32
}
// Dial dials a DNS server and returns a net Conn that reads and writes DNS
// messages.
func (c *Client) Dial(ctx context.Context, network, address string) (net.Conn, error) {
switch network {
case "tcp", "tcp4", "tcp6":
addr, err := net.ResolveTCPAddr(network, address)
if err != nil {
return nil, err
}
conn, err := c.dial(ctx, addr)
if err != nil {
return nil, err
}
return &streamSession{
session: session{
Conn: conn,
addr: addr,
client: c,
msgerrc: make(chan msgerr),
},
}, nil
case "udp", "udp4", "udp6":
addr, err := net.ResolveUDPAddr(network, address)
if err != nil {
return nil, err
}
conn, err := c.dial(ctx, addr)
if err != nil {
return nil, err
}
return &packetSession{
session: session{
Conn: conn,
addr: addr,
client: c,
msgerrc: make(chan msgerr),
},
}, nil
default:
return nil, ErrUnsupportedNetwork
}
}
// Do sends a DNS query to a server and returns the response message.
func (c *Client) Do(ctx context.Context, query *Query) (*Message, error) {
conn, err := c.dial(ctx, query.RemoteAddr)
if err != nil {
return nil, err
}
if t, ok := ctx.Deadline(); ok {
if err := conn.SetDeadline(t); err != nil {
return nil, err
}
}
return c.do(ctx, conn, query)
}
func (c *Client) dial(ctx context.Context, addr net.Addr) (Conn, error) {
tport := c.Transport
if tport == nil {
tport = new(Transport)
}
return tport.DialAddr(ctx, addr)
}
func (c *Client) do(ctx context.Context, conn Conn, query *Query) (*Message, error) {
if c.Resolver == nil {
return c.roundtrip(conn, query)
}
w := &clientWriter{
messageWriter: &messageWriter{
msg: response(query.Message),
},
req: request(query.Message),
addr: query.RemoteAddr,
conn: conn,
roundtrip: c.roundtrip,
}
c.Resolver.ServeDNS(ctx, w, query)
if w.err != nil {
return nil, w.err
}
return response(w.msg), nil
}
func (c *Client) roundtrip(conn Conn, query *Query) (*Message, error) {
id := query.ID
msg := *query.Message
msg.ID = c.nextID()
if err := conn.Send(&msg); err != nil {
return nil, err
}
if err := conn.Recv(&msg); err != nil {
return nil, err
}
msg.ID = id
return &msg, nil
}
const idMask = (1 << 16) - 1
func (c *Client) nextID() int {
return int(atomic.AddUint32(&c.id, 1) & idMask)
}
type clientWriter struct {
*messageWriter
req *Message
err error
addr net.Addr
conn Conn
roundtrip func(Conn, *Query) (*Message, error)
}
func (w *clientWriter) Recur(context.Context) (*Message, error) {
qs := make([]Question, 0, len(w.req.Questions))
for _, q := range w.req.Questions {
if !questionMatched(q, w.msg) {
qs = append(qs, q)
}
}
w.req.Questions = qs
req := &Query{
Message: w.req,
RemoteAddr: w.addr,
}
msg, err := w.roundtrip(w.conn, req)
if err != nil {
w.err = err
}
return msg, err
}
func (w *clientWriter) Reply(context.Context) error {
return ErrUnsupportedOp
}
func request(msg *Message) *Message {
req := new(Message)
*req = *msg // shallow copy
return req
}
func questionMatched(q Question, msg *Message) bool {
mrs := [3][]Resource{
msg.Answers,
msg.Authorities,
msg.Additionals,
}
for _, rs := range mrs {
for _, res := range rs {
if res.Name == q.Name {
return true
}
}
}
return false
}
func writeMessage(w MessageWriter, msg *Message) {
w.Status(msg.RCode)
w.Authoritative(msg.Authoritative)
w.Recursion(msg.RecursionAvailable)
for _, res := range msg.Answers {
w.Answer(res.Name, res.TTL, res.Record)
}
for _, res := range msg.Authorities {
w.Authority(res.Name, res.TTL, res.Record)
}
for _, res := range msg.Additionals {
w.Additional(res.Name, res.TTL, res.Record)
}
}

vendor/github.com/benburkert/dns/compression.go generated vendored Normal file (+177 lines)

@@ -0,0 +1,177 @@
package dns
import (
"strings"
)
// Compressor encodes domain names.
type Compressor interface {
Length(...string) int
Pack([]byte, string) ([]byte, error)
}
// Decompressor decodes domain names.
type Decompressor interface {
Unpack([]byte) (string, []byte, error)
}
type compressor struct {
tbl map[string]int
offset int
}
func (c compressor) Length(names ...string) int {
var visited map[string]struct{}
if c.tbl != nil {
visited = make(map[string]struct{})
}
var n int
for _, name := range names {
n += c.length(name, visited)
}
return n
}
func (c compressor) length(name string, visited map[string]struct{}) int {
if name == "." || name == "" {
return 1
}
if c.tbl != nil {
if _, ok := c.tbl[name]; ok {
return 2
}
if _, ok := visited[name]; ok {
return 2
}
visited[name] = struct{}{}
}
pvt := strings.IndexByte(name, '.')
return pvt + 1 + c.length(name[pvt+1:], visited)
}
func (c compressor) Pack(b []byte, fqdn string) ([]byte, error) {
if fqdn == "." || fqdn == "" {
return append(b, 0x00), nil
}
if c.tbl != nil {
if idx, ok := c.tbl[fqdn]; ok {
ptr, err := pointerTo(idx)
if err != nil {
return nil, err
}
return append(b, ptr...), nil
}
}
pvt := strings.IndexByte(fqdn, '.')
if pvt == 0 {
return nil, errZeroSegLen
}
if pvt > 63 {
return nil, errSegTooLong
}
if c.tbl != nil {
idx := len(b) - c.offset
if int(uint16(idx)) != idx {
return nil, errInvalidPtr
}
c.tbl[fqdn] = idx
}
b = append(b, byte(pvt))
b = append(b, fqdn[:pvt]...)
return c.Pack(b, fqdn[pvt+1:])
}
type decompressor []byte
func (d decompressor) Unpack(b []byte) (string, []byte, error) {
name, b, err := d.unpack(make([]byte, 0, 32), b, nil)
if err != nil {
return "", nil, err
}
return string(name), b, nil
}
func (d decompressor) unpack(name, b []byte, visited []int) ([]byte, []byte, error) {
lenb := len(b)
if lenb == 0 {
return nil, nil, errBaseLen
}
if b[0] == 0x00 {
if len(name) == 0 {
return append(name, '.'), b[1:], nil
}
return name, b[1:], nil
}
if lenb < 2 {
return nil, nil, errBaseLen
}
if isPointer(b[0]) {
if d == nil {
return nil, nil, errBaseLen
}
ptr := nbo.Uint16(b[:2])
name, err := d.deref(name, ptr, visited)
if err != nil {
return nil, nil, err
}
return name, b[2:], nil
}
lenl, b := int(b[0]), b[1:]
if len(b) < lenl {
return nil, nil, errCalcLen
}
name = append(name, b[:lenl]...)
name = append(name, '.')
return d.unpack(name, b[lenl:], visited)
}
func (d decompressor) deref(name []byte, ptr uint16, visited []int) ([]byte, error) {
idx := int(ptr & 0x3FFF)
if len(d) < idx {
return nil, errInvalidPtr
}
if isPointer(d[idx]) {
return nil, errInvalidPtr
}
for _, v := range visited {
if idx == v {
return nil, errPtrCycle
}
}
name, _, err := d.unpack(name, d[idx:], append(visited, idx))
return name, err
}
func isPointer(b byte) bool { return b&0xC0 > 0 }
func pointerTo(idx int) ([]byte, error) {
ptr := uint16(idx)
if int(ptr) != idx {
return nil, errInvalidPtr
}
ptr |= 0xC000
buf := [2]byte{}
nbo.PutUint16(buf[:], ptr)
return buf[:], nil
}

vendor/github.com/benburkert/dns/conn.go generated vendored Normal file (+113 lines)

@@ -0,0 +1,113 @@
package dns
import (
"io"
"net"
)
// Conn is a network connection to a DNS resolver.
type Conn interface {
net.Conn
// Recv reads a DNS message from the connection.
Recv(msg *Message) error
// Send writes a DNS message to the connection.
Send(msg *Message) error
}
// PacketConn is a packet-oriented network connection to a DNS resolver that
// expects transmitted messages to adhere to RFC 1035 Section 4.2.1. "UDP
// usage".
type PacketConn struct {
net.Conn
rbuf, wbuf []byte
}
// Recv reads a DNS message from the underlying connection.
func (c *PacketConn) Recv(msg *Message) error {
if len(c.rbuf) != maxPacketLen {
c.rbuf = make([]byte, maxPacketLen)
}
n, err := c.Read(c.rbuf)
if err != nil {
return err
}
_, err = msg.Unpack(c.rbuf[:n])
return err
}
// Send writes a DNS message to the underlying connection.
func (c *PacketConn) Send(msg *Message) error {
if len(c.wbuf) != maxPacketLen {
c.wbuf = make([]byte, maxPacketLen)
}
var err error
if c.wbuf, err = msg.Pack(c.wbuf[:0], true); err != nil {
return err
}
if len(c.wbuf) > maxPacketLen {
return ErrOversizedMessage
}
_, err = c.Write(c.wbuf)
return err
}
// StreamConn is a stream-oriented network connection to a DNS resolver that
// expects transmitted messages to adhere to RFC 1035 Section 4.2.2. "TCP
// usage".
type StreamConn struct {
net.Conn
rbuf, wbuf []byte
}
// Recv reads a DNS message from the underlying connection.
func (c *StreamConn) Recv(msg *Message) error {
if len(c.rbuf) < 2 {
c.rbuf = make([]byte, 1280)
}
if _, err := io.ReadFull(c, c.rbuf[:2]); err != nil {
return err
}
mlen := nbo.Uint16(c.rbuf[:2])
if len(c.rbuf) < int(mlen) {
c.rbuf = make([]byte, mlen)
}
if _, err := io.ReadFull(c, c.rbuf[:mlen]); err != nil {
return err
}
_, err := msg.Unpack(c.rbuf[:mlen])
return err
}
// Send writes a DNS message to the underlying connection.
func (c *StreamConn) Send(msg *Message) error {
if len(c.wbuf) < 2 {
c.wbuf = make([]byte, 1024)
}
b, err := msg.Pack(c.wbuf[2:2], true)
if err != nil {
return err
}
mlen := uint16(len(b))
if int(mlen) != len(b) {
return ErrOversizedMessage
}
nbo.PutUint16(c.wbuf[:2], mlen)
_, err = c.Write(c.wbuf[:len(b)+2])
return err
}

vendor/github.com/benburkert/dns/dns.go generated vendored Normal file (+60 lines)

@@ -0,0 +1,60 @@
package dns
import (
"context"
"errors"
"net"
)
var (
// ErrConflictingID is a pipelining error due to the same message ID being
// used for more than one inflight query.
ErrConflictingID = errors.New("conflicting message id")
// ErrOversizedMessage is an error returned when attempting to send a
// message that is longer than the maximum allowed number of bytes.
ErrOversizedMessage = errors.New("oversized message")
// ErrTruncatedMessage indicates the response message has been truncated.
ErrTruncatedMessage = errors.New("truncated message")
// ErrUnsupportedNetwork is returned when DialAddr is called with an
// unknown network.
ErrUnsupportedNetwork = errors.New("unsupported network")
// ErrUnsupportedOp indicates the operation is not supported by callee.
ErrUnsupportedOp = errors.New("unsupported operation")
)
// AddrDialer dials a net Addr.
type AddrDialer interface {
DialAddr(context.Context, net.Addr) (Conn, error)
}
// Query is a DNS request message bound for a DNS resolver.
type Query struct {
*Message
// RemoteAddr is the address of a DNS resolver.
RemoteAddr net.Addr
}
// OverTLSAddr indicates the remote DNS service implements DNS-over-TLS as
// defined in RFC 7858.
type OverTLSAddr struct {
net.Addr
}
// Network returns the address's network name with a "-tls" suffix.
func (a OverTLSAddr) Network() string {
return a.Addr.Network() + "-tls"
}
// ProxyFunc modifies the address of a DNS server.
type ProxyFunc func(context.Context, net.Addr) (net.Addr, error)
// RoundTripper is an interface representing the ability to execute a single
// DNS transaction, obtaining a response Message for a given Query.
type RoundTripper interface {
Do(context.Context, *Query) (*Message, error)
}

vendor/github.com/benburkert/dns/doc.go generated vendored Normal file (+76 lines)

@@ -0,0 +1,76 @@
/*
Package dns provides DNS client and server implementations.
A client can handle queries for a net.Dialer:
dialer := &net.Dialer{
Resolver: &net.Resolver{
PreferGo: true,
Dial: new(dns.Client).Dial,
},
}
conn, err := dialer.DialContext(ctx, "tcp", "example.com:80")
It can also query a remote DNS server directly:
client := new(dns.Client)
query := &dns.Query{
RemoteAddr: &net.TCPAddr{IP: net.IPv4(8, 8, 8, 8), Port: 53},
Message: &dns.Message{
Questions: []dns.Question{
{
Name: "example.com.",
Type: dns.TypeA,
Class: dns.ClassIN,
},
{
Name: "example.com.",
Type: dns.TypeAAAA,
Class: dns.ClassIN,
},
},
},
}
msg, err := client.Do(ctx, query)
A handler answers queries for a server or a local resolver for a client:
zone := &dns.Zone{
Origin: "localhost.",
TTL: 5 * time.Minute,
RRs: dns.RRSet{
"alpha": []dns.Record{
&dns.A{net.IPv4(127, 0, 0, 42).To4()},
&dns.AAAA{net.ParseIP("::42")},
},
},
}
srv := &dns.Server{
Addr: ":53",
Handler: zone,
}
go srv.ListenAndServe(ctx)
mux := new(dns.ResolveMux)
mux.Handle(dns.TypeANY, zone.Origin, zone)
client := &dns.Client{
Resolver: mux,
}
net.DefaultResolver = &net.Resolver{
PreferGo: true,
Dial: client.Dial,
}
addrs, err := net.LookupHost("alpha.localhost")
*/
package dns

vendor/github.com/benburkert/dns/handler.go generated vendored Normal file (+247 lines)

@@ -0,0 +1,247 @@
package dns
import (
"context"
"strings"
)
// Handler responds to a DNS query.
//
// ServeDNS should build the reply message using the MessageWriter, and may
// optionally call the Reply method. Returning signals that the request is
// finished and the response is ready to send.
//
// A recursive handler may call the Recur method of the MessageWriter to send
// an query upstream. Only unanswered questions are included in the upstream
// query.
type Handler interface {
ServeDNS(context.Context, MessageWriter, *Query)
}
// The HandlerFunc type is an adapter to allow the use of ordinary functions as
// DNS handlers. If f is a function with the appropriate signature,
// HandlerFunc(f) is a Handler that calls f.
type HandlerFunc func(context.Context, MessageWriter, *Query)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(ctx context.Context, w MessageWriter, r *Query) {
f(ctx, w, r)
}
// Recursor forwards a query and copies the response.
func Recursor(ctx context.Context, w MessageWriter, r *Query) {
msg, err := w.Recur(ctx)
if err != nil {
w.Status(ServFail)
return
}
writeMessage(w, msg)
}
// Refuse responds to all queries with a "Query Refused" message.
func Refuse(ctx context.Context, w MessageWriter, r *Query) {
w.Status(Refused)
}
// ResolveMux is a DNS query multiplexer. It matches a question type and name
// suffix to a Handler.
type ResolveMux struct {
tbl []muxEntry
}