
initial commit

Sangbum Kim 2022-03-18 09:21:04 +09:00
commit 11446782cd
16 changed files with 1311 additions and 0 deletions

366
.gitignore vendored Executable file

@@ -0,0 +1,366 @@
# Created by https://www.gitignore.io/api/intellij,go,linux,osx,windows,node,python,executable,jetbrains+all,visualstudiocode,compressedarchive,git
# Edit at https://www.gitignore.io/?templates=intellij,go,linux,osx,windows,node,python,executable,jetbrains+all,visualstudiocode,compressedarchive,git
### CompressedArchive ###
### Mostly from https://en.wikipedia.org/wiki/List_of_archive_formats
## Archiving and compression
# Open source file format. Used by 7-Zip.
*.7z
# Mac OS X; restoration on different platforms is possible although not immediate. Based on 7z. Preserves Spotlight metadata, resource forks, owner/group information, dates and other data which would otherwise be lost with compression.
*.s7z
# Old archive versions only Proprietary format
*.ace
# A format that compresses and doubly encrypts the data (AES256 and CAS256) to resist brute-force attacks, and can also hide files in an AFA file. It has two ways to safeguard data integrity and repair the file if it has an error (repair with AstroA2P (online) or Astrotite (offline)).
*.afa
# A mainly Korean format designed for very large archives.
*.alz
# Android application package (variant of JAR file format).
*.apk
# ??
*.arc
# Originally DOS, now multiple
*.arj
# Open archive format, used by B1 Free Archiver (http://dev.b1.org/standard/archive-format.html)
*.b1
# Binary Archive with external header
*.ba
# Proprietary format from the ZipTV Compression Components
*.bh
# The Microsoft Windows native archive format, which is also used by many commercial installers such as InstallShield and WISE.
*.cab
# Originally DOS, now DOS and Windows. Created by Yaakov Gringeler; last released in 2003 (Compressia 1.0.0.1 beta), now apparently defunct. A free 30-day trial lets the user create and extract archives; after that it is possible to extract, but not to create.
*.car
# Open source file format.
*.cfs
# Compact Pro archive, a common archiver used on Mac platforms until about Mac OS 7.5.x. Competed with StuffIt; now obsolete.
*.cpt
# Windows, Unix-like, Mac OS X. Open source file format; files are compressed individually with either gzip, bzip2 or lzo.
*.dar
# DiskDoubler Mac OS obsolete
*.dd
# ??
*.dgc
# Apple Disk Image. Supports "Internet-enabled" disk images, which, once downloaded, are automatically decompressed, mounted, have their contents extracted, and are thrown away. Currently, Safari is the only browser that supports this form of extraction; however, the images can be manually extracted as well. This format can also be password-protected or encrypted with 128-bit or 256-bit AES encryption.
*.dmg
# Enterprise Java Archive archive
*.ear
# ETSoft compressed archive
*.egg
# The predecessor of DGCA.
*.gca
# Originally DOS; may be covered by patents. DOS-era format; uses arithmetic/Markov coding.
*.ha
# MS Windows HKI
*.hki
# Produced by ICEOWS program. Excels at text file compression.
*.ice
# Java archive, compatible with ZIP files
*.jar
# Open sourced archiver with compression using the PAQ family of algorithms and optional encryption.
*.kgb
# Originally DOS, now multiple platforms. The standard format on the Amiga.
*.lzh
*.lha
# Archiver originally used on The Amiga. Now copied by Microsoft to use in their .cab and .chm files.
*.lzx
# File format from NoGate Consultings, a rival of ARC-Compressor.
*.pak
# A disk image archive format that supports several compression methods as well as splitting the archive into smaller pieces.
*.partimg
# An experimental open source packager (http://mattmahoney.net/dc)
*.paq*
# Open source archiver supporting authenticated encryption, volume spanning, customizable object-level and volume-level integrity checks (from CRCs to SHA-512 and Whirlpool hashes), and fast deflate-based compression.
*.pea
# The format from the PIM - a freeware compression tool by Ilia Muraviev. It uses an LZP-based compression algorithm with set of filters for executable, image and audio files.
*.pim
# PackIt Mac OS obsolete
*.pit
# Used for data in games written using the Quadruple D library for Delphi. Uses byte pair compression.
*.qda
# A proprietary archive format, second in popularity to .zip files.
*.rar
# The format from a commercial archiving package. Odd among commercial packages in that it focuses on incorporating experimental algorithms with the highest possible compression (at the expense of speed and memory), such as PAQ, PPMD and PPMZ (PPMD with unlimited-length strings), as well as proprietary algorithms.
*.rk
# Self Dissolving ARChive (Commodore 64, Commodore 128). SDAs refer to Self Dissolving ARC files, and are based on the Commodore 64 and Commodore 128 versions of ARC, originally written by Chris Smeets. While the files share the same extension, they are not compatible between platforms. That is, an SDA created on a Commodore 64 but run on a Commodore 128 in Commodore 128 mode will crash the machine, and vice versa. The intended successor to SDA is SFX.
*.sda
# A pre-Mac OS X Self-Extracting Archive format. StuffIt, Compact Pro, Disk Doubler and others could create .sea files, though the StuffIt versions were the most common.
*.sea
# Scifer Archive with internal header
*.sen
# Commodore 64, Commodore 128. SFX is a Self Extracting Archive which uses the LHArc compression algorithm. It was originally developed by Chris Smeets on the Commodore platform, and runs primarily using the CS-DOS extension for the Commodore 128. Unlike its predecessor SDA, SFX files will run on both the Commodore 64 and Commodore 128 regardless of which machine they were created on.
*.sfx
# An archive format designed for the Apple II series of computers. The canonical implementation is ShrinkIt, which can operate on disk images as well as files. Preferred compression algorithm is a combination of RLE and 12-bit LZW. Archives can be manipulated with the command-line NuLib tool, or the Windows-based CiderPress.
*.shk
# A compression format common on Apple Macintosh computers. The free StuffIt Expander is available for Windows and OS X.
*.sit
# The replacement for the .sit format that supports more compression methods, UNIX file permissions, long file names, very large files, more encryption options, data specific compressors (JPEG, Zip, PDF, 24-bit image, MP3). The free StuffIt Expander is available for Windows and OS X.
*.sitx
# A royalty-free compressing format
*.sqx
# The "tarball" format combines tar archives with a file-based compression scheme (usually gzip). Commonly used for source and binary distribution on Unix-like platforms, widely available elsewhere.
*.tar.gz
*.tgz
*.tar.Z
*.tar.bz2
*.tbz2
*.tar.lzma
*.tlz
# UltraCompressor 2.3 was developed to act as an alternative to the then popular PKZIP application. The main feature of the application is its ability to create large archives. This means that compressed archives with the UC2 file extension can hold almost 1 million files.
*.uc
*.uc0
*.uc2
*.ucn
*.ur2
*.ue2
# Based on PAQ, RZM, CSC, CCM, and 7zip. The format consists of a PAQ, RZM, CSC, or CCM compressed file and a manifest with compression settings stored in a 7z archive.
*.uca
# A high compression rate archive format originally for DOS.
*.uha
# Web Application archive (Java-based web app)
*.war
# File-based disk image format developed to deploy Microsoft Windows.
*.wim
# XAR
*.xar
# Native format of the Open Source KiriKiri Visual Novel engine. Uses a combination of block splitting and zlib compression. The filenames and paths are stored in UTF-16 format. For integrity checking, the Adler-32 hashsum is used. For many commercial games, the files are encrypted (and decoded at runtime) via the so-called "cxdec" module, which implements xor-based encryption.
*.xp3
# Yamazaki zipper archive. Compression format used in DeepFreezer archiver utility created by Yamazaki Satoshi. Read and write support exists in TUGZip, IZArc and ZipZag
*.yz1
# The most widely used compression format on Microsoft Windows. Commonly used on Macintosh and Unix systems as well.
*.zip
*.zipx
# zoo archive (application/x-zoo).
*.zoo
# Journaling (append-only) archive format with rollback capability. Supports deduplication and incremental update based on last-modified dates. Multi-threaded. Compresses in LZ77, BWT, and context mixing formats. Open source.
*.zpaq
# Archiver with a compression algorithm based on the Burrows-Wheeler transform method.
*.zz
### Executable ###
*.app
*.bat
*.cgi
*.com
*.exe
*.gadget
*.pif
*.vb
*.wsf
### Git ###
# Created by git for backups. To disable backups in Git:
# $ git config --global mergetool.keepBackup false
*.orig
# Created by git when using merge tools for conflicts
*.BACKUP.*
*.BASE.*
*.LOCAL.*
*.REMOTE.*
*_BACKUP_*.txt
*_BASE_*.txt
*_LOCAL_*.txt
*_REMOTE_*.txt
### Go ###
# Binaries for programs and plugins
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
### Go Patch ###
/vendor/
/Godeps/
### Intellij ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### Intellij Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr
# Sonarlint plugin
.idea/sonarlint
### JetBrains+all Patch ###
# Ignores the whole .idea folder and all .iml files
# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
.idea/
# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
*.iml
modules.xml
.idea/misc.xml
*.ipr
# Sonarlint plugin
### Linux ###
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### OSX ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### VisualStudioCode ###
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
### VisualStudioCode Patch ###
# Ignore all local history of files
.history
### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# End of https://www.gitignore.io/api/intellij,go,linux,osx,windows,node,python,executable,jetbrains+all,visualstudiocode,compressedarchive,git
go.sum
/vendor

22
LICENSE Executable file

@@ -0,0 +1,22 @@
The BSD 3-Clause License
Copyright (c) 2022 Sangbum Kim.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

237
README.md Executable file

@@ -0,0 +1,237 @@
# logging
[![GoDoc](https://godoc.org/github.com/spi-ca/logging?status.png)](http://godoc.org/github.com/spi-ca/logging)
[![Go Report](http://goreportcard.com/badge/spi-ca/logging)](http://goreportcard.com/report/spi-ca/logging)
## Description
logging is a convenient wrapper around the [zap](https://github.com/uber-go/zap) logger.
It provides logger grouping and log rotation.
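For a quick orientation, here is a minimal sketch of typical usage, based on the exported helpers in `global.go` below (the logger name, directory, and file name are illustrative):
```go
package main

import (
	"amuz.es/src/logging"
)

func main() {
	// Install a hookable, rotating global logger.
	logger, cancel, err := logging.ReplaceGlobalHookLogger(
		"app",     // root logger name
		false,     // verbose: add stack traces only at panic level
		7,         // maxBackup: number of rotated files to keep
		"./logs",  // logging directory
		"app.log", // base file name
		logging.LevelInfo,
		false, // simple: false selects the full console format
	)
	if err != nil {
		panic(err)
	}
	defer cancel()

	logger.Infof("service started on %s", ":8080")

	// Derive a named sub-logger from the (now replaced) global zap logger.
	dbLog := logging.New(nil, "db")
	dbLog.Info("connection pool initialized")
}
```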
## Requirements
Go 1.18 or above (as declared in `go.mod`).
## Installation
Run the following command to install the package:
```
go get github.com/spi-ca/logging
```
## Getting Started
Create a `server.go` file with the following content:
```go
package main
import (
"fmt"
"github.com/qiangxue/fasthttp-routing"
"github.com/valyala/fasthttp"
)
func main() {
router := routing.New()
router.Get("/", func(c *routing.Context) error {
fmt.Fprintf(c, "Hello, world!")
return nil
})
panic(fasthttp.ListenAndServe(":8080", router.HandleRequest))
}
```
Now run the following command to start the Web server:
```
go run server.go
```
You should be able to access URLs such as `http://localhost:8080`.
### Routes
The router works by building a routing table and then dispatching HTTP requests to the matching handlers
found in the routing table. An intuitive illustration of a routing table is as follows:
Routes | Handlers
--------------------|-----------------
`GET /users` | m1, m2, h1, ...
`POST /users` | m1, m2, h2, ...
`PUT /users/<id>` | m1, m2, h3, ...
`DELETE /users/<id>`| m1, m2, h4, ...
For an incoming request `GET /users`, the first route would match and the handlers m1, m2, and h1 would be executed.
If the request is `PUT /users/123`, the third route would match and the corresponding handlers would be executed.
Note that the token `<id>` can match any number of non-slash characters and the matching part can be accessed as
a path parameter value in the handlers.
**If an incoming request matches multiple routes in the table, the route added first to the table will take precedence.
All other matching routes will be ignored.**
The actual implementation of the routing table uses a variant of the radix tree data structure, which makes the routing
process as fast as working with a hash table, thanks to the inspiration from [httprouter](https://github.com/julienschmidt/httprouter).
To add a new route and its handlers to the routing table, call the `To` method like the following:
```go
router := routing.New()
router.To("GET", "/users", m1, m2, h1)
router.To("POST", "/users", m1, m2, h2)
```
You can also use shortcut methods, such as `Get`, `Post`, `Put`, etc., which are named after the HTTP method names:
```go
router.Get("/users", m1, m2, h1)
router.Post("/users", m1, m2, h2)
```
If you have multiple routes with the same URL path but different HTTP methods, like the above example, you can
chain them together as follows,
```go
router.Get("/users", m1, m2, h1).Post(m1, m2, h2)
```
If you want to use the same set of handlers to handle the same URL path but different HTTP methods, you can take
the following shortcut:
```go
router.To("GET,POST", "/users", m1, m2, h)
```
A route may contain parameter tokens which are in the format of `<name:pattern>`, where `name` stands for the parameter
name, and `pattern` is a regular expression which the parameter value should match. A token `<name>` is equivalent
to `<name:[^/]*>`, i.e., it matches any number of non-slash characters. At the end of a route, an asterisk character
can be used to match any number of arbitrary characters. Below are some examples:
* `/users/<username>`: matches `/users/admin`
* `/users/accnt-<id:\d+>`: matches `/users/accnt-123`, but not `/users/accnt-admin`
* `/users/<username>/*`: matches `/users/admin/profile/address`
When a URL path matches a route, the matching parameters on the URL path can be accessed via `Context.Param()`:
```go
router := routing.New()
router.Get("/users/<username>", func (c *routing.Context) error {
fmt.Fprintf(c, "Name: %v", c.Param("username"))
return nil
})
```
### Route Groups
A route group is a way of grouping together the routes which have the same route prefix. The routes in a group also
share the same handlers that are registered with the group via its `Use` method. For example,
```go
router := routing.New()
api := router.Group("/api")
api.Use(m1, m2)
api.Get("/users", h1).Post(h2)
api.Put("/users/<id>", h3).Delete(h4)
```
The above `/api` route group establishes the following routing table:
Routes | Handlers
------------------------|-------------
`GET /api/users` | m1, m2, h1, ...
`POST /api/users` | m1, m2, h2, ...
`PUT /api/users/<id>` | m1, m2, h3, ...
`DELETE /api/users/<id>`| m1, m2, h4, ...
As you can see, all these routes have the same route prefix `/api` and the handlers `m1` and `m2`. In other similar
routing frameworks, the handlers registered with a route group are also called *middlewares*.
Route groups can be nested. That is, a route group can create a child group by calling the `Group()` method. The router
serves as the top level route group. A child group inherits the handlers registered with its parent group. For example,
```go
router := routing.New()
router.Use(m1)
api := router.Group("/api")
api.Use(m2)
users := api.Group("/users")
users.Use(m3)
users.Put("/<id>", h1)
```
Because the router serves as the parent of the `api` group which is the parent of the `users` group,
the `PUT /api/users/<id>` route is associated with the handlers `m1`, `m2`, `m3`, and `h1`.
### Router
Router manages the routing table and dispatches incoming requests to appropriate handlers. A router instance is created
by calling the `routing.New()` method.
To hook up router with fasthttp, use the following code:
```go
router := routing.New()
fasthttp.ListenAndServe(":8080", router.HandleRequest)
```
### Handlers
A handler is a function with the signature `func(*routing.Context) error`. A handler is executed by the router if
the incoming request URL path matches the route that the handler is associated with. Through the `routing.Context`
parameter, you can access the request information in handlers.
A route may be associated with multiple handlers. These handlers will be executed in the order that they are registered
to the route. The execution sequence can be terminated in the middle using one of the following two methods:
* A handler returns an error: the router will skip the rest of the handlers and handle the returned error.
* A handler calls `Context.Abort()`: the router will simply skip the rest of the handlers. There is no error to be handled.
A handler can call `Context.Next()` to explicitly execute the rest of the unexecuted handlers and take actions after
they finish execution. For example, a response compression handler may start the output buffer, call `Context.Next()`,
and then compress and send the output to response.
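Below is a hedged sketch of such a wrapping handler (the handler name is illustrative; it assumes the imports from the Getting Started example plus `time`): it times the rest of the chain by calling `Context.Next()` and writes a trailer afterwards.
```go
// timer runs the remaining handlers and reports how long they took.
func timer(c *routing.Context) error {
	start := time.Now()
	if err := c.Next(); err != nil { // execute the rest of the handler chain
		return err
	}
	fmt.Fprintf(c, "\nhandled in %v", time.Since(start))
	return nil
}

// Register it with the router so it wraps every route:
// router.Use(timer)
```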
### Context
For each incoming request, a `routing.Context` object is passed through the relevant handlers. Because `routing.Context`
embeds `fasthttp.RequestCtx`, you can access all properties and methods provided by the latter.
Additionally, the `Context.Param()` method allows handlers to access the URL path parameters that match the current route.
Using `Context.Get()` and `Context.Set()`, handlers can share data between each other. For example, an authentication
handler can store the authenticated user identity by calling `Context.Set()`, and other handlers can retrieve back
the identity information by calling `Context.Get()`.
Context also provides a handy `WriteData()` method that can be used to write data of arbitrary type to the response.
The `WriteData()` method can also be overridden (by replacement) to achieve more versatile response data writing.
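As a hedged sketch of these calls working together (handler names are illustrative): an authentication handler stores the identity with `Set()`, and a later handler reads it back with `Get()` alongside a path parameter.
```go
auth := func(c *routing.Context) error {
	// In a real application the identity would come from a token or session.
	c.Set("user", "admin")
	return nil
}

profile := func(c *routing.Context) error {
	user, _ := c.Get("user").(string)
	fmt.Fprintf(c, "user=%s id=%s", user, c.Param("id"))
	return nil
}

router.Get("/users/<id>", auth, profile)
```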
### Error Handling
A handler may return an error indicating some erroneous condition. Sometimes, a handler or the code it calls may cause
a panic. Both should be handled properly to ensure best user experience. It is recommended that you use
the `fault.Recover` handler or a similar error handler to handle these errors.
If an error is not handled by any handler, the router will handle it by calling its `handleError()` method which
simply sets an appropriate HTTP status code and writes the error message to the response.
When an incoming request has no matching route, the router will call the handlers registered via the `Router.NotFound()`
method. All the handlers registered via `Router.Use()` will also be called in advance. By default, the following two
handlers are registered with `Router.NotFound()`:
* `routing.MethodNotAllowedHandler`: a handler that sends an `Allow` HTTP header indicating the allowed HTTP methods for a requested URL
* `routing.NotFoundHandler`: a handler triggering 404 HTTP error
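As an illustration, here is a hedged sketch of a panic-recovering handler in the spirit of the recovery handler mentioned above (not its actual implementation): it converts panics from downstream handlers into ordinary errors.
```go
recoverPanics := func(c *routing.Context) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// Turn the panic into an error so the router's error handling
			// (or a later handler) can deal with it.
			err = fmt.Errorf("recovered from panic: %v", r)
		}
	}()
	return c.Next()
}

router.Use(recoverPanics)
```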

86
common.go Normal file

@@ -0,0 +1,86 @@
package logging
import (
"go.uber.org/zap/zapcore"
"log"
"time"
)
type (
// Logger represents logging interface.
Logger interface {
// DPanic uses fmt.Sprint to construct and log a message. In development, the
// logger then panics. (See DPanicLevel for details.)
DPanic(args ...any)
// DPanicf uses fmt.Sprintf to log a templated message. In development, the
// logger then panics. (See DPanicLevel for details.)
DPanicf(template string, args ...any)
// Debug uses fmt.Sprint to construct and log a message.
Debug(args ...any)
// Debugf uses fmt.Sprintf to log a templated message.
Debugf(template string, args ...any)
// Error uses fmt.Sprint to construct and log a message.
Error(args ...any)
// Errorf uses fmt.Sprintf to log a templated message.
Errorf(template string, args ...any)
// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
Fatal(args ...any)
// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
Fatalf(template string, args ...any)
// Info uses fmt.Sprint to construct and log a message.
Info(args ...any)
// Infof uses fmt.Sprintf to log a templated message.
Infof(template string, args ...any)
// Named adds a sub-scope to the logger's name.
Named(name string) Logger
// Name returns logger name
Name() string
// Panic uses fmt.Sprint to construct and log a message, then panics.
Panic(args ...any)
// Panicf uses fmt.Sprintf to log a templated message, then panics.
Panicf(template string, args ...any)
// Sync flushes any buffered log entries.
Sync() error
// Warn uses fmt.Sprint to construct and log a message.
Warn(args ...any)
// Warnf uses fmt.Sprintf to log a templated message.
Warnf(template string, args ...any)
// ToStdLogAt returns a *log.Logger which writes to the supplied logger at the
// required level.
ToStdLogAt(level Level) (*log.Logger, error)
}
// HookLogger is a Logging interface with a hooking capability.
HookLogger interface {
Logger
// SetHook specifies a log entry hook.
SetHook(hook LoggerHook) (err error)
}
// LoggerHook is an alias for the hooking function.
LoggerHook = func(level Level, logger, message string, at time.Time) (err error)
Level = zapcore.Level
)
const (
// LevelDebug logs are typically voluminous, and are usually disabled in
// production.
LevelDebug = zapcore.DebugLevel
// LevelInfo is the default logging priority.
LevelInfo = zapcore.InfoLevel
// LevelWarn logs are more important than Info, but don't need individual
// human review.
LevelWarn = zapcore.WarnLevel
// LevelError logs are high-priority. If an application is running smoothly,
// it shouldn't generate any error-level logs.
LevelError = zapcore.ErrorLevel
// LevelDPanic logs are particularly important errors. In development the
// logger panics after writing the message.
LevelDPanic = zapcore.DPanicLevel
// LevelPanic logs a message, then panics.
LevelPanic = zapcore.PanicLevel
// LevelFatal logs a message, then calls os.Exit(1).
LevelFatal = zapcore.FatalLevel
)
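For illustration, a hedged sketch of how application code might depend on the `Logger` interface above rather than on zap directly (the package and type names are hypothetical):
```go
package storage

import "amuz.es/src/logging"

// Store only depends on the logging.Logger interface, which keeps it
// decoupled from the concrete zap-backed implementation.
type Store struct {
	log logging.Logger
}

func NewStore(log logging.Logger) *Store {
	return &Store{log: log.Named("storage")}
}

func (s *Store) Open(dsn string) error {
	s.log.Infof("opening %s", dsn)
	return nil
}
```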

168
global.go Normal file

@@ -0,0 +1,168 @@
package logging
import (
"amuz.es/src/logging/rotater"
"errors"
rotatelogs "github.com/lestrrat-go/file-rotatelogs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var defaultErrorOutputOptions []zap.Option
func init() {
if _, err := zap.RedirectStdLogAt(zap.L(), zapcore.DebugLevel); err != nil {
panic(err)
}
}
// New creates a sub-logger from the root logger with the specified name.
func New(parent *zap.SugaredLogger, moduleName string, options ...zap.Option) *zap.SugaredLogger {
var subLogger *zap.Logger
if parent == nil {
subLogger = zap.L().Named(moduleName)
} else {
subLogger = parent.Desugar().Named(moduleName)
}
subLogger = subLogger.WithOptions(options...)
return subLogger.Sugar()
}
// NewOtherLogger creates a separate root logger.
func NewOtherLogger(
formatter zapcore.Encoder,
moduleName, logFilename, logDir string,
rotateOption []rotater.Option,
logLevel zapcore.Level,
fields ...zapcore.Field,
) (logger *zap.SugaredLogger, closer func() error, err error) {
loglevel := zap.NewAtomicLevelAt(logLevel)
logWriter, err := rotater.NewLogWriter(logFilename, logDir, rotateOption...)
if err != nil {
return
}
core := zapcore.NewCore(formatter, logWriter, loglevel)
closer = logWriter.Close
logger = zap.New(core, defaultErrorOutputOptions...).
Named(moduleName).With(fields...).Sugar()
return
}
// NewOtherLoggerWithOption creates a separate root logger with additional zap options.
func NewOtherLoggerWithOption(
formatter zapcore.Encoder,
moduleName, logFilename, logDir string,
rotateOption []rotater.Option,
logLevel zapcore.Level,
options []zap.Option,
fields ...zapcore.Field,
) (logger *zap.SugaredLogger, closer func() error, err error) {
loglevel := zap.NewAtomicLevelAt(logLevel)
logWriter, err := rotater.NewLogWriter(logFilename, logDir, rotateOption...)
if err != nil {
return
}
core := zapcore.NewCore(formatter, logWriter, loglevel)
closer = logWriter.Close
options = append(defaultErrorOutputOptions, options...)
logger = zap.New(core, options...).
Named(moduleName).With(fields...).Sugar()
return
}
// ReplaceGlobalHookLogger replaces the global zap logger (and redirects the standard log.Default() logger) with a hookable, rotating logger.
func ReplaceGlobalHookLogger(name string, verbose bool, maxBackup uint, loggingDirectory, filename string, loggerLevel Level, simple bool) (logger HookLogger, canceler func(), err error) {
var (
logWrapper loggerWithHookImpl
formatter zapcore.Encoder
)
if simple {
formatter = zapcore.NewConsoleEncoder(LogOnlyMessageFormat)
} else {
formatter = zapcore.NewConsoleEncoder(LogCommonFormat)
}
var zapLogger *zap.SugaredLogger
// initialize the global logger
zapLogger, canceler, err = replaceGlobalLogger(
verbose,
formatter,
name,
filename,
loggingDirectory,
[]rotater.Option{
rotatelogs.WithMaxAge(-1),
rotatelogs.WithRotationCount(maxBackup),
},
loggerLevel,
zap.Hooks(logWrapper.hook),
)
defer func() {
if err != nil && canceler != nil {
canceler()
canceler = nil
}
}()
if err != nil {
// do nothing
} else if zapLogger == nil {
err = errors.New("not initialized")
} else {
logWrapper.SugaredLogger = *zapLogger
logWrapper.name = name
logger = &logWrapper
}
return
}
func replaceGlobalLogger(
verbose bool,
formatter zapcore.Encoder,
mainLogName, logFilename, logDir string,
rotateOption []rotater.Option,
logLevel zapcore.Level,
additionalOptions ...zap.Option,
) (logger *zap.SugaredLogger, back func(), err error) {
level := zap.NewAtomicLevelAt(logLevel)
var defaultWriter rotater.RotateSyncer
if defaultWriter, err = rotater.NewLogWriter(logFilename, logDir, rotateOption...); err != nil {
return
}
if defaultErrorOutputOptions == nil {
defaultErrorOutputOptions = []zap.Option{zap.ErrorOutput(defaultWriter)}
}
options := defaultErrorOutputOptions
if verbose {
options = append(options, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.PanicLevel)))
}
// append the caller-supplied options after the defaults
options = append(options, additionalOptions...)
log := zap.New(zapcore.NewCore(formatter, defaultWriter, level), options...).Named(mainLogName)
var (
closers []func()
closer = func() {
for i := len(closers) - 1; i >= 0; i-- {
closers[i]()
}
}
)
defer func() {
if err != nil {
closer()
}
}()
closers = append(closers, zap.ReplaceGlobals(log))
var rollback func()
if rollback, err = zap.RedirectStdLogAt(log, zapcore.DebugLevel); err != nil {
return
}
closers = append(closers, rollback)
return log.Sugar(), closer, nil
}

26
go.mod Normal file

@@ -0,0 +1,26 @@
module amuz.es/src/logging
go 1.18
require (
github.com/go-ozzo/ozzo-routing v2.1.4+incompatible
github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible
github.com/stretchr/testify v1.7.1
github.com/valyala/fasthttp v1.34.0
go.uber.org/zap v1.21.0
)
require (
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/klauspost/compress v1.15.0 // indirect
github.com/lestrrat-go/strftime v1.0.5 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

36
hook_logger.go Normal file

@@ -0,0 +1,36 @@
package logging
import (
"go.uber.org/zap/zapcore"
"sync"
)
type (
// hookable logger container
loggerWithHookImpl struct {
loggerImpl
hookerLock sync.RWMutex
hooker LoggerHook
}
)
func (l *loggerWithHookImpl) hook(entry zapcore.Entry) (err error) {
l.hookerLock.RLock()
defer l.hookerLock.RUnlock()
if l.hooker != nil {
err = l.hooker(entry.Level, entry.LoggerName, entry.Message, entry.Time)
}
return
}
func (l *loggerWithHookImpl) SetHook(hook LoggerHook) (err error) {
l.hookerLock.Lock()
defer l.hookerLock.Unlock()
l.hooker = hook
if hook == nil {
l.Info("log hook cleared")
} else {
l.Info("log hook set")
}
return
}
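For illustration, a hedged sketch of installing a hook through `SetHook` (the `notifyOps` sink and the file names are hypothetical): the hook receives every entry written through the logger and can forward important ones to an external system.
```go
package main

import (
	"time"

	"amuz.es/src/logging"
)

// notifyOps stands in for a real alerting integration.
func notifyOps(logger, message string, at time.Time) {}

func main() {
	logger, cancel, err := logging.ReplaceGlobalHookLogger(
		"app", false, 7, "./logs", "app.log", logging.LevelInfo, false)
	if err != nil {
		panic(err)
	}
	defer cancel()

	// Forward warnings and above to the external sink.
	_ = logger.SetHook(func(level logging.Level, name, msg string, at time.Time) error {
		if level >= logging.LevelWarn {
			notifyOps(name, msg, at)
		}
		return nil
	})

	logger.Warn("disk usage above 90%")
}
```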

51
log_format.go Normal file

@@ -0,0 +1,51 @@
package logging
import "go.uber.org/zap/zapcore"
var (
// LogCommonFormat is a common log entry format.
LogCommonFormat = zapcore.EncoderConfig{
TimeKey: "ts",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "msg",
StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.CapitalLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.StringDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
}
// LogOnlyMessageFormat is a reduced log entry format.
LogOnlyMessageFormat = zapcore.EncoderConfig{
TimeKey: "",
LevelKey: "L",
NameKey: "",
CallerKey: "",
MessageKey: "M",
StacktraceKey: "",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
switch l {
case zapcore.DebugLevel:
enc.AppendString("(-)")
case zapcore.InfoLevel:
case zapcore.WarnLevel:
enc.AppendString("(*)")
case zapcore.ErrorLevel:
enc.AppendString("(!)")
case zapcore.DPanicLevel:
fallthrough
case zapcore.PanicLevel:
enc.AppendString("(!!)")
case zapcore.FatalLevel:
enc.AppendString("(!!!)")
default:
// nothing
}
},
}
)

35
logger.go Normal file

@@ -0,0 +1,35 @@
// Package logging is a convenient wrapper around the zap logger.
// It provides logger grouping and log rotation.
package logging // import "amuz.es/src/logging"
import (
"go.uber.org/zap"
"log"
)
type (
// logger container
loggerImpl struct {
zap.SugaredLogger
name string
}
)
// Named adds a sub-scope to the logger's name.
func (l *loggerImpl) Named(name string) Logger {
return &loggerImpl{
SugaredLogger: *l.SugaredLogger.Named(name),
name: l.name + "." + name,
}
}
// Name returns logger name
func (l *loggerImpl) Name() string {
return l.name
}
// ToStdLogAt returns a *log.Logger which writes to the wrapped logger at the
// required level.
func (l *loggerImpl) ToStdLogAt(level Level) (*log.Logger, error) {
return zap.NewStdLogAt(l.Desugar(), level)
}

50
rotater/global.go Normal file

@@ -0,0 +1,50 @@
package rotater
import (
"github.com/lestrrat-go/file-rotatelogs"
"os"
"path/filepath"
)
var loggers RotateSyncerSet
// NewLogWriter creates a RotateSyncer writer for a logging.Logger.
func NewLogWriter(FileName string, logDir string, options ...Option) (RotateSyncer, error) {
switch FileName {
case "Stdout":
return NewLocked(os.Stdout), nil
case "Stderr":
return NewLocked(os.Stderr), nil
case "Null":
return NewNull(), nil
default:
logpath := FileName
if logDir != "" && !filepath.IsAbs(FileName) {
logpath, _ = filepath.Abs(filepath.Join(logDir, FileName))
}
options = append(options, rotatelogs.WithLinkName(logpath))
if logWriter, err := NewRotater(logpath+".%Y%m%d", options...); err != nil {
return nil, err
} else {
loggers.Store(logWriter)
logWriter.SetOnClose(func() { loggers.Delete(logWriter) })
return logWriter, nil
}
}
}
// Rotate will rotate all registered loggers.
func Rotate() {
loggers.Range(func(rotater RotateSyncer) {
_ = rotater.Sync()
_ = rotater.Rotate()
})
}
// Close will close all registered loggers.
func Close() {
loggers.Range(func(rotater RotateSyncer) {
_ = rotater.Sync()
_ = rotater.Close()
})
}
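For illustration, a hedged sketch of wiring the package-level `Rotate()` to SIGHUP so an external logrotate-style tool can trigger rotation (paths are illustrative):
```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"amuz.es/src/logging/rotater"
)

func main() {
	// Register a file-backed writer; "Stdout", "Stderr" and "Null" are
	// handled specially by NewLogWriter.
	w, err := rotater.NewLogWriter("app.log", "./logs")
	if err != nil {
		panic(err)
	}
	defer w.Close()

	// Rotate every registered writer whenever the process receives SIGHUP.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGHUP)
	go func() {
		for range sig {
			rotater.Rotate()
		}
	}()

	_, _ = w.Write([]byte("started\n"))
	select {} // block so the signal handler keeps running (demo only)
}
```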

24
rotater/iface.go Normal file

@@ -0,0 +1,24 @@
package rotater
import (
rotatelogs "github.com/lestrrat-go/file-rotatelogs"
"io"
)
type (
// WriteSyncer is a WriteCloser interface with synchronize capability.
WriteSyncer interface {
io.WriteCloser
Sync() error
}
// RotateSyncer is a WriteSyncer interface with file rotate capability.
RotateSyncer interface {
WriteSyncer
SetOnClose(func())
Rotate() error
}
// Option is an alias for the rotatelogs.Option
Option = rotatelogs.Option
)

35
rotater/null.go Normal file

@@ -0,0 +1,35 @@
package rotater
import (
"sync"
)
type nullWriteSyncer struct {
setOnceOnclose sync.Once
onceOnclose sync.Once
onClose func()
}
// NewNull creates a blackhole writer.
func NewNull() RotateSyncer {
return &nullWriteSyncer{}
}
func (s *nullWriteSyncer) SetOnClose(closeFunc func()) {
s.setOnceOnclose.Do(func() {
s.onClose = closeFunc
})
}
func (s *nullWriteSyncer) Rotate() error { return nil }
func (s *nullWriteSyncer) Write(bs []byte) (int, error) { return len(bs), nil }
func (s *nullWriteSyncer) Sync() error { return nil }
func (s *nullWriteSyncer) Close() error {
s.onceOnclose.Do(func() {
if s.onClose != nil {
s.onClose()
s.onClose = nil
}
})
return nil
}

57
rotater/set_logcore.go Normal file

@@ -0,0 +1,57 @@
package rotater
import (
"sync"
"sync/atomic"
)
// RotateSyncerSet is a registry of RotateSyncer writers.
type RotateSyncerSet struct {
storage sync.Map
}
// Delete deletes the value for a key.
func (s *RotateSyncerSet) Delete(key RotateSyncer) {
s.storage.Delete(key)
}
// Exist returns whether value was found in the map.
func (s *RotateSyncerSet) Exist(key RotateSyncer) (ok bool) {
_, ok = s.storage.Load(key)
return
}
// SetNx returns false if the value was already present in the map.
// Otherwise, it stores and returns true.
func (s *RotateSyncerSet) SetNx(key RotateSyncer) bool {
_, exist := s.storage.LoadOrStore(key, 0)
return !exist
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
func (s *RotateSyncerSet) Range(f func(key RotateSyncer)) {
s.storage.Range(s.rangeWrap(f))
}
// Store sets the value for a key.
func (s *RotateSyncerSet) Store(key RotateSyncer) {
s.storage.Store(key, 0)
}
// Len returns the size of the map.
func (s *RotateSyncerSet) Len() int {
var count uint64
s.Range(func(conn RotateSyncer) {
atomic.AddUint64(&count, 1)
})
return int(count)
}
func (s *RotateSyncerSet) rangeWrap(f func(key RotateSyncer)) func(key, value any) bool {
ok := true
return func(key, value any) bool {
f(key.(RotateSyncer))
return ok
}
}

56
rotater/wrapped.go Normal file

@@ -0,0 +1,56 @@
package rotater
import (
"sync"
)
type lockedWriteSyncer struct {
setOnceOnclose sync.Once
onceOnclose sync.Once
onClose func()
sync.Mutex
ws WriteSyncer
}
// NewLocked creates a lock-protected writer.
func NewLocked(ws WriteSyncer) RotateSyncer {
if lws, ok := ws.(*lockedWriteSyncer); ok {
// no need to layer on another lock
return lws
}
return &lockedWriteSyncer{ws: ws}
}
func (s *lockedWriteSyncer) SetOnClose(closeFunc func()) {
s.setOnceOnclose.Do(func() {
s.onClose = closeFunc
})
}
func (s *lockedWriteSyncer) Rotate() error {
return s.Sync()
}
func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
s.Lock()
defer s.Unlock()
return s.ws.Write(bs)
}
func (s *lockedWriteSyncer) Sync() error {
s.Lock()
defer s.Unlock()
return s.ws.Sync()
}
func (s *lockedWriteSyncer) Close() error {
s.Lock()
defer s.Unlock()
s.onceOnclose.Do(func() {
if s.onClose != nil {
s.onClose()
s.onClose = nil
}
})
return s.ws.Close()
}

48
rotater/writeRotater.go Normal file

@@ -0,0 +1,48 @@
package rotater
import (
"github.com/lestrrat-go/file-rotatelogs"
"sync"
)
type rotateSyncer struct {
setOnceOnclose sync.Once
onceOnclose sync.Once
onClose func()
*rotatelogs.RotateLogs
}
// NewRotater creates a RotateSyncer writer.
func NewRotater(filename string, options ...Option) (RotateSyncer, error) {
if rotateLogger, err := rotatelogs.New(filename, options...); err != nil {
return nil, err
} else {
return &rotateSyncer{RotateLogs: rotateLogger}, nil
}
}
func (r *rotateSyncer) SetOnClose(closeFunc func()) {
r.setOnceOnclose.Do(func() {
r.onClose = closeFunc
})
}
func (r *rotateSyncer) Rotate() error {
return r.RotateLogs.Rotate()
}
func (r *rotateSyncer) Close() error {
r.onceOnclose.Do(func() {
if r.onClose != nil {
r.onClose()
r.onClose = nil
}
})
return r.RotateLogs.Close()
}
func (r *rotateSyncer) Sync() error {
return nil
}
func (r *rotateSyncer) Write(bs []byte) (int, error) {
return r.RotateLogs.Write(bs)
}

14
wrapped.go Normal file

@@ -0,0 +1,14 @@
package logging
import (
"go.uber.org/zap/zapcore"
)
type zapWrappedSyncer struct {
zapcore.WriteSyncer
}
func (r *zapWrappedSyncer) SetOnClose(closeFunc func()) {}
func (r *zapWrappedSyncer) Rotate() (err error) { return }
func (r *zapWrappedSyncer) Close() (err error) { return }
func (r *zapWrappedSyncer) Sync() error { return r.WriteSyncer.Sync() }