Refactor code

兔子 2026-03-19 16:37:57 +08:00
parent 51608601cf
commit 8023bfe328
Signed by: b612
GPG Key ID: 99DD2222B612B612
75 changed files with 13325 additions and 394 deletions

.github/workflows/quality.yml (new file)

@@ -0,0 +1,72 @@
name: quality
on:
push:
branches:
- main
- master
pull_request:
jobs:
unit:
name: unit-${{ matrix.os }}-go${{ matrix.go-version }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os:
- ubuntu-latest
- windows-latest
go-version:
- "1.16.x"
- "1.22.x"
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: true
- name: Run unit tests
run: go test ./...
race:
name: race-linux
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22.x"
cache: true
- name: Run race tests
run: go test -race ./...
perf-fuzz-smoke:
name: bench-fuzz-smoke
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.22.x"
cache: true
- name: Benchmark smoke
run: go test . -run '^$' -bench Benchmark -benchmem -benchtime=100x
- name: Fuzz smoke text-json
run: go test . -run '^$' -fuzz=FuzzTextAndJSONFormatter -fuzztime=2s
- name: Fuzz smoke keyword
run: go test . -run '^$' -fuzz=FuzzKeywordHighlight -fuzztime=2s

.gitignore (new file)

@@ -0,0 +1,9 @@
.sentrux/
agent_readme.md
target.md
.gocache/
testdata/fuzz/
.idea/
bin/
*.log
test_*.log

LICENSE (new file)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2026 starnet contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md (new file)

@@ -0,0 +1,151 @@
# starlog
`starlog` is a production-oriented Go logging library that balances console readability with server-side observability.
- Go version: `1.16+`
- Core goals: readable, controllable, extensible, observable
See [docs/USAGE.md](./docs/USAGE.md) for the full usage guide and [docs/MIGRATION.md](./docs/MIGRATION.md) for migration notes.
## Quick Start
```go
package main
import (
"os"
"b612.me/starlog"
)
func main() {
log := starlog.NewStarlog(os.Stdout)
log.SetName("demo")
log.SetShowStd(true)
log.SetColorMode(starlog.ColorModeLevelOnly)
log.WithField("user_id", 42).Info("login ok")
log.WithError(os.ErrNotExist).Error("open file failed")
}
```
## Core Capabilities
- Structured logging: `WithField/WithFields/WithError/WithContext`
- Level filtering: `SetLevel/GetLevel/IsLevelEnabled/ParseLevel`
- Formatted output: `TextFormatter`, `JSONFormatter`, custom `Formatter`
- Multi-destination output: `Sink`, `MultiSink`, `RouteHandler`
- Rotation: `RotatePolicy`, `StartRotate*`, `NewRotate*Sink`
- Keyword presets: `ApplyKeywordPreset/MergeKeywordPreset` (built-in `MobaLite/MobaFull`)
- High-frequency flood control: deduplication, sampling, rate limiting
- Redaction: rule-based masking plus failure policies
- Metrics: write errors, async drops, pending queue, multi-sink stats, rotation status
- Lifecycle management: `Flush/Sync/Close/Shutdown`
- Concurrency-safe configuration: `Config` snapshot APIs (`GetConfig/ApplyConfig/UpdateConfig`)
## Core Interfaces
```go
type Handler interface { Handle(context.Context, *Entry) error }
type Formatter interface { Format(*Entry) ([]byte, error) }
type Sink interface { Write([]byte) error; Close() error }
type RotatePolicy interface {
ShouldRotate(FileInfo, *Entry) bool
NextPath(string, time.Time) string
}
// Optional extension: when a policy implements this interface,
// the framework prefers ArchivePath.
type RotateArchivePathProvider interface {
ArchivePath(string, time.Time) string
}
```
`NextPath` returns the target path for the archived file.
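For illustration, a minimal size-based policy against these interfaces (a sketch of consumer code, not an API shipped by the library; the type name and threshold are made up, and treating `Size()` as a method on `FileInfo` follows the tests in this commit):
```go
// SizeRotatePolicy is an example policy: rotate once the active
// log file reaches MaxBytes.
type SizeRotatePolicy struct {
	MaxBytes int64
}

func (p SizeRotatePolicy) ShouldRotate(fi starlog.FileInfo, _ *starlog.Entry) bool {
	return fi != nil && fi.Size() >= p.MaxBytes
}

func (p SizeRotatePolicy) NextPath(current string, now time.Time) string {
	// Target path for the archived file, e.g. app.log.20060102-150405
	return current + "." + now.Format("20060102-150405")
}
```
Such a policy would then be started with, e.g., `StartRotatePolicy(log, SizeRotatePolicy{MaxBytes: 100 << 20}, 1)`.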
## Keyword Presets
Built-in presets:
- `KeywordPresetMobaLite`: highlights common keywords (`error/true/warn/success`, etc.)
- `KeywordPresetMobaFull`: extends Lite with more runtime vocabulary
```go
log := starlog.NewDevelopment(os.Stdout)
log.ApplyKeywordPreset(starlog.KeywordPresetMobaLite) // replaces the existing keyword configuration
// Customize further on top of the preset
log.SetKeywordColor("OOM", []starlog.Attr{starlog.FgHiRed, starlog.Bold})
// Optional match modes (off by default, preserving legacy behavior)
log.SetKeywordMatchOptions(starlog.KeywordMatchOptions{
IgnoreCase: true, // case-insensitive matching
WholeWord: true, // match whole words only
})
```
To keep existing keywords and layer a preset on top, use `MergeKeywordPreset(...)`.
## Rotation Entry Points
Recommended primary paths:
- `StartRotatePolicy`
- `StartManagedRotatePolicy`
- `StartRotateByTime/StartRotateBySize/StartRotateByTimeSize`
- `StartManagedRotateByTime/StartManagedRotateBySize/StartManagedRotateByTimeSize`
RouteHandler-routed files can use rotating sinks directly:
- `NewRotatePolicySink`
- `NewManagedRotatePolicySink`
- `NewRotateBy*Sink`
- `NewManagedRotateBy*Sink`
Compatibility entry point:
- `StartArchive` (kept for compatibility; not recommended for new code)
## Production Initialization Example
```go
package main
import (
"context"
"time"
"b612.me/starlog"
)
func main() {
log := starlog.NewProduction(nil)
log.SetName("svc")
log.SetAutoAppendNewline(true)
if err := starlog.SetLogFile("./logs/app.log", log, true); err != nil {
panic(err)
}
if err := starlog.StartManagedRotateBySize(log, 200*1024*1024, 5, starlog.RotateManageOptions{
MaxBackups: 14,
MaxAge: 14 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
}); err != nil {
panic(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
defer log.Shutdown(ctx)
log.WithField("module", "boot").Info("service started")
}
```
## Compatibility Policy
- Existing legacy APIs keep working.
- New features land as additive APIs; no forced breaking migration.
- New projects should prefer the `Config` API and the `RotatePolicy` primary path.

@@ -1,31 +1,40 @@
package starlog
import (
"context"
"errors"
"os"
"path/filepath"
"time"
"b612.me/starlog/internal/archivex"
)
var archMap starMapKV
func init() {
archMap = newStarMap()
}
var archiveStore = archivex.NewStore()
type Archive interface {
ShouldArchiveNow(*StarLogger, string, os.FileInfo) bool
NextLogFilePath(*StarLogger, string, os.FileInfo) string
ArchiveLogFilePath(*StarLogger, string, os.FileInfo) string
Interval() int64
// Deprecated: use HookBeforeArchive on concrete implementations.
HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error //archivePath;currentPath
HookAfterArchive() func(*StarLogger, string, string, os.FileInfo) error //archivePath;currentPath
DoArchive() func(*StarLogger, string, string, os.FileInfo) error
}
type logfileinfo struct {
fullpath string
pointer *os.File
type archiveStrategy struct {
interval int64
shouldRotate func(string, os.FileInfo, *Entry) bool
nextLogPath func(string, os.FileInfo, time.Time) string
archivePath func(string, os.FileInfo, time.Time) string
beforeHook func(*StarLogger, string, string, os.FileInfo) error
afterHook func(*StarLogger, string, string, os.FileInfo) error
doArchive func(*StarLogger, string, string, os.FileInfo) error
}
type archiveBeforeHookProvider interface {
HookBeforeArchive() func(*StarLogger, string, string, os.FileInfo) error
}
func SetLogFile(path string, logger *StarLogger, appendMode bool) error {
@@ -46,111 +55,175 @@ func SetLogFile(path string, logger *StarLogger, appendMode bool) error {
if err != nil {
return err
}
if archMap.MustGet(logger.logcore.id) != nil {
if current, ok := archiveStore.GetFile(logger.logcore.id); ok {
logger.SetSwitching(true)
err := archMap.MustGet(logger.logcore.id).(logfileinfo).pointer.Close()
err := current.Pointer.Close()
if err != nil {
logger.logcore.output = nil
logger.SetWriter(nil)
logger.SetSwitching(false)
return err
}
err = archMap.Delete(logger.logcore.id)
err = archiveStore.DeleteFile(logger.logcore.id)
if err != nil {
logger.logcore.output = nil
logger.SetWriter(nil)
logger.SetSwitching(false)
return err
}
}
err = archMap.Store(logger.logcore.id, logfileinfo{
fullpath: fullpath,
pointer: fp,
err = archiveStore.SetFile(logger.logcore.id, archivex.FileRecord{
FullPath: fullpath,
Pointer: fp,
})
if err != nil {
fp.Close()
logger.logcore.output = nil
logger.SetWriter(nil)
logger.SetSwitching(false)
return err
}
logger.SetSwitching(true)
logger.logcore.output = fp
logger.SetWriter(fp)
logger.SetSwitching(false)
return nil
}
func CloseWithSwitching(logger *StarLogger) error {
if archMap.MustGet(logger.logcore.id) != nil {
func CloseLogFileWithSwitching(logger *StarLogger) error {
if current, ok := archiveStore.GetFile(logger.logcore.id); ok {
logger.SetSwitching(true)
err := archMap.MustGet(logger.logcore.id).(logfileinfo).pointer.Close()
err := current.Pointer.Close()
if err != nil {
logger.SetWriter(nil)
return err
}
err = archiveStore.DeleteFile(logger.logcore.id)
if err != nil {
return err
}
logger.logcore.mu.Lock()
if logger.logcore.output == current.Pointer {
logger.logcore.output = nil
return err
}
err = archMap.Delete(logger.logcore.id)
if err != nil {
return err
}
logger.logcore.mu.Unlock()
}
return nil
}
func Close(logger *StarLogger) error {
func CloseLogFile(logger *StarLogger) error {
defer logger.SetSwitching(false)
return CloseWithSwitching(logger)
return CloseLogFileWithSwitching(logger)
}
// Deprecated: use CloseLogFileWithSwitching.
func CloseWithSwitching(logger *StarLogger) error {
return CloseLogFileWithSwitching(logger)
}
// Deprecated: use (*StarLogger).Close or CloseLogFile.
func Close(logger *StarLogger) error {
return CloseLogFile(logger)
}
func GetLogFileInfo(logger *StarLogger) (os.FileInfo, error) {
if archMap.MustGet(logger.logcore.id) != nil {
return archMap.MustGet(logger.logcore.id).(logfileinfo).pointer.Stat()
if current, ok := archiveStore.GetFile(logger.logcore.id); ok {
return current.Pointer.Stat()
}
return nil, errors.New("logger don't have a register logfile")
}
func StartArchive(logger *StarLogger, arch Archive) error {
if archMap.MustGet("arch"+logger.logcore.id) != nil {
func resolveBeforeHook(arch Archive) func(*StarLogger, string, string, os.FileInfo) error {
if arch == nil {
return nil
}
if hookProvider, ok := arch.(archiveBeforeHookProvider); ok {
return hookProvider.HookBeforeArchive()
}
return arch.HookBeforArchive()
}
func startArchiveWithStrategy(logger *StarLogger, strategy archiveStrategy) error {
archiveKey := "arch" + logger.logcore.id
if _, ok := archiveStore.GetRunner(archiveKey); ok {
return errors.New("already running")
}
stopChan := make(chan int)
archMap.Store("arch"+logger.logcore.id, stopChan)
go func(stopChan chan int, arch Archive, logger *StarLogger) {
interval := strategy.interval
if interval <= 0 {
interval = 1
}
ctx, cancel := context.WithCancel(context.Background())
runner := &archivex.Runner{
Cancel: cancel,
Done: make(chan struct{}),
}
if err := archiveStore.SetRunner(archiveKey, runner); err != nil {
cancel()
return err
}
go func(ctx context.Context, runner *archivex.Runner, logger *StarLogger) {
defer close(runner.Done)
defer archiveStore.DeleteRunner(archiveKey)
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
for {
select {
case <-stopChan:
case <-ctx.Done():
return
case <-time.After(time.Millisecond * time.Duration(1000*arch.Interval())):
case <-ticker.C:
}
fileinfo, err := GetLogFileInfo(logger)
if err != nil {
logger.Errorf("cannot get log file info,reason is %v\n", err)
continue
}
if archMap.MustGet(logger.logcore.id) == nil {
current, ok := archiveStore.GetFile(logger.logcore.id)
if !ok {
logger.Errorf("cannot get log core info from the map:no such keys\n")
continue
}
fullpath := archMap.MustGet(logger.logcore.id).(logfileinfo).fullpath
if !arch.ShouldArchiveNow(logger, fullpath, fileinfo) {
fullpath := current.FullPath
now := time.Now()
entry := &Entry{
Time: now,
LoggerName: logger.GetName(),
}
if strategy.shouldRotate == nil || !strategy.shouldRotate(fullpath, fileinfo, entry) {
continue
}
newLogPath := arch.NextLogFilePath(logger, fullpath, fileinfo)
archiveLogPath := arch.ArchiveLogFilePath(logger, fullpath, fileinfo)
if arch.HookBeforArchive() != nil {
if err := arch.HookBeforArchive()(logger, archiveLogPath, fullpath, fileinfo); err != nil {
newLogPath := fullpath
if strategy.nextLogPath != nil {
newLogPath = strategy.nextLogPath(fullpath, fileinfo, now)
}
if newLogPath == "" {
newLogPath = fullpath
}
archiveLogPath := fullpath
if strategy.archivePath != nil {
archiveLogPath = strategy.archivePath(fullpath, fileinfo, now)
}
if archiveLogPath == "" {
logger.Errorf("archive path cannot be empty\n")
continue
}
if strategy.doArchive == nil && archiveLogPath == fullpath {
logger.Errorf("archive path equals current path, skip rotate\n")
continue
}
if strategy.beforeHook != nil {
if err := strategy.beforeHook(logger, archiveLogPath, fullpath, fileinfo); err != nil {
logger.Errorf("error occur while executing hook before archive,detail is %v\n", err)
continue
}
}
err = CloseWithSwitching(logger)
err = CloseLogFileWithSwitching(logger)
if err != nil {
continue
}
if arch.DoArchive() == nil {
if strategy.doArchive == nil {
err = os.Rename(fullpath, archiveLogPath)
if err != nil {
continue
}
} else {
err = arch.DoArchive()(logger, fullpath, archiveLogPath, fileinfo)
err = strategy.doArchive(logger, fullpath, archiveLogPath, fileinfo)
if err != nil {
logger.Errorf("error occur while executing archive log file,detail is %v\n", err)
continue
@@ -167,29 +240,95 @@ func StartArchive(logger *StarLogger, arch Archive) error {
logger.Errorf("cannot get new log core info from the map:no such keys\n")
continue
}
if arch.HookAfterArchive() != nil {
if err := arch.HookAfterArchive()(logger, archiveLogPath, newLogPath, fileinfo); err != nil {
if strategy.afterHook != nil {
if err := strategy.afterHook(logger, archiveLogPath, newLogPath, fileinfo); err != nil {
logger.Errorf("error occur while executing hook after archive,detail is %v\n", err)
continue
}
}
}
}(stopChan, arch, logger)
}(ctx, runner, logger)
return nil
}
func IsArchiveRun(logger *StarLogger) bool {
if archMap.MustGet("arch"+logger.logcore.id) == nil {
return false
func buildRotateStrategy(policy RotatePolicy, checkInterval int64) archiveStrategy {
return archiveStrategy{
interval: checkInterval,
shouldRotate: func(fullpath string, info os.FileInfo, entry *Entry) bool {
return policy.ShouldRotate(info, entry)
},
nextLogPath: func(oldpath string, info os.FileInfo, now time.Time) string {
return oldpath
},
archivePath: func(oldpath string, info os.FileInfo, now time.Time) string {
return resolveRotateArchivePath(policy, oldpath, now)
},
}
return true
}
func buildRotateArchiveStrategy(archive *RotatePolicyArchive) archiveStrategy {
strategy := buildRotateStrategy(archive.policy, archive.checkInterval)
strategy.beforeHook = archive.HookBeforeArchive()
strategy.afterHook = archive.HookAfterArchive()
if archive.DoArchive() != nil {
strategy.doArchive = archive.DoArchive()
}
if archive.Interval() > 0 {
strategy.interval = archive.Interval()
}
return strategy
}
func buildLegacyArchiveStrategy(logger *StarLogger, arch Archive) archiveStrategy {
return archiveStrategy{
interval: arch.Interval(),
shouldRotate: func(fullpath string, info os.FileInfo, entry *Entry) bool {
return arch.ShouldArchiveNow(logger, fullpath, info)
},
nextLogPath: func(oldpath string, info os.FileInfo, now time.Time) string {
return arch.NextLogFilePath(logger, oldpath, info)
},
archivePath: func(oldpath string, info os.FileInfo, now time.Time) string {
return arch.ArchiveLogFilePath(logger, oldpath, info)
},
beforeHook: resolveBeforeHook(arch),
afterHook: arch.HookAfterArchive(),
doArchive: arch.DoArchive(),
}
}
func StartRotatePolicy(logger *StarLogger, policy RotatePolicy, checkInterval int64) error {
if policy == nil {
return errors.New("rotate policy is nil")
}
return startArchiveWithStrategy(logger, buildRotateStrategy(policy, checkInterval))
}
// Deprecated: prefer StartRotatePolicy or StartManagedRotatePolicy.
func StartArchive(logger *StarLogger, arch Archive) error {
if arch == nil {
return errors.New("archive is nil")
}
if rotateArchive, ok := arch.(*RotatePolicyArchive); ok && rotateArchive != nil && rotateArchive.policy != nil {
return startArchiveWithStrategy(logger, buildRotateArchiveStrategy(rotateArchive))
}
return startArchiveWithStrategy(logger, buildLegacyArchiveStrategy(logger, arch))
}
func IsArchiveRun(logger *StarLogger) bool {
_, ok := archiveStore.GetRunner("arch" + logger.logcore.id)
return ok
}
func StopArchive(logger *StarLogger) {
if archMap.MustGet("arch"+logger.logcore.id) == nil {
archiveKey := "arch" + logger.logcore.id
runner, ok := archiveStore.GetRunner(archiveKey)
if !ok || runner == nil {
return
}
archMap.MustGet("arch" + logger.logcore.id).(chan int) <- 1
runner.Cancel()
<-runner.Done
_ = archiveStore.DeleteRunner(archiveKey)
}
type ArchiveByDate struct {
@@ -244,7 +383,10 @@ func (abd *ArchiveByDate) Interval() int64 {
}
func (abd *ArchiveByDate) HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.HookBeforeArchive()
}
func (abd *ArchiveByDate) HookBeforeArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.hookBefore
}
@@ -263,6 +405,10 @@ func (abd *ArchiveByDate) DoArchive() func(*StarLogger, string, string, os.FileI
}
func (abd *ArchiveByDate) SetHookBeforArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.SetHookBeforeArchive(f)
}
func (abd *ArchiveByDate) SetHookBeforeArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.hookBefore = f
}
@@ -328,6 +474,10 @@ func (abd *ArchiveBySize) Interval() int64 {
}
func (abd *ArchiveBySize) HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.HookBeforeArchive()
}
func (abd *ArchiveBySize) HookBeforeArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.hookBefore
}
@@ -336,6 +486,10 @@ func (abd *ArchiveBySize) HookAfterArchive() func(*StarLogger, string, string, o
}
func (abd *ArchiveBySize) SetHookBeforArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.SetHookBeforeArchive(f)
}
func (abd *ArchiveBySize) SetHookBeforeArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.hookBefore = f
}
@@ -414,6 +568,10 @@ func (abd *ArchiveByDateSize) Interval() int64 {
}
func (abd *ArchiveByDateSize) HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.HookBeforeArchive()
}
func (abd *ArchiveByDateSize) HookBeforeArchive() func(*StarLogger, string, string, os.FileInfo) error {
return abd.hookBefore
}
@@ -428,6 +586,10 @@ func (abd *ArchiveByDateSize) HookAfterArchive() func(*StarLogger, string, strin
}
func (abd *ArchiveByDateSize) SetHookBeforArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.SetHookBeforeArchive(f)
}
func (abd *ArchiveByDateSize) SetHookBeforeArchive(f func(*StarLogger, string, string, os.FileInfo) error) {
abd.hookBefore = f
}

@@ -1,17 +1,248 @@
package starlog
import (
"os"
"path/filepath"
"testing"
"time"
)
func TestArchiveByDate(t *testing.T) {
l := Std
SetLogFile("test.log", l, true)
StartArchive(l, NewArchiveByDateSize(4096, 10, 2, "test.log",
"_2006_01_02_15_04_05.log", true, nil, nil))
for {
time.Sleep(time.Second)
l.Infoln("hahaha", time.Now())
type archiveNoop struct {
interval int64
}
func (archive *archiveNoop) ShouldArchiveNow(logger *StarLogger, fullpath string, info os.FileInfo) bool {
return false
}
func (archive *archiveNoop) NextLogFilePath(logger *StarLogger, oldpath string, info os.FileInfo) string {
return oldpath
}
func (archive *archiveNoop) ArchiveLogFilePath(logger *StarLogger, oldpath string, info os.FileInfo) string {
return oldpath
}
func (archive *archiveNoop) Interval() int64 {
return archive.interval
}
func (archive *archiveNoop) HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error {
return nil
}
func (archive *archiveNoop) HookAfterArchive() func(*StarLogger, string, string, os.FileInfo) error {
return nil
}
func (archive *archiveNoop) DoArchive() func(*StarLogger, string, string, os.FileInfo) error {
return nil
}
func TestArchiveStopCanRestart(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "archive.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer Close(logger)
archive := &archiveNoop{interval: 1}
if err := StartArchive(logger, archive); err != nil {
t.Fatalf("StartArchive first run failed: %v", err)
}
if !IsArchiveRun(logger) {
t.Fatalf("archive should be running after StartArchive")
}
StopArchive(logger)
if IsArchiveRun(logger) {
t.Fatalf("archive should be stopped after StopArchive")
}
if err := StartArchive(logger, archive); err != nil {
t.Fatalf("StartArchive second run failed: %v", err)
}
StopArchive(logger)
}
func TestArchiveZeroIntervalDoesNotFail(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "archive_zero.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer Close(logger)
archive := &archiveNoop{interval: 0}
if err := StartArchive(logger, archive); err != nil {
t.Fatalf("StartArchive should accept zero interval: %v", err)
}
StopArchive(logger)
}
type rotateWhenNonEmptyPolicy struct{}
func (policy *rotateWhenNonEmptyPolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
return info != nil && info.Size() > 0
}
func (policy *rotateWhenNonEmptyPolicy) NextPath(current string, now time.Time) string {
return current + "." + now.Format("20060102150405.000000") + ".bak"
}
type rotatePreferArchivePathPolicy struct{}
func (policy *rotatePreferArchivePathPolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
return info != nil && info.Size() > 0
}
func (policy *rotatePreferArchivePathPolicy) NextPath(current string, now time.Time) string {
return current + "." + now.Format("20060102150405.000000") + ".next.bak"
}
func (policy *rotatePreferArchivePathPolicy) ArchivePath(current string, now time.Time) string {
return current + "." + now.Format("20060102150405.000000") + ".archive.bak"
}
type rotateArchivePathFallbackPolicy struct{}
func (policy *rotateArchivePathFallbackPolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
return info != nil && info.Size() > 0
}
func (policy *rotateArchivePathFallbackPolicy) NextPath(current string, now time.Time) string {
return current + "." + now.Format("20060102150405.000000") + ".nextonly.bak"
}
func (policy *rotateArchivePathFallbackPolicy) ArchivePath(current string, now time.Time) string {
return ""
}
func TestStartRotatePolicyPrimaryPath(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logPath := filepath.Join(testBinDir(t), "rotate.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer Close(logger)
defer StopArchive(logger)
if err := StartRotatePolicy(logger, &rotateWhenNonEmptyPolicy{}, 1); err != nil {
t.Fatalf("StartRotatePolicy failed: %v", err)
}
logger.Infoln("trigger rotate")
var found bool
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
matches, err := filepath.Glob(logPath + ".*.bak")
if err == nil && len(matches) > 0 {
found = true
break
}
time.Sleep(100 * time.Millisecond)
}
if !found {
t.Fatalf("rotate policy should create archived files")
}
}
func TestStartRotatePolicyPrefersArchivePathProvider(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logPath := filepath.Join(testBinDir(t), "rotate_provider.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer Close(logger)
defer StopArchive(logger)
if err := StartRotatePolicy(logger, &rotatePreferArchivePathPolicy{}, 1); err != nil {
t.Fatalf("StartRotatePolicy failed: %v", err)
}
logger.Infoln("trigger rotate with provider")
var foundArchive bool
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
matches, err := filepath.Glob(logPath + ".*.archive.bak")
if err == nil && len(matches) > 0 {
foundArchive = true
break
}
time.Sleep(100 * time.Millisecond)
}
if !foundArchive {
t.Fatalf("rotate policy should use ArchivePath when provider is implemented")
}
nextMatches, _ := filepath.Glob(logPath + ".*.next.bak")
if len(nextMatches) > 0 {
t.Fatalf("rotate policy should not use NextPath when ArchivePath is available")
}
}
func TestStartRotatePolicyArchivePathProviderFallbackToNextPath(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logPath := filepath.Join(testBinDir(t), "rotate_provider_fallback.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer Close(logger)
defer StopArchive(logger)
if err := StartRotatePolicy(logger, &rotateArchivePathFallbackPolicy{}, 1); err != nil {
t.Fatalf("StartRotatePolicy failed: %v", err)
}
logger.Infoln("trigger rotate fallback")
var foundNext bool
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
matches, err := filepath.Glob(logPath + ".*.nextonly.bak")
if err == nil && len(matches) > 0 {
foundNext = true
break
}
time.Sleep(100 * time.Millisecond)
}
if !foundNext {
t.Fatalf("rotate policy should fallback to NextPath when ArchivePath returns empty")
}
}
func TestArchiveHookBeforeAlias(t *testing.T) {
archive := NewArchiveBySize(1024, 1, "app.log", "app.log.2006", false, nil, nil)
hook := func(*StarLogger, string, string, os.FileInfo) error {
return nil
}
archive.SetHookBeforeArchive(hook)
if archive.HookBeforArchive() == nil || archive.HookBeforeArchive() == nil {
t.Fatalf("both hook getter names should work")
}
}
func TestCloseLogFileClearsManagedWriter(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "close_logfile.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
if err := CloseLogFile(logger); err != nil {
t.Fatalf("CloseLogFile failed: %v", err)
}
if logger.GetWriter() != nil {
t.Fatalf("CloseLogFile should clear managed writer")
}
}

benchmark_test.go (new file)

@@ -0,0 +1,50 @@
package starlog
import (
"io"
"testing"
)
func newBenchmarkLogger() *StarLogger {
logger := NewStarlog(io.Discard)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
return logger
}
func BenchmarkInfoTextFormatter(b *testing.B) {
logger := newBenchmarkLogger()
logger.SetFormatter(NewTextFormatter())
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.Info("bench text formatter")
}
}
func BenchmarkInfoJSONFormatter(b *testing.B) {
logger := newBenchmarkLogger()
logger.SetFormatter(NewJSONFormatter())
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.WithField("idx", i).Info("bench json formatter")
}
}
func BenchmarkWithFieldsInfo(b *testing.B) {
logger := newBenchmarkLogger()
fields := Fields{
"user_id": 42,
"module": "auth",
"ok": true,
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
logger.WithFields(fields).Info("bench structured")
}
}

config_api_test.go (new file)

@@ -0,0 +1,190 @@
package starlog
import (
"bytes"
"strings"
"testing"
)
func TestConfigGetAndApply(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetLevel(LvWarning)
logger.SetStdErrLevel(LvCritical)
logger.SetShowColor(false)
logger.SetOnlyColorLevel(false)
logger.SetShowLevel(false)
logger.SetShowFlag(false)
logger.SetShowFieldColor(false)
logger.SetPendingWriteLimit(8)
logger.SetPendingDropPolicy(PendingDropOldest)
logger.SetRedactFailMode(RedactFailMaskAll)
logger.SetRedactMaskToken("[MASK]")
cfg := logger.GetConfig()
if cfg.Level != LvWarning {
t.Fatalf("unexpected level in config snapshot: %d", cfg.Level)
}
if cfg.StdErrLevel != LvCritical {
t.Fatalf("unexpected stderr level in config snapshot: %d", cfg.StdErrLevel)
}
if cfg.ShowColor {
t.Fatalf("show color should be false in config snapshot")
}
if cfg.PendingWriteLimit != 8 {
t.Fatalf("unexpected pending write limit in config snapshot: %d", cfg.PendingWriteLimit)
}
cfg.Level = LvInfo
cfg.ShowColor = true
cfg.OnlyColorLevel = true
cfg.ShowLevel = true
cfg.ShowFlag = true
cfg.ShowFieldColor = true
cfg.LevelColors = map[int][]Attr{
LvInfo: []Attr{FgCyan},
LvError: []Attr{FgRed},
}
cfg.KeywordColors = map[string][]Attr{
"panic": []Attr{FgRed},
}
cfg.FieldTypeColors = map[string][]Attr{
FieldTypeString: []Attr{FgGreen},
}
cfg.FieldValueColors = map[string][]Attr{
"user": []Attr{FgYellow},
}
cfg.PendingWriteLimit = 2
cfg.PendingDropPolicy = PendingDropNewest
cfg.RedactFailMode = RedactFailOpen
cfg.RedactMaskToken = "***"
cfg.RateLimit = RateLimitConfig{
Enable: true,
Levels: []int{LvInfo},
Rate: 3,
Burst: 5,
}
logger.ApplyConfig(cfg)
if logger.GetLevel() != LvInfo {
t.Fatalf("apply config should update level")
}
if !logger.GetShowColor() || !logger.GetOnlyColorLevel() {
t.Fatalf("apply config should update color mode flags")
}
if logger.GetPendingWriteLimit() != 2 {
t.Fatalf("apply config should update pending write limit")
}
if logger.GetPendingDropPolicy() != PendingDropNewest {
t.Fatalf("apply config should update pending drop policy")
}
applied := logger.GetConfig()
if applied.RedactFailMode != RedactFailOpen || applied.RedactMaskToken != "***" {
t.Fatalf("apply config should update redaction options")
}
if !applied.RateLimit.Enable || applied.RateLimit.Rate != 3 || applied.RateLimit.Burst != 5 {
t.Fatalf("apply config should update rate limit options, got %+v", applied.RateLimit)
}
if len(applied.KeywordColors) == 0 || len(applied.LevelColors) == 0 {
t.Fatalf("apply config should keep configured color maps")
}
}
func TestUpdateConfigBatch(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.UpdateConfig(func(cfg *Config) {
cfg.Level = LvError
cfg.ShowLevel = true
cfg.ShowFlag = false
cfg.ShowOriginFile = false
cfg.ShowFuncName = false
cfg.PendingWriteLimit = 4
cfg.PendingDropPolicy = PendingDropOldest
cfg.RedactFailMode = RedactFailOpen
cfg.RedactMaskToken = "[R]"
})
logger.Info("filtered")
logger.Error("visible")
got := buf.String()
if strings.Contains(got, "filtered") {
t.Fatalf("info should be filtered after UpdateConfig, got %q", got)
}
if !strings.Contains(got, "visible") {
t.Fatalf("error should be visible after UpdateConfig, got %q", got)
}
snapshot := logger.GetConfig()
if snapshot.PendingWriteLimit != 4 || snapshot.PendingDropPolicy != PendingDropOldest {
t.Fatalf("update config should apply pending queue settings")
}
}
func TestApplyConfigPendingLimitTrim(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetPendingWriteLimit(5)
logger.SetPendingDropPolicy(PendingDropOldest)
logger.SetSwitching(true)
logger.Infoln("one")
logger.Infoln("two")
logger.Infoln("three")
beforeDrop := logger.GetPendingDropCount()
cfg := logger.GetConfig()
cfg.PendingWriteLimit = 2
cfg.PendingDropPolicy = PendingDropOldest
cfg.Switching = true
logger.ApplyConfig(cfg)
stats := logger.GetPendingStats()
if stats.Length != 2 {
t.Fatalf("pending queue should be trimmed to limit, got length=%d", stats.Length)
}
if logger.GetPendingDropCount() <= beforeDrop {
t.Fatalf("applying lower pending limit should increase drop count")
}
logger.SetSwitching(false)
}
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
if cfg.Level != LvDebug {
t.Fatalf("default level should be LvDebug, got %d", cfg.Level)
}
if cfg.StdErrLevel != LvError {
t.Fatalf("default stderr level should be LvError, got %d", cfg.StdErrLevel)
}
if !cfg.ShowColor || !cfg.ShowStd {
t.Fatalf("default config should enable show color/std")
}
if cfg.PendingWriteLimit != 1024 {
t.Fatalf("default pending write limit should be 1024, got %d", cfg.PendingWriteLimit)
}
if cfg.RateLimit.Burst <= 0 || cfg.RateLimit.MaxKeys <= 0 {
t.Fatalf("default config should include normalized rate limit defaults, got %+v", cfg.RateLimit)
}
if len(cfg.LevelColors) == 0 {
t.Fatalf("default config should include level colors")
}
}
func TestStdConfigAPIBridge(t *testing.T) {
backup := GetConfig()
defer ApplyConfig(backup)
UpdateConfig(func(cfg *Config) {
cfg.Level = LvCritical
cfg.ShowColor = false
cfg.OnlyColorLevel = false
})
cfg := GetConfig()
if cfg.Level != LvCritical {
t.Fatalf("std update config should change level")
}
if cfg.ShowColor || cfg.OnlyColorLevel {
t.Fatalf("std update config should change color flags")
}
}

context_api_test.go (new file)

@@ -0,0 +1,68 @@
package starlog
import (
"bytes"
"context"
"strings"
"testing"
)
func TestInfoContextShortcut(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetContextFieldExtractor(func(ctx context.Context) Fields {
traceID, _ := ctx.Value("trace_id").(string)
if traceID == "" {
return nil
}
return Fields{"trace_id": traceID}
})
ctx := context.WithValue(context.Background(), "trace_id", "t-1")
logger.InfoContext(ctx, "hello")
got := buf.String()
if !strings.Contains(got, "hello") || !strings.Contains(got, "trace_id=t-1") {
t.Fatalf("InfoContext should carry context fields, got %q", got)
}
}
func TestErrorContextShortcut(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetContextFieldExtractor(func(ctx context.Context) Fields {
rid, _ := ctx.Value("rid").(string)
if rid == "" {
return nil
}
return Fields{"rid": rid}
})
ctx := context.WithValue(context.Background(), "rid", "req-9")
logger.ErrorContext(ctx, "fail")
got := buf.String()
if !strings.Contains(got, "fail") || !strings.Contains(got, "rid=req-9") {
t.Fatalf("ErrorContext should carry context fields, got %q", got)
}
}
func TestLogContextShortcut(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetContextFieldExtractor(func(ctx context.Context) Fields {
module, _ := ctx.Value("module").(string)
if module == "" {
return nil
}
return Fields{"module": module}
})
ctx := context.WithValue(context.Background(), "module", "billing")
logger.LogContext(ctx, false, LvNotice, "ctx-log")
got := buf.String()
if !strings.Contains(got, "ctx-log") || !strings.Contains(got, "module=billing") {
t.Fatalf("LogContext should carry context fields, got %q", got)
}
}

core.go

File diff suppressed because it is too large.

docs/DEVELOPMENT.md (new file)

@@ -0,0 +1,72 @@
# Development Guide
This file defines the local test matrix and troubleshooting notes for `starlog`.
## Local Test Matrix
Run these commands from the repository root:
```powershell
go test ./...
go test -race ./...
go test . -run '^$' -bench Benchmark -benchmem -benchtime=100x
go test . -run '^$' -fuzz=FuzzTextAndJSONFormatter -fuzztime=2s
go test . -run '^$' -fuzz=FuzzKeywordHighlight -fuzztime=2s
```
## One-Command Local Check
Use the helper script:
```powershell
powershell -ExecutionPolicy Bypass -File scripts/test-local.ps1
```
The script runs:
1. Unit tests.
2. Race tests (with a precheck).
3. Benchmark smoke.
4. Fuzz smoke.
## Race Troubleshooting (Windows)
If this command fails:
```powershell
go test -race fmt
```
with:
```text
runtime/race: package testmain: cannot find package
```
the problem is the local Go toolchain/runtime environment, not `starlog` code.
Recommended steps:
1. Verify toolchain:
```powershell
where.exe go
go version
go env GOROOT GOPATH GOOS GOARCH CGO_ENABLED
```
2. Clear caches:
```powershell
go clean -cache -testcache -fuzzcache
```
3. Reinstall an official Go distribution for your platform.
4. Re-run:
```powershell
go test -race fmt
```
## CI Mapping
The same matrix is mirrored in:
- `.github/workflows/quality.yml`
Linux executes `-race` and fuzz smoke; Windows keeps unit test coverage.

docs/MIGRATION.md (new file)

@@ -0,0 +1,139 @@
# MIGRATION (V1)
This document describes the recommended path for migrating from older versions and usages to the current `starlog`.
## TL;DR
- The current release is the **V1 compatibility-migration stage**.
- **Legacy APIs keep working; no immediate rewrite is required.**
- Gradually migrate to the `Config` snapshot style and the new capabilities (structured fields, multiple sinks, redaction, test hooks).
## Compatibility Promise (V1)
1. No legacy API is removed.
2. Legacy call patterns keep running.
3. New capabilities arrive incrementally; no big-bang rewrite is required.
## Recommended Migration Order
### Step 0: Change no business logic yet
If production is currently stable, upgrade the dependency first and keep your existing code.
### Step 1: Replace per-option setters with one batched update
Old style (still works):
```go
log.SetShowColor(false)
log.SetShowLevel(true)
log.SetShowFlag(false)
log.SetLevel(starlog.LvInfo)
```
Recommended style (new in V1):
```go
log.UpdateConfig(func(cfg *starlog.Config) {
cfg.ShowColor = false
cfg.ShowLevel = true
cfg.ShowFlag = false
cfg.Level = starlog.LvInfo
})
```
Benefits:
- Multiple options take effect atomically, reducing concurrent intermediate states.
- Configuration changes are centralized, which simplifies auditing and review.
### Step 2: Gradually introduce structured fields and Context
```go
log.WithField("trace_id", traceID).Info("request done")
log.WithContext(ctx).Error("call downstream failed")
```
### Step 3: Upgrade the output pipeline
- Upgrade single-destination writes to `SetSinks(...)` / `MultiSink`
- Use `RouteHandler` when logs must be split by level into separate files
- For archiving, prefer the `RotatePolicy` primary path combined with `RotateManageOptions`
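A minimal sketch of this step, reusing the `Route`/`RouteHandler` API documented in docs/USAGE.md (file paths are illustrative and error handling is elided):
```go
brief, _ := os.OpenFile("./logs/brief.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
errf, _ := os.OpenFile("./logs/err.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
route := starlog.NewRouteHandler(
	starlog.Route{
		Name:      "brief",
		Match:     starlog.MatchLevels(starlog.LvInfo, starlog.LvNotice),
		Formatter: starlog.NewTextFormatter(),
		Sink:      starlog.NewWriterSink(brief),
	},
	starlog.Route{
		Name:      "error",
		Match:     starlog.MatchAtLeast(starlog.LvError),
		Formatter: starlog.NewJSONFormatter(),
		Sink:      starlog.NewWriterSink(errf),
	},
)
log.SetEntryHandler(route)
```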
### Step 4: Security and testing
- Redaction: `SetRedactor` / `AddRedactRule`
- Test assertions: `Observer` / `TestHook`
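A short sketch of both, using the rule constructors and test hook shown in docs/USAGE.md (the `[MASK]` token and field names are illustrative):
```go
// Redaction: mask sensitive fields and fail closed on rule errors.
log.AddRedactRule(starlog.NewSensitiveFieldRule("[MASK]", "password", "token"))
log.SetRedactFailMode(starlog.RedactFailMaskAll)

// Testing (inside a func TestXxx(t *testing.T)): capture entries and assert.
hook := starlog.NewTestHook(log)
defer hook.Close()
log.Info("hello")
if hook.Count() == 0 {
	t.Fatal("expected at least one log entry")
}
```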
### Step 5: Consolidate lifecycle handling (recommended)
Old style (still works):
```go
defer starlog.CloseStd()
```
Recommended style:
```go
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
defer starlog.Shutdown(ctx)
```
Notes:
- `Shutdown(ctx)` waits for async handlers to drain, then closes resources in one place.
- `Close()` still works but does not wait for the async handler queue.
## Common Old Patterns and Their Recommended Replacements
1. Per-option setters
   Recommended: batch changes with `UpdateConfig`
2. Plain string-only logging
   Recommended: add `WithField(s)` and `WithError` on critical paths
3. Writer-only output
   Recommended: `MultiSink` + `RouteHandler` for per-level splitting
4. Plugging in the standard library `log.Logger`
   Recommended: `AsStdlibLoggerWithOptions` (prefix/flags/level-mapper)
5. Hand-wiring many setters
   Recommended: `NewProductionConfig/NewDevelopmentConfig` + `ApplyProductionConfig/ApplyDevelopmentConfig`
6. Legacy `StartArchive` rotation startup
   Recommended: `StartRotatePolicy` / `StartManagedRotatePolicy`; template scenarios can use `StartRotateByTime/BySize/ByTimeSize` directly
## Legacy APIs Remain Available (important)
The following legacy/compatibility paths still work:
- All legacy setters/getters
- Historical misspelled compatibility names (e.g. `EnbaleWrite`, `IsWriteStoed`, `HookBeforArchive`)
- The legacy `Archive` model (new code should prefer `RotatePolicy`)
- Legacy lifecycle entry points (e.g. `CloseStd`, `Close`); new code should prefer `Shutdown(ctx)` / `CloseLogFile`
## Post-Upgrade Verification
1. Run unit tests: `go test ./...`
2. Run the race detector: `go test -race ./...`
3. Run the rule checks: `sentrux check .`
4. Verify in particular:
- Level filtering behaves as expected
- Archive file counts and retention policy
- Async drop counts and write error counts
## When to Push the Migration Further
If any of the following applies, prioritize the recommended patterns:
- Multiple goroutines change the logging configuration concurrently
- You need a unified log field convention across modules
- You need audit-grade logging (redaction, per-level routing, archive governance)

docs/USAGE.md (new file)

@@ -0,0 +1,686 @@
# starlog Usage Guide
This document covers the common scenarios from first integration to production rollout.
- Module: `b612.me/starlog`
- Go version: `1.16+`
## 1. Quick Start
```go
package main
import (
"os"
"b612.me/starlog"
)
func main() {
log := starlog.NewStarlog(os.Stdout)
log.SetName("demo")
log.SetShowStd(true)
log.SetColorMode(starlog.ColorModeLevelOnly)
log.Info("service start")
log.WithField("user_id", 42).Info("login ok")
}
```
## 2. Recommended Initialization
### 2.1 Development
```go
log := starlog.NewDevelopment(os.Stdout)
log.SetName("my-service")
log.SetShowStd(true)
```
Characteristics:
- Default `Debug` level
- Color mode: `ColorModeLevelOnly`
- Shows source location
### 2.2 Production
```go
log := starlog.NewProduction(nil)
log.SetName("my-service")
if err := starlog.SetLogFile("./logs/app.log", log, true); err != nil {
panic(err)
}
```
Characteristics:
- Default `Info` level
- Default `JSONFormatter`
- No console output by default
### 2.3 Applying a preset to an existing logger
```go
log.ApplyProductionConfig()
// or
log.ApplyDevelopmentConfig()
```
## 3. Output APIs and Newline Rules
### 3.1 Output functions
- `Info/Error/...`: `fmt.Sprint` semantics
- `Infof/Errorf/...`: `fmt.Sprintf` semantics
- `Infoln/Errorln/...`: `fmt.Sprintln` semantics
By default no trailing `\n` is appended (except for the `ln` family).
### 3.2 Automatic newline appending
```go
log.SetAutoAppendNewline(true)
```
Behavior:
- No trailing `\n`: one is appended
- Trailing `\n` present: left unchanged
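For example (a small sketch; the effect is noted in the comments):
```go
log.SetAutoAppendNewline(true)
log.Info("no trailing newline")    // a "\n" is appended automatically
log.Info("has trailing newline\n") // left as-is; no double newline
```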
## 4. Structured Logging
### 4.1 Fields and errors
```go
log.WithField("order_id", "O-1001").Info("create order")
log.WithFields(starlog.Fields{
"trace_id": "t-001",
"module": "payment",
}).WithError(err).Error("charge failed")
```
### 4.2 Context injection
```go
log.SetContextFieldExtractor(func(ctx context.Context) starlog.Fields {
traceID, _ := ctx.Value("trace_id").(string)
if traceID == "" {
return nil
}
return starlog.Fields{"trace_id": traceID}
})
log.WithContext(ctx).Info("request done")
log.InfoContext(ctx, "request done")
```
## 5. Level Filtering
```go
log.SetLevel(starlog.LvInfo)
if log.IsLevelEnabled(starlog.LvDebug) {
log.Debug("expensive debug")
}
lv, err := starlog.ParseLevel("warn") // LvWarning
_ = lv
_ = err
```
`SetStdErrLevel` only controls the stdout/stderr routing threshold; it is not a log filtering threshold.
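A sketch of the distinction, assuming entries at or above the stderr threshold are routed to stderr (the default threshold is `LvError` per `DefaultConfig`):
```go
log.SetLevel(starlog.LvDebug)       // filtering: everything is emitted
log.SetStdErrLevel(starlog.LvError) // routing: Error and above go to stderr
log.Info("routed to stdout")
log.Error("routed to stderr")
```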
## 6. Display, Color, and Formatting
### 6.1 Color modes
```go
log.SetColorMode(starlog.ColorModeOff)
log.SetColorMode(starlog.ColorModeFullLine)
log.SetColorMode(starlog.ColorModeLevelOnly)
```
### 6.2 Keyword and field coloring
```go
log.SetKeywordColor("timeout", []starlog.Attr{starlog.FgRed, starlog.Bold})
log.SetShowFieldColor(true)
log.SetFieldKeyColor([]starlog.Attr{starlog.FgHiBlue})
log.SetFieldTypeColor(starlog.FieldTypeNumber, []starlog.Attr{starlog.FgYellow})
log.SetFieldValueColor("user_id", []starlog.Attr{starlog.FgCyan})
```
### 6.3 Keyword presets (Moba style)
```go
log.ApplyKeywordPreset(starlog.KeywordPresetMobaLite) // replaces the existing keyword mapping
// or
log.MergeKeywordPreset(starlog.KeywordPresetMobaFull) // merges onto the existing mapping
// Optional match modes (off by default, preserving legacy behavior)
log.SetKeywordMatchOptions(starlog.KeywordMatchOptions{
IgnoreCase: true, // case-insensitive matching
WholeWord: true, // match whole words only
})
```
Notes:
- `MobaLite`: highlights common words (e.g. `error/warn/true/false/success`)
- `MobaFull`: adds more runtime vocabulary on top of Lite
- After applying a preset you can still override individual keyword colors with `SetKeywordColor(...)`
### 6.4 Formatter
```go
log.SetFormatter(starlog.NewTextFormatter())
log.SetFormatter(starlog.NewJSONFormatter())
```
Custom formatter:
```go
type Formatter interface { Format(*Entry) ([]byte, error) }
```
## 7. Output Pipeline (Writer / Sink / MultiSink)
### 7.1 Writer
```go
log.SetWriter(os.Stdout)
```
### 7.2 Sink
```go
type Sink interface {
Write([]byte) error
Close() error
}
log.SetSink(mySink)
```
### 7.3 MultiSink
```go
multi := starlog.NewMultiSink(fileSink, networkSink)
multi.SetContinueOnError(true)
log.SetSink(multi)
stats := multi.GetStats()
_ = stats
```
## 8. Level-Based Routing (RouteHandler)
Example: `info+notice -> brief.log`, `error+ -> err.log`:
```go
brief, _ := os.OpenFile("./logs/brief.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
errf, _ := os.OpenFile("./logs/err.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
route := starlog.NewRouteHandler(
starlog.Route{
Name: "brief",
Match: starlog.MatchLevels(starlog.LvInfo, starlog.LvNotice),
Formatter: starlog.NewTextFormatter(),
Sink: starlog.NewWriterSink(brief),
},
starlog.Route{
Name: "error",
Match: starlog.MatchAtLeast(starlog.LvError),
Formatter: starlog.NewJSONFormatter(),
Sink: starlog.NewWriterSink(errf),
},
)
log.SetEntryHandler(route)
```
To keep both the main log and the routed logs, compose handlers with `ChainHandler`.
## 9. Rotation and Archiving
### 9.1 `RotatePolicy` semantics
```go
type RotatePolicy interface {
ShouldRotate(starlog.FileInfo, *starlog.Entry) bool
NextPath(string, time.Time) string
}
// Optional extension interface
type RotateArchivePathProvider interface {
ArchivePath(string, time.Time) string
}
```
Semantics:
- `ShouldRotate`: decides whether a rotation should be triggered
- `NextPath`: returns the archive path for the old log being rotated out
- If the policy implements `ArchivePath`, the framework prefers `ArchivePath`
- If `ArchivePath` returns an empty string, it falls back to `NextPath`
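A minimal sketch of a policy implementing the optional provider; the daily scheme and type name are made up, and it assumes `starlog.FileInfo` exposes a `ModTime()` accessor like `os.FileInfo`:
```go
// DailyPolicy rotates when the file's last-modified day differs from today.
type DailyPolicy struct{}

func (DailyPolicy) ShouldRotate(fi starlog.FileInfo, _ *starlog.Entry) bool {
	if fi == nil {
		return false
	}
	fy, fm, fd := fi.ModTime().Date()
	ny, nm, nd := time.Now().Date()
	return fy != ny || fm != nm || fd != nd
}

// NextPath is the fallback archive path.
func (DailyPolicy) NextPath(current string, now time.Time) string {
	return current + "." + now.Format("20060102") + ".bak"
}

// ArchivePath is preferred because DailyPolicy also satisfies
// RotateArchivePathProvider; returning "" falls back to NextPath.
func (DailyPolicy) ArchivePath(current string, now time.Time) string {
	return current + "." + now.Format("20060102")
}
```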
### 9.2 Main-log rotation (recommended entry points)
```go
if err := starlog.StartRotatePolicy(log, policy, 1); err != nil {
panic(err)
}
```
Built-in templates:
```go
_ = starlog.StartRotateByTime(log, 24*time.Hour, 10)
_ = starlog.StartRotateBySize(log, 200*1024*1024, 5)
_ = starlog.StartRotateByTimeSize(log, 24*time.Hour, 200*1024*1024, 5)
```
### 9.3 Managed rotation (retention, compression)
```go
opts := starlog.RotateManageOptions{
MaxBackups: 7,
MaxAge: 7 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
}
if err := starlog.StartManagedRotatePolicy(log, policy, 1, opts); err != nil {
panic(err)
}
```
Managed template variants:
```go
_ = starlog.StartManagedRotateByTime(log, 24*time.Hour, 10, opts)
_ = starlog.StartManagedRotateBySize(log, 200*1024*1024, 5, opts)
_ = starlog.StartManagedRotateByTimeSize(log, 24*time.Hour, 200*1024*1024, 5, opts)
```
### 9.4 Rotating RouteHandler-routed files
Routed sinks can use rotating sinks directly:
```go
debugSink, err := starlog.NewManagedRotateByTimeSink(
"./logs/debug.log",
true,
24*time.Hour,
30*time.Second,
starlog.RotateManageOptions{MaxBackups: 30, Compress: true, Pattern: "20060102"},
)
if err != nil {
panic(err)
}
route := starlog.NewRouteHandler(
starlog.Route{
Name: "debug-info",
Match: starlog.MatchLevels(starlog.LvDebug, starlog.LvInfo),
Formatter: starlog.NewTextFormatter(),
Sink: debugSink,
},
)
log.SetEntryHandler(route)
```
### 9.5 Applying a custom rotation policy to both file types
```go
type HourOrSizePolicy struct {
HourInterval time.Duration
MaxBytes int64
}
func (p HourOrSizePolicy) ShouldRotate(fi starlog.FileInfo, _ *starlog.Entry) bool {
if p.MaxBytes > 0 && fi.Size() >= p.MaxBytes {
return true
}
if p.HourInterval > 0 && fi.ModTime().Add(p.HourInterval).Before(time.Now()) {
return true
}
return false
}
func (p HourOrSizePolicy) NextPath(path string, now time.Time) string {
ext := filepath.Ext(path)
base := strings.TrimSuffix(path, ext)
return base + "." + now.Format("20060102-150405") + ext
}
```
Apply it to the main log:
```go
mainLog := starlog.NewProduction(nil)
_ = starlog.SetLogFile("./logs/app.log", mainLog, true)
policy := HourOrSizePolicy{HourInterval: 6 * time.Hour, MaxBytes: 300 * 1024 * 1024}
_ = starlog.StartManagedRotatePolicy(mainLog, policy, 1, starlog.RotateManageOptions{
MaxBackups: 20,
MaxAge: 14 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
})
```
Apply it to a routed file:
```go
debugSink, _ := starlog.NewManagedRotatePolicySink(
"./logs/debug.log",
true,
policy,
30*time.Second,
starlog.RotateManageOptions{
MaxBackups: 30,
MaxAge: 30 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
},
)
route := starlog.NewRouteHandler(
starlog.Route{
Name: "debug-info",
Match: starlog.MatchLevels(starlog.LvDebug, starlog.LvInfo),
Formatter: starlog.NewTextFormatter(),
Sink: debugSink,
},
)
mainLog.SetEntryHandler(route)
```
### 9.6 Compatibility entry point
`StartArchive` still works, but new code should prefer `StartRotatePolicy` / `StartManagedRotatePolicy`.
## 10. Async, Callbacks, and the Pending Queue
```go
log.SetHandler(func(data starlog.LogData) {
// async display callback
})
log.SetAsyncFallbackToSync(true)
log.SetAsyncHandlerTimeout(100 * time.Millisecond)
log.SetEntryHandler(myHandler)
log.SetEntryHandlerTimeout(200 * time.Millisecond)
log.SetPendingWriteLimit(1024)
log.SetPendingDropPolicy(starlog.PendingDropOldest)
```
Observables:
- `GetAsyncDropCount()`
- `GetPendingStats()`
## 11. Flood Control (dedup / sampling / rate limiting)
Main-pipeline order: `dedup -> sampling -> rate limiting`
```go
log.SetDedupConfig(starlog.DedupConfig{
Enable: true,
Levels: []int{starlog.LvInfo, starlog.LvNotice},
Window: 2 * time.Second,
Scope: starlog.DedupScopeByKey,
})
log.SetSamplingConfig(starlog.SamplingConfig{
Enable: true,
Levels: []int{starlog.LvInfo},
Rate: 0.2,
Scope: starlog.SamplingScopeByKey,
})
log.SetRateLimitConfig(starlog.RateLimitConfig{
Enable: true,
Levels: []int{starlog.LvInfo},
Rate: 50,
Burst: 100,
Scope: starlog.RateLimitScopeByKey,
})
```
## 12. Redaction
```go
log.AddRedactRule(starlog.NewSensitiveFieldRule("[MASK]", "password", "token"))
log.AddRedactRule(starlog.NewMessageRegexRule(regexp.MustCompile(`\b1\d{10}\b`), "[PHONE]"))
log.SetRedactFailMode(starlog.RedactFailMaskAll)
```
Failure policies:
- `RedactFailMaskAll` (recommended)
- `RedactFailOpen`
- `RedactFailDrop`
## 13. Metrics
```go
snapshot := log.GetMetricsSnapshot()
_ = snapshot
```
Common statistics:
- `GetPendingStats()`
- `GetSamplingStats()`
- `GetDedupStats()`
- `GetRateLimitStats()`
- `GetWriteErrorCount()`
- `GetAsyncDropCount()`
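A sketch that surfaces a few of these counters; the numeric return values and the `Length` field on the pending stats are assumptions based on the tests in this commit:
```go
// Assumes: import "fmt"
if n := log.GetWriteErrorCount(); n > 0 {
	fmt.Printf("log write errors: %d\n", n)
}
if d := log.GetAsyncDropCount(); d > 0 {
	fmt.Printf("async display drops: %d\n", d)
}
stats := log.GetPendingStats()
fmt.Printf("pending entries: %d\n", stats.Length)
```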
## 14. Lifecycle and Shutdown
Semantics:
- `Flush()`: flushes pending writes
- `Sync()`: `Flush` + the underlying `Sync()` (if supported)
- `Close()`: closes resources without waiting for the async queue to drain
- `Shutdown(ctx)`: waits for the async drain, then closes resources
Recommended:
```go
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
defer log.Shutdown(ctx)
```
For the global logger use `starlog.Shutdown(ctx)`.
## 15. Standard Library Bridging
```go
stdLogger := log.AsStdlibLoggerWithOptions(starlog.LvInfo,
starlog.WithStdlibPrefix("http "),
starlog.WithStdlibFlags(0),
starlog.WithStdlibTrimNewline(true),
)
stdLogger.Println("server started")
```
You can also bridge to an `io.Writer`: `AsWriter(level)` / `AsWriterWithOptions(...)`.
## 16. Testing Utilities
### 16.1 Observer
```go
obs := starlog.NewObserverWithLimit(200)
log.SetEntryHandler(obs)
entries := obs.Entries()
_ = entries
```
### 16.2 TestHook
```go
hook := starlog.NewTestHook(log)
defer hook.Close()
log.Info("hello")
if hook.Count() == 0 {
panic("expect log")
}
```
## 17. Config Snapshot API (recommended for concurrent reconfiguration)
```go
log.UpdateConfig(func(cfg *starlog.Config) {
cfg.Level = starlog.LvInfo
cfg.ShowColor = false
cfg.AutoAppendNewline = true
cfg.Sampling = starlog.SamplingConfig{Enable: true, Rate: 0.3}
cfg.Dedup = starlog.DedupConfig{Enable: true, Window: time.Second}
})
```
Companion APIs:
- `GetConfig()`
- `ApplyConfig(cfg)`
- `DefaultConfig()`
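`GetConfig`/`ApplyConfig` pair naturally for save-and-restore, the same round-trip the keyword-option tests in this commit perform:
```go
saved := log.GetConfig() // snapshot current settings
log.UpdateConfig(func(cfg *starlog.Config) {
	cfg.Level = starlog.LvDebug // temporarily verbose
})
// ... debugging session ...
log.ApplyConfig(saved) // restore the snapshot
```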
## 18. Minimal Production Template
```go
func NewProdLogger(path string) *starlog.StarLogger {
log := starlog.NewProduction(nil)
log.SetName("svc")
log.SetAutoAppendNewline(true)
if err := starlog.SetLogFile(path, log, true); err != nil {
panic(err)
}
_ = starlog.StartManagedRotateBySize(log, 200*1024*1024, 5, starlog.RotateManageOptions{
MaxBackups: 14,
MaxAge: 14 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
})
return log
}
```
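Typical call site for the template above:
```go
log := NewProdLogger("./logs/app.log")
defer func() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_ = log.Shutdown(ctx)
}()
log.Infoln("service booted")
```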
## 19. Complete Hands-On Example (main log + per-goroutine separation + dual-file rotation)
Goals covered:
1. Use `starlog`
2. Use a new `LOG` flag in each goroutine (`NewFlag()` assigns a random one automatically)
3. Set the main log file
4. Use compressed rotation for the main log
5. Additionally persist `debug/info` to `debug.log`
6. Give `debug.log` an independent rotation policy
```go
package main
import (
"context"
"os"
"path/filepath"
"sync"
"time"
"b612.me/starlog"
)
func main() {
_ = os.MkdirAll("./logs", 0755)
mainLog := starlog.NewProduction(nil)
mainLog.SetName("app-main")
mainLog.SetAutoAppendNewline(true)
mainPath := filepath.Clean("./logs/app.log")
if err := starlog.SetLogFile(mainPath, mainLog, true); err != nil {
panic(err)
}
if err := starlog.StartManagedRotateBySize(mainLog, 200*1024*1024, 5, starlog.RotateManageOptions{
MaxBackups: 14,
MaxAge: 14 * 24 * time.Hour,
Compress: true,
Pattern: "20060102-150405",
}); err != nil {
panic(err)
}
debugPath := filepath.Clean("./logs/debug.log")
debugSink, err := starlog.NewManagedRotateByTimeSink(debugPath, true, 24*time.Hour, 30*time.Second, starlog.RotateManageOptions{
MaxBackups: 30,
MaxAge: 30 * 24 * time.Hour,
Compress: true,
Pattern: "20060102",
})
if err != nil {
panic(err)
}
route := starlog.NewRouteHandler(
starlog.Route{
Name: "debug-info-to-debug-log",
Match: starlog.MatchLevels(starlog.LvDebug, starlog.LvInfo),
Formatter: starlog.NewTextFormatter(),
Sink: debugSink,
},
)
mainLog.SetEntryHandler(route)
var wg sync.WaitGroup
for i := 0; i < 3; i++ {
wg.Add(1)
go func(worker int) {
defer wg.Done()
workerLog := mainLog.NewFlag()
workerLog.WithField("worker", worker).Debug("worker start")
workerLog.WithField("worker", worker).Info("worker running")
if worker == 1 {
workerLog.WithField("worker", worker).Error("worker failed")
}
}(i)
}
wg.Wait()
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
_ = mainLog.Shutdown(ctx)
}
```
## 20. FAQ
### Q1: Do `Infof`/`Errorf` need a manual `\n`?
By default, yes. With `SetAutoAppendNewline(true)`, a missing trailing `\n` is appended automatically.
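For example:
```go
log.SetAutoAppendNewline(true)
log.Infof("user %d logged in", 42) // "\n" appended automatically
```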
### Q2异步模式会丢日志吗
- 显示回调链路:可能因超时或队列满发生丢弃,可通过计数观测
- writer/sink 主写链路:只要未被过滤(级别/去重/采样/限流)且写入正常,会落盘
### Q3新项目应该选 `Archive` 还是 `RotatePolicy`
优先 `RotatePolicy``Archive` 作为兼容路径保留。

View File

@ -1,34 +1,15 @@
package starlog
import (
"os"
)
import "b612.me/starlog/internal/fsutil"
// Check whether the file/folder exists
func Exists(path string) bool {
_, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
return false
}
return true
return fsutil.Exists(path)
}
// IsFile reports whether the given path is a file;
// true if it is a file, false if it is not a file or the path is invalid
func IsFile(fpath string) bool {
s, err := os.Stat(fpath)
if err != nil {
return false
}
return !s.IsDir()
func IsFile(path string) bool {
return fsutil.IsFile(path)
}
// IsFolder reports whether the given path is a folder;
// true if it is a folder, false if it is not a folder or the path is invalid
func IsFolder(fpath string) bool {
s, err := os.Stat(fpath)
if err != nil {
return false
}
return s.IsDir()
func IsFolder(path string) bool {
return fsutil.IsFolder(path)
}

View File

@ -5,18 +5,15 @@ package starlog
import (
"os"
"syscall"
"time"
"b612.me/starlog/internal/fsutil"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Ctimespec)
return fsutil.GetFileCreationTime(fileinfo)
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Atimespec)
return fsutil.GetFileAccessTime(fileinfo)
}

View File

@ -5,18 +5,15 @@ package starlog
import (
"os"
"syscall"
"time"
"b612.me/starlog/internal/fsutil"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Ctim)
return fsutil.GetFileCreationTime(fileinfo)
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Atim)
return fsutil.GetFileAccessTime(fileinfo)
}

View File

@ -5,16 +5,15 @@ package starlog
import (
"os"
"syscall"
"time"
"b612.me/starlog/internal/fsutil"
)
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
d := fileinfo.Sys().(*syscall.Win32FileAttributeData)
return time.Unix(0, d.CreationTime.Nanoseconds())
return fsutil.GetFileCreationTime(fileinfo)
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
d := fileinfo.Sys().(*syscall.Win32FileAttributeData)
return time.Unix(0, d.LastAccessTime.Nanoseconds())
return fsutil.GetFileAccessTime(fileinfo)
}

59
fuzz_go118_test.go Normal file
View File

@ -0,0 +1,59 @@
//go:build go1.18
// +build go1.18
package starlog
import (
"context"
"testing"
)
func FuzzTextAndJSONFormatter(f *testing.F) {
f.Add("hello", "user_id", "42")
f.Add("error happened", "token", "abc123")
f.Fuzz(func(t *testing.T, message string, key string, value string) {
entry := &Entry{
Context: context.Background(),
Level: LvInfo,
LevelName: "INFO",
Message: message,
Fields: Fields{},
}
if key != "" {
entry.Fields[key] = value
}
text, err := NewTextFormatter().Format(entry)
if err != nil {
t.Fatalf("text format failed: %v", err)
}
if len(text) == 0 && (message != "" || len(entry.Fields) > 0) {
t.Fatalf("text formatter output is unexpectedly empty")
}
js, err := NewJSONFormatter().Format(entry)
if err != nil {
t.Fatalf("json format failed: %v", err)
}
if len(js) == 0 {
t.Fatalf("json formatter output is unexpectedly empty")
}
})
}
func FuzzKeywordHighlight(f *testing.F) {
f.Add("panic at line", "panic")
f.Add("token leaked", "token")
f.Fuzz(func(t *testing.T, input string, keyword string) {
core := newLogCore(nil)
core.showColor = true
core.onlyColorLevel = true
core.keywordColors = map[string][]Attr{}
if keyword != "" {
core.keywordColors[keyword] = []Attr{FgRed}
}
_ = core.highlightKeywords(input)
})
}

View File

@ -0,0 +1,88 @@
package archivex
import (
"context"
"os"
"b612.me/starlog/internal/runtimex"
)
type FileRecord struct {
FullPath string
Pointer *os.File
}
type Runner struct {
Cancel context.CancelFunc
Done chan struct{}
}
type Store struct {
files runtimex.MapKV
runners runtimex.MapKV
}
func NewStore() *Store {
return &Store{
files: runtimex.NewMapKV(),
runners: runtimex.NewMapKV(),
}
}
func (store *Store) SetFile(id string, record FileRecord) error {
if store == nil {
return nil
}
return store.files.Store(id, record)
}
func (store *Store) GetFile(id string) (FileRecord, bool) {
if store == nil {
return FileRecord{}, false
}
val := store.files.MustGet(id)
if val == nil {
return FileRecord{}, false
}
record, ok := val.(FileRecord)
if !ok {
return FileRecord{}, false
}
return record, true
}
func (store *Store) DeleteFile(id string) error {
if store == nil {
return nil
}
return store.files.Delete(id)
}
func (store *Store) SetRunner(id string, runner *Runner) error {
if store == nil {
return nil
}
return store.runners.Store(id, runner)
}
func (store *Store) GetRunner(id string) (*Runner, bool) {
if store == nil {
return nil, false
}
val := store.runners.MustGet(id)
if val == nil {
return nil, false
}
runner, ok := val.(*Runner)
if !ok || runner == nil {
return nil, false
}
return runner, true
}
func (store *Store) DeleteRunner(id string) error {
if store == nil {
return nil
}
return store.runners.Delete(id)
}

View File

@ -0,0 +1,58 @@
package archivex
import (
"context"
"io/ioutil"
"os"
"testing"
)
func TestStoreFileLifecycle(t *testing.T) {
store := NewStore()
tmp, err := ioutil.TempFile("", "starlog-archivex-*.log")
if err != nil {
t.Fatalf("TempFile failed: %v", err)
}
defer os.Remove(tmp.Name())
defer tmp.Close()
rec := FileRecord{FullPath: tmp.Name(), Pointer: tmp}
if err := store.SetFile("id-1", rec); err != nil {
t.Fatalf("SetFile failed: %v", err)
}
got, ok := store.GetFile("id-1")
if !ok {
t.Fatalf("GetFile should return stored record")
}
if got.FullPath != rec.FullPath || got.Pointer != rec.Pointer {
t.Fatalf("unexpected file record: %+v", got)
}
if err := store.DeleteFile("id-1"); err != nil {
t.Fatalf("DeleteFile failed: %v", err)
}
if _, ok := store.GetFile("id-1"); ok {
t.Fatalf("record should not exist after DeleteFile")
}
}
func TestStoreRunnerLifecycle(t *testing.T) {
store := NewStore()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runner := &Runner{Cancel: cancel, Done: make(chan struct{})}
if err := store.SetRunner("r-1", runner); err != nil {
t.Fatalf("SetRunner failed: %v", err)
}
got, ok := store.GetRunner("r-1")
if !ok || got != runner {
t.Fatalf("GetRunner should return stored runner")
}
if err := store.DeleteRunner("r-1"); err != nil {
t.Fatalf("DeleteRunner failed: %v", err)
}
if _, ok := store.GetRunner("r-1"); ok {
t.Fatalf("runner should not exist after DeleteRunner")
}
_ = ctx
}

27
internal/fsutil/common.go Normal file
View File

@ -0,0 +1,27 @@
package fsutil
import "os"
func Exists(path string) bool {
_, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
func IsFile(path string) bool {
stat, err := os.Stat(path)
if err != nil {
return false
}
return !stat.IsDir()
}
func IsFolder(path string) bool {
stat, err := os.Stat(path)
if err != nil {
return false
}
return stat.IsDir()
}

View File

@ -0,0 +1,22 @@
//go:build darwin
// +build darwin
package fsutil
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Ctimespec)
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Atimespec)
}

View File

@ -0,0 +1,22 @@
//go:build linux
// +build linux
package fsutil
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Ctim)
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
return timespecToTime(fileinfo.Sys().(*syscall.Stat_t).Atim)
}

View File

@ -0,0 +1,20 @@
//go:build windows
// +build windows
package fsutil
import (
"os"
"syscall"
"time"
)
func GetFileCreationTime(fileinfo os.FileInfo) time.Time {
data := fileinfo.Sys().(*syscall.Win32FileAttributeData)
return time.Unix(0, data.CreationTime.Nanoseconds())
}
func GetFileAccessTime(fileinfo os.FileInfo) time.Time {
data := fileinfo.Sys().(*syscall.Win32FileAttributeData)
return time.Unix(0, data.LastAccessTime.Nanoseconds())
}

View File

@ -0,0 +1,338 @@
package multisinkx
import (
"fmt"
"sync"
"sync/atomic"
)
type State string
const (
StateHealthy State = "healthy"
StateDegraded State = "degraded"
StateRecovered State = "recovered"
)
const (
stateHealthy uint32 = iota
stateDegraded
stateRecovered
)
type Stats struct {
Index int
Writes uint64
WriteErrors uint64
Closes uint64
CloseErrors uint64
ConsecutiveWriteErrors uint64
ConsecutiveCloseErrors uint64
LastWriteError string
LastCloseError string
State State
}
type Snapshot struct {
ContinueOnError bool
Sinks []Stats
}
type Sink interface {
Write([]byte) error
Close() error
}
type slot struct {
sink Sink
writeCount uint64
writeErrorCount uint64
closeCount uint64
closeErrorCount uint64
consecutiveWriteErrors uint64
consecutiveCloseErrors uint64
state uint32
mu sync.RWMutex
lastWriteError string
lastCloseError string
}
func newSlot(sink Sink) *slot {
result := &slot{
sink: sink,
}
atomic.StoreUint32(&result.state, stateHealthy)
return result
}
func (s *slot) setLastWriteError(err error) {
msg := ""
if err != nil {
msg = err.Error()
}
s.mu.Lock()
s.lastWriteError = msg
s.mu.Unlock()
}
func (s *slot) setLastCloseError(err error) {
msg := ""
if err != nil {
msg = err.Error()
}
s.mu.Lock()
s.lastCloseError = msg
s.mu.Unlock()
}
func (s *slot) setStateHealthyOrRecovered() {
if atomic.LoadUint64(&s.writeErrorCount)+atomic.LoadUint64(&s.closeErrorCount) > 0 {
atomic.StoreUint32(&s.state, stateRecovered)
return
}
atomic.StoreUint32(&s.state, stateHealthy)
}
func (s *slot) observeWrite(err error) {
atomic.AddUint64(&s.writeCount, 1)
if err == nil {
atomic.StoreUint64(&s.consecutiveWriteErrors, 0)
s.setLastWriteError(nil)
s.setStateHealthyOrRecovered()
return
}
atomic.AddUint64(&s.writeErrorCount, 1)
atomic.AddUint64(&s.consecutiveWriteErrors, 1)
atomic.StoreUint32(&s.state, stateDegraded)
s.setLastWriteError(err)
}
func (s *slot) observeClose(err error) {
atomic.AddUint64(&s.closeCount, 1)
if err == nil {
atomic.StoreUint64(&s.consecutiveCloseErrors, 0)
s.setLastCloseError(nil)
s.setStateHealthyOrRecovered()
return
}
atomic.AddUint64(&s.closeErrorCount, 1)
atomic.AddUint64(&s.consecutiveCloseErrors, 1)
atomic.StoreUint32(&s.state, stateDegraded)
s.setLastCloseError(err)
}
func (s *slot) snapshot(index int) Stats {
lastWriteErr := ""
lastCloseErr := ""
s.mu.RLock()
lastWriteErr = s.lastWriteError
lastCloseErr = s.lastCloseError
s.mu.RUnlock()
return Stats{
Index: index,
Writes: atomic.LoadUint64(&s.writeCount),
WriteErrors: atomic.LoadUint64(&s.writeErrorCount),
Closes: atomic.LoadUint64(&s.closeCount),
CloseErrors: atomic.LoadUint64(&s.closeErrorCount),
ConsecutiveWriteErrors: atomic.LoadUint64(&s.consecutiveWriteErrors),
ConsecutiveCloseErrors: atomic.LoadUint64(&s.consecutiveCloseErrors),
LastWriteError: lastWriteErr,
LastCloseError: lastCloseErr,
State: decodeState(atomic.LoadUint32(&s.state)),
}
}
func (s *slot) resetStats() {
atomic.StoreUint64(&s.writeCount, 0)
atomic.StoreUint64(&s.writeErrorCount, 0)
atomic.StoreUint64(&s.closeCount, 0)
atomic.StoreUint64(&s.closeErrorCount, 0)
atomic.StoreUint64(&s.consecutiveWriteErrors, 0)
atomic.StoreUint64(&s.consecutiveCloseErrors, 0)
atomic.StoreUint32(&s.state, stateHealthy)
s.mu.Lock()
s.lastWriteError = ""
s.lastCloseError = ""
s.mu.Unlock()
}
func decodeState(state uint32) State {
switch state {
case stateDegraded:
return StateDegraded
case stateRecovered:
return StateRecovered
default:
return StateHealthy
}
}
type MultiSink struct {
mu sync.RWMutex
slots []*slot
continueOnError bool
}
func New(sinks ...Sink) *MultiSink {
multi := &MultiSink{
continueOnError: true,
slots: make([]*slot, 0, len(sinks)),
}
multi.SetSinks(sinks...)
return multi
}
func (sink *MultiSink) SetSinks(sinks ...Sink) {
if sink == nil {
return
}
filtered := make([]Sink, 0, len(sinks))
for _, item := range sinks {
if item == nil {
continue
}
filtered = append(filtered, item)
}
slots := make([]*slot, 0, len(filtered))
for _, item := range filtered {
slots = append(slots, newSlot(item))
}
sink.mu.Lock()
sink.slots = slots
sink.mu.Unlock()
}
func (sink *MultiSink) AddSink(item Sink) {
if sink == nil || item == nil {
return
}
sink.mu.Lock()
sink.slots = append(sink.slots, newSlot(item))
sink.mu.Unlock()
}
func (sink *MultiSink) SetContinueOnError(continueOnError bool) {
if sink == nil {
return
}
sink.mu.Lock()
sink.continueOnError = continueOnError
sink.mu.Unlock()
}
func (sink *MultiSink) ContinueOnError() bool {
if sink == nil {
return true
}
sink.mu.RLock()
defer sink.mu.RUnlock()
return sink.continueOnError
}
func (sink *MultiSink) SinkCount() int {
if sink == nil {
return 0
}
sink.mu.RLock()
defer sink.mu.RUnlock()
return len(sink.slots)
}
func (sink *MultiSink) GetStats() Snapshot {
if sink == nil {
return Snapshot{
ContinueOnError: true,
Sinks: nil,
}
}
current, continueOnError := sink.snapshot()
stats := make([]Stats, 0, len(current))
for index, item := range current {
if item == nil {
continue
}
stats = append(stats, item.snapshot(index))
}
return Snapshot{
ContinueOnError: continueOnError,
Sinks: stats,
}
}
func (sink *MultiSink) ResetStats() {
if sink == nil {
return
}
current, _ := sink.snapshot()
for _, item := range current {
if item == nil {
continue
}
item.resetStats()
}
}
func (sink *MultiSink) Write(data []byte) error {
if sink == nil {
return nil
}
current, continueOnError := sink.snapshot()
if len(current) == 0 {
return nil
}
var errs []error
for _, item := range current {
if item == nil || item.sink == nil {
continue
}
err := item.sink.Write(data)
item.observeWrite(err)
if err != nil {
if !continueOnError {
return err
}
errs = append(errs, err)
}
}
return packErrors("write", errs)
}
func (sink *MultiSink) Close() error {
if sink == nil {
return nil
}
current, continueOnError := sink.snapshot()
var errs []error
for _, item := range current {
if item == nil || item.sink == nil {
continue
}
err := item.sink.Close()
item.observeClose(err)
if err != nil {
if !continueOnError {
return err
}
errs = append(errs, err)
}
}
return packErrors("close", errs)
}
func (sink *MultiSink) snapshot() ([]*slot, bool) {
sink.mu.RLock()
defer sink.mu.RUnlock()
current := make([]*slot, len(sink.slots))
copy(current, sink.slots)
return current, sink.continueOnError
}
func packErrors(action string, errs []error) error {
if len(errs) == 0 {
return nil
}
if len(errs) == 1 {
return errs[0]
}
return fmt.Errorf("multi sink %s failed with %d errors: %v", action, len(errs), errs[0])
}

View File

@ -0,0 +1,123 @@
package observerx
import (
"sync"
"sync/atomic"
)
type Buffer struct {
mu sync.RWMutex
items []interface{}
limit int
dropped uint64
}
func NewBuffer() *Buffer {
return &Buffer{
items: make([]interface{}, 0, 16),
limit: 0,
}
}
func (buffer *Buffer) Add(item interface{}) {
if buffer == nil {
return
}
buffer.mu.Lock()
if buffer.limit > 0 && len(buffer.items) >= buffer.limit {
buffer.items = buffer.items[1:]
atomic.AddUint64(&buffer.dropped, 1)
}
buffer.items = append(buffer.items, item)
buffer.mu.Unlock()
}
func (buffer *Buffer) SetLimit(limit int) {
if buffer == nil {
return
}
if limit < 0 {
limit = 0
}
buffer.mu.Lock()
buffer.limit = limit
if limit > 0 && len(buffer.items) > limit {
dropped := len(buffer.items) - limit
buffer.items = buffer.items[dropped:]
atomic.AddUint64(&buffer.dropped, uint64(dropped))
}
buffer.mu.Unlock()
}
func (buffer *Buffer) Limit() int {
if buffer == nil {
return 0
}
buffer.mu.RLock()
defer buffer.mu.RUnlock()
return buffer.limit
}
func (buffer *Buffer) Count() int {
if buffer == nil {
return 0
}
buffer.mu.RLock()
defer buffer.mu.RUnlock()
return len(buffer.items)
}
func (buffer *Buffer) Dropped() uint64 {
if buffer == nil {
return 0
}
return atomic.LoadUint64(&buffer.dropped)
}
func (buffer *Buffer) Snapshot() []interface{} {
if buffer == nil {
return nil
}
buffer.mu.RLock()
defer buffer.mu.RUnlock()
result := make([]interface{}, len(buffer.items))
copy(result, buffer.items)
return result
}
func (buffer *Buffer) Last() (interface{}, bool) {
if buffer == nil {
return nil, false
}
buffer.mu.RLock()
defer buffer.mu.RUnlock()
if len(buffer.items) == 0 {
return nil, false
}
return buffer.items[len(buffer.items)-1], true
}
func (buffer *Buffer) TakeAll() []interface{} {
if buffer == nil {
return nil
}
buffer.mu.Lock()
defer buffer.mu.Unlock()
if len(buffer.items) == 0 {
return nil
}
result := make([]interface{}, len(buffer.items))
copy(result, buffer.items)
buffer.items = buffer.items[:0]
return result
}
func (buffer *Buffer) Reset() {
if buffer == nil {
return
}
buffer.mu.Lock()
buffer.items = buffer.items[:0]
buffer.mu.Unlock()
atomic.StoreUint64(&buffer.dropped, 0)
}

View File

@ -0,0 +1,137 @@
package pipelinex
import (
"encoding/json"
"fmt"
"sort"
"strings"
"time"
)
type Entry struct {
Time time.Time
LevelName string
LoggerName string
Thread string
File string
Line int
Func string
Message string
Error string
Fields map[string]interface{}
}
type TextOptions struct {
IncludeTimestamp bool
IncludeLevel bool
IncludeSource bool
IncludeThread bool
IncludeLogger bool
}
func cloneFields(fields map[string]interface{}) map[string]interface{} {
if len(fields) == 0 {
return nil
}
cloned := make(map[string]interface{}, len(fields))
for key, value := range fields {
cloned[key] = value
}
return cloned
}
func renderFields(fields map[string]interface{}) string {
if len(fields) == 0 {
return ""
}
keys := make([]string, 0, len(fields))
for key := range fields {
keys = append(keys, key)
}
sort.Strings(keys)
pairs := make([]string, 0, len(keys))
for _, key := range keys {
pairs = append(pairs, fmt.Sprintf("%s=%v", key, fields[key]))
}
return strings.Join(pairs, " ")
}
func FormatText(entry Entry, options TextOptions) ([]byte, error) {
parts := make([]string, 0, 6)
if options.IncludeTimestamp {
if !entry.Time.IsZero() {
parts = append(parts, entry.Time.Format("2006-01-02 15:04:05.000000"))
}
}
if options.IncludeSource {
source := ""
if entry.File != "" {
source = fmt.Sprintf("%s:%d", entry.File, entry.Line)
}
if entry.Func != "" {
if source != "" {
source += " "
}
source += "<" + entry.Func + ">"
}
if source != "" {
parts = append(parts, source)
}
}
if options.IncludeThread && entry.Thread != "" {
parts = append(parts, "|"+entry.Thread+"|")
}
if options.IncludeLevel {
if entry.LevelName != "" {
parts = append(parts, "["+entry.LevelName+"]")
}
}
if options.IncludeLogger && entry.LoggerName != "" {
parts = append(parts, "logger="+entry.LoggerName)
}
messageParts := make([]string, 0, 3)
if entry.Message != "" {
messageParts = append(messageParts, entry.Message)
}
if entry.Error != "" {
messageParts = append(messageParts, "error="+entry.Error)
}
fieldText := renderFields(entry.Fields)
if fieldText != "" {
messageParts = append(messageParts, fieldText)
}
if len(messageParts) > 0 {
parts = append(parts, strings.Join(messageParts, " "))
}
return []byte(strings.Join(parts, " ")), nil
}
func FormatJSON(entry Entry, pretty bool) ([]byte, error) {
payload := map[string]interface{}{
"time": entry.Time.Format(time.RFC3339Nano),
"level": entry.LevelName,
"msg": entry.Message,
"logger": entry.LoggerName,
"thread": entry.Thread,
}
if entry.File != "" {
payload["file"] = entry.File
}
if entry.Line > 0 {
payload["line"] = entry.Line
}
if entry.Func != "" {
payload["func"] = entry.Func
}
if entry.Error != "" {
payload["error"] = entry.Error
}
if len(entry.Fields) > 0 {
payload["fields"] = cloneFields(entry.Fields)
}
if pretty {
return json.MarshalIndent(payload, "", " ")
}
return json.Marshal(payload)
}

View File

@ -0,0 +1,58 @@
package redactutil
import (
"fmt"
"regexp"
"strings"
)
func NormalizeMask(mask string) string {
mask = strings.TrimSpace(mask)
if mask == "" {
return "[REDACTED]"
}
return mask
}
func BuildFieldSet(fields ...string) map[string]struct{} {
fieldMap := make(map[string]struct{}, len(fields))
for _, field := range fields {
field = strings.TrimSpace(strings.ToLower(field))
if field == "" {
continue
}
fieldMap[field] = struct{}{}
}
return fieldMap
}
func LookupFieldKey(key string) string {
return strings.TrimSpace(strings.ToLower(key))
}
func MaskFields(fields map[string]interface{}, mask string) map[string]interface{} {
if len(fields) == 0 {
return nil
}
mask = NormalizeMask(mask)
masked := make(map[string]interface{}, len(fields))
for key := range fields {
masked[key] = mask
}
return masked
}
func ReplaceRegex(pattern *regexp.Regexp, text string, replacement string) (string, bool) {
if pattern == nil || text == "" {
return text, false
}
if replacement == "" {
replacement = "[REDACTED]"
}
changed := pattern.ReplaceAllString(text, replacement)
return changed, changed != text
}
func IsMasked(value interface{}, mask string) bool {
return fmt.Sprint(value) == mask
}

View File

@ -0,0 +1,199 @@
package rotatemanage
import (
"compress/gzip"
"io"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
type Options struct {
MaxBackups int
MaxAge time.Duration
Compress bool
Pattern string
}
type backupFileMeta struct {
path string
modTime time.Time
}
func Apply(archivePath string, currentPath string, options Options) error {
if archivePath == "" || currentPath == "" {
return nil
}
if options.Compress {
if _, err := gzipBackupFile(archivePath); err != nil {
return err
}
}
backups, err := listManagedBackups(currentPath, options.Pattern)
if err != nil {
return err
}
return cleanupManagedBackups(backups, options)
}
func listManagedBackups(currentPath string, pattern string) ([]backupFileMeta, error) {
dir := filepath.Dir(currentPath)
base := filepath.Base(currentPath)
stem := strings.TrimSuffix(base, filepath.Ext(base))
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
backups := make([]backupFileMeta, 0, len(entries))
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
if name == base {
continue
}
matched, err := IsManagedBackupName(name, base, stem, pattern)
if err != nil {
return nil, err
}
if !matched {
continue
}
info, err := entry.Info()
if err != nil {
return nil, err
}
backups = append(backups, backupFileMeta{
path: filepath.Join(dir, name),
modTime: info.ModTime(),
})
}
return backups, nil
}
func IsManagedBackupName(name string, base string, stem string, pattern string) (bool, error) {
if pattern != "" {
return filepath.Match(pattern, name)
}
prefixes := []string{
base + ".",
base + "_",
base + "-",
}
if stem != "" && stem != base {
prefixes = append(prefixes,
stem+".",
stem+"_",
stem+"-",
)
}
for _, prefix := range prefixes {
if !strings.HasPrefix(name, prefix) {
continue
}
if isLikelyManagedBackupSuffix(strings.TrimPrefix(name, prefix)) {
return true, nil
}
}
return false, nil
}
func isLikelyManagedBackupSuffix(suffix string) bool {
suffix = strings.TrimSpace(strings.ToLower(suffix))
if suffix == "" {
return false
}
suffix = strings.TrimSuffix(suffix, ".gz")
suffix = strings.TrimSuffix(suffix, ".zip")
if suffix == "" {
return false
}
if strings.Contains(suffix, "bak") {
return true
}
for _, ch := range suffix {
if ch >= '0' && ch <= '9' {
return true
}
}
return false
}
func cleanupManagedBackups(backups []backupFileMeta, options Options) error {
var firstErr error
now := time.Now()
kept := make([]backupFileMeta, 0, len(backups))
for _, item := range backups {
if options.MaxAge > 0 && now.Sub(item.modTime) > options.MaxAge {
if err := os.Remove(item.path); err != nil && firstErr == nil {
firstErr = err
}
continue
}
kept = append(kept, item)
}
if options.MaxBackups > 0 && len(kept) > options.MaxBackups {
sort.Slice(kept, func(i, j int) bool {
return kept[i].modTime.After(kept[j].modTime)
})
for _, item := range kept[options.MaxBackups:] {
if err := os.Remove(item.path); err != nil && firstErr == nil {
firstErr = err
}
}
}
return firstErr
}
func gzipBackupFile(path string) (string, error) {
if strings.HasSuffix(path, ".gz") {
return path, nil
}
source, err := os.Open(path)
if err != nil {
return "", err
}
destination := path + ".gz"
temp := destination + ".tmp"
target, err := os.Create(temp)
if err != nil {
_ = source.Close()
return "", err
}
gzWriter := gzip.NewWriter(target)
if _, err = io.Copy(gzWriter, source); err != nil {
_ = source.Close()
_ = gzWriter.Close()
_ = target.Close()
_ = os.Remove(temp)
return "", err
}
if err = gzWriter.Close(); err != nil {
_ = source.Close()
_ = target.Close()
_ = os.Remove(temp)
return "", err
}
if err = target.Close(); err != nil {
_ = source.Close()
_ = os.Remove(temp)
return "", err
}
if err = source.Close(); err != nil {
_ = os.Remove(temp)
return "", err
}
if err = os.Rename(temp, destination); err != nil {
_ = os.Remove(temp)
return "", err
}
if err = os.Remove(path); err != nil {
return "", err
}
return destination, nil
}

View File

@ -0,0 +1,68 @@
package routerx
import "fmt"
type Matcher func(level int) bool
type Route struct {
Index int
Name string
Match Matcher
Enabled bool
}
type Snapshot struct {
Index int
Name string
Match Matcher
}
func Normalize(routes []Route) []Snapshot {
if len(routes) == 0 {
return nil
}
result := make([]Snapshot, 0, len(routes))
for _, route := range routes {
if !route.Enabled {
continue
}
name := route.Name
if name == "" {
name = fmt.Sprintf("route-%d", route.Index)
}
match := route.Match
if match == nil {
match = MatchAllLevels()
}
result = append(result, Snapshot{
Index: route.Index,
Name: name,
Match: match,
})
}
return result
}
func MatchAllLevels() Matcher {
return func(level int) bool {
_ = level
return true
}
}
func MatchLevels(levels ...int) Matcher {
levelSet := make(map[int]struct{}, len(levels))
for _, level := range levels {
levelSet[level] = struct{}{}
}
return func(level int) bool {
_, ok := levelSet[level]
return ok
}
}
func MatchAtLeast(minLevel int) Matcher {
return func(level int) bool {
return level >= minLevel
}
}

View File

@ -0,0 +1,142 @@
package runtimex
import (
"errors"
"io"
"sync/atomic"
)
var (
ErrStackClosed = errors.New("stack closed")
ErrStackFull = errors.New("stack full")
)
type ChanStack struct {
data chan interface{}
cap uint64
current uint64
isClose atomic.Value
}
func NewChanStack(cap uint64) *ChanStack {
rtnBuffer := new(ChanStack)
rtnBuffer.cap = cap
rtnBuffer.isClose.Store(false)
rtnBuffer.data = make(chan interface{}, cap)
return rtnBuffer
}
func (s *ChanStack) init() {
s.cap = 1024
s.data = make(chan interface{}, s.cap)
s.isClose.Store(false)
}
func (s *ChanStack) Free() uint64 {
return s.cap - atomic.LoadUint64(&s.current)
}
func (s *ChanStack) Cap() uint64 {
return s.cap
}
func (s *ChanStack) Len() uint64 {
return atomic.LoadUint64(&s.current)
}
func (s *ChanStack) Pop() (interface{}, error) {
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return nil, io.EOF
}
data, ok := <-s.data
if !ok {
s.isClose.Store(true)
return nil, io.EOF
}
for {
current := atomic.LoadUint64(&s.current)
if current == 0 {
break
}
if atomic.CompareAndSwapUint64(&s.current, current, current-1) {
break
}
}
return data, nil
}
func (s *ChanStack) Push(data interface{}) error {
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return io.EOF
}
if err := func() (err error) {
defer func() {
if r := recover(); r != nil {
err = io.EOF
}
}()
s.data <- data
return nil
}(); err != nil {
return err
}
for {
current := atomic.LoadUint64(&s.current)
if atomic.CompareAndSwapUint64(&s.current, current, current+1) {
break
}
}
return nil
}
func (s *ChanStack) TryPush(data interface{}) error {
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return io.EOF
}
if err := func() (err error) {
defer func() {
if r := recover(); r != nil {
err = io.EOF
}
}()
select {
case s.data <- data:
return nil
default:
return ErrStackFull
}
}(); err != nil {
return err
}
for {
current := atomic.LoadUint64(&s.current)
if atomic.CompareAndSwapUint64(&s.current, current, current+1) {
break
}
}
return nil
}
func (s *ChanStack) Close() error {
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return ErrStackClosed
}
s.isClose.Store(true)
defer func() {
recover()
}()
close(s.data)
return nil
}

View File

@ -0,0 +1,64 @@
package runtimex
import (
"errors"
"io"
"testing"
)
func TestChanStackPushPop(t *testing.T) {
stack := NewChanStack(2)
if err := stack.Push("a"); err != nil {
t.Fatalf("Push failed: %v", err)
}
if err := stack.Push("b"); err != nil {
t.Fatalf("Push failed: %v", err)
}
if stack.Len() != 2 {
t.Fatalf("expected len=2, got %d", stack.Len())
}
if stack.Free() != 0 {
t.Fatalf("expected free=0, got %d", stack.Free())
}
first, err := stack.Pop()
if err != nil {
t.Fatalf("Pop failed: %v", err)
}
if first.(string) != "a" {
t.Fatalf("unexpected first value: %v", first)
}
second, err := stack.Pop()
if err != nil {
t.Fatalf("Pop failed: %v", err)
}
if second.(string) != "b" {
t.Fatalf("unexpected second value: %v", second)
}
}
func TestChanStackTryPushFull(t *testing.T) {
stack := NewChanStack(1)
if err := stack.TryPush("a"); err != nil {
t.Fatalf("TryPush should succeed on empty stack: %v", err)
}
if err := stack.TryPush("b"); !errors.Is(err, ErrStackFull) {
t.Fatalf("TryPush should return ErrStackFull, got %v", err)
}
}
func TestChanStackCloseBehavior(t *testing.T) {
stack := NewChanStack(1)
if err := stack.Close(); err != nil {
t.Fatalf("Close should succeed first time: %v", err)
}
if err := stack.Close(); !errors.Is(err, ErrStackClosed) {
t.Fatalf("Close should return ErrStackClosed on second call, got %v", err)
}
if err := stack.Push("x"); !errors.Is(err, io.EOF) {
t.Fatalf("Push after close should return io.EOF, got %v", err)
}
if _, err := stack.Pop(); !errors.Is(err, io.EOF) {
t.Fatalf("Pop after close should return io.EOF, got %v", err)
}
}

63
internal/runtimex/map.go Normal file
View File

@ -0,0 +1,63 @@
package runtimex
import (
"os"
"sync"
)
type MapKV struct {
kvMap map[interface{}]interface{}
mu sync.RWMutex
}
func NewMapKV() MapKV {
var mp MapKV
mp.kvMap = make(map[interface{}]interface{})
return mp
}
func (m *MapKV) Get(key interface{}) (interface{}, error) {
var err error
m.mu.RLock()
defer m.mu.RUnlock()
data, ok := m.kvMap[key]
if !ok {
err = os.ErrNotExist
}
return data, err
}
func (m *MapKV) MustGet(key interface{}) interface{} {
result, _ := m.Get(key)
return result
}
func (m *MapKV) Store(key interface{}, value interface{}) error {
m.mu.Lock()
defer m.mu.Unlock()
m.kvMap[key] = value
return nil
}
func (m *MapKV) Exists(key interface{}) bool {
m.mu.RLock()
defer m.mu.RUnlock()
_, ok := m.kvMap[key]
return ok
}
func (m *MapKV) Delete(key interface{}) error {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.kvMap, key)
return nil
}
func (m *MapKV) Range(run func(k interface{}, v interface{}) bool) error {
// Hold the read lock while iterating so Range is safe against concurrent
// Store/Delete; callbacks must not call Store/Delete on the same map.
m.mu.RLock()
defer m.mu.RUnlock()
for k, v := range m.kvMap {
if !run(k, v) {
break
}
}
return nil
}

136
internal/stdlibx/bridge.go Normal file
View File

@ -0,0 +1,136 @@
package stdlibx
import (
"errors"
"strings"
)
type LevelMapper func(text string, fallbackLevel int) int
type Options struct {
Prefix string
Flags int
ShowStd bool
TrimNewline bool
LevelMapper LevelMapper
}
type Option func(*Options)
func DefaultOptions() Options {
return Options{
Prefix: "",
Flags: 0,
ShowStd: false,
TrimNewline: true,
LevelMapper: nil,
}
}
func WithPrefix(prefix string) Option {
return func(options *Options) {
if options == nil {
return
}
options.Prefix = prefix
}
}
func WithFlags(flags int) Option {
return func(options *Options) {
if options == nil {
return
}
options.Flags = flags
}
}
func WithShowStd(show bool) Option {
return func(options *Options) {
if options == nil {
return
}
options.ShowStd = show
}
}
func WithTrimNewline(trim bool) Option {
return func(options *Options) {
if options == nil {
return
}
options.TrimNewline = trim
}
}
func WithLevelMapper(mapper LevelMapper) Option {
return func(options *Options) {
if options == nil {
return
}
options.LevelMapper = mapper
}
}
func NormalizeOptions(opts []Option) Options {
options := DefaultOptions()
for _, option := range opts {
if option == nil {
continue
}
option(&options)
}
return options
}
type EmitFunc func(level int, showStd bool, text string)
type Writer struct {
level int
showStd bool
trimNewline bool
levelMapper LevelMapper
emit EmitFunc
}
func NewWriter(level int, options Options, emit EmitFunc) *Writer {
return &Writer{
level: level,
showStd: options.ShowStd,
trimNewline: options.TrimNewline,
levelMapper: options.LevelMapper,
emit: emit,
}
}
func (writer *Writer) SetShowStd(show bool) {
if writer == nil {
return
}
writer.showStd = show
}
func (writer *Writer) SetTrimNewline(trim bool) {
if writer == nil {
return
}
writer.trimNewline = trim
}
func (writer *Writer) Write(data []byte) (int, error) {
if writer == nil || writer.emit == nil {
return 0, errors.New("level writer logger is nil")
}
text := string(data)
if writer.trimNewline {
text = strings.TrimRight(text, "\r\n")
}
if text != "" {
level := writer.level
if writer.levelMapper != nil {
level = writer.levelMapper(text, level)
}
writer.emit(level, writer.showStd, text)
}
return len(data), nil
}

102
keyword_match_test.go Normal file
View File

@ -0,0 +1,102 @@
package starlog
import (
"strings"
"testing"
)
func forceColorForKeywordTest(t *testing.T) func() {
t.Helper()
old := NoColor
NoColor = false
return func() {
NoColor = old
}
}
func TestKeywordMatchDefaultCaseSensitive(t *testing.T) {
defer forceColorForKeywordTest(t)()
core := newLogCore(nil)
core.keywordColors["error"] = []Attr{FgRed}
got := core.highlightKeywords("ERROR error")
if strings.Contains(got, NewColor(FgRed).Sprint("ERROR")) {
t.Fatalf("default keyword match should be case-sensitive")
}
if !strings.Contains(got, NewColor(FgRed).Sprint("error")) {
t.Fatalf("default keyword match should highlight exact keyword")
}
}
func TestKeywordMatchIgnoreCase(t *testing.T) {
defer forceColorForKeywordTest(t)()
core := newLogCore(nil)
core.keywordColors["error"] = []Attr{FgRed}
core.keywordMatchOptions = KeywordMatchOptions{IgnoreCase: true}
got := core.highlightKeywords("ERROR error Error")
if !strings.Contains(got, NewColor(FgRed).Sprint("ERROR")) {
t.Fatalf("ignore-case keyword match should highlight uppercase variant")
}
if !strings.Contains(got, NewColor(FgRed).Sprint("Error")) {
t.Fatalf("ignore-case keyword match should highlight title-case variant")
}
}
func TestKeywordMatchWholeWord(t *testing.T) {
defer forceColorForKeywordTest(t)()
core := newLogCore(nil)
core.keywordColors["error"] = []Attr{FgRed}
core.keywordMatchOptions = KeywordMatchOptions{WholeWord: true}
got := core.highlightKeywords("error errors xerror error_x error")
colored := NewColor(FgRed).Sprint("error")
if strings.Count(got, colored) != 2 {
t.Fatalf("whole-word keyword match should only highlight standalone words, got %q", got)
}
}
func TestKeywordMatchWholeWordIgnoreCase(t *testing.T) {
defer forceColorForKeywordTest(t)()
core := newLogCore(nil)
core.keywordColors["error"] = []Attr{FgRed}
core.keywordMatchOptions = KeywordMatchOptions{
IgnoreCase: true,
WholeWord: true,
}
got := core.highlightKeywords("ERROR errors Error")
if !strings.Contains(got, NewColor(FgRed).Sprint("ERROR")) {
t.Fatalf("whole-word+ignore-case should highlight ERROR")
}
if !strings.Contains(got, NewColor(FgRed).Sprint("Error")) {
t.Fatalf("whole-word+ignore-case should highlight Error")
}
if strings.Contains(got, NewColor(FgRed).Sprint("error")+"s") {
t.Fatalf("whole-word+ignore-case should not highlight partial words")
}
}
func TestKeywordMatchOptionsConfigRoundTrip(t *testing.T) {
logger := NewStarlog(nil)
logger.SetKeywordMatchOptions(KeywordMatchOptions{
IgnoreCase: true,
WholeWord: true,
})
cfg := logger.GetConfig()
if !cfg.KeywordMatch.IgnoreCase || !cfg.KeywordMatch.WholeWord {
t.Fatalf("config snapshot should include keyword match options")
}
logger.SetKeywordMatchOptions(KeywordMatchOptions{})
logger.ApplyConfig(cfg)
got := logger.GetKeywordMatchOptions()
if !got.IgnoreCase || !got.WholeWord {
t.Fatalf("ApplyConfig should restore keyword match options")
}
}

138
keyword_preset.go Normal file
View File

@ -0,0 +1,138 @@
package starlog
import "strings"
type KeywordPreset string
const (
KeywordPresetMobaLite KeywordPreset = "moba-lite"
KeywordPresetMobaFull KeywordPreset = "moba-full"
)
var keywordPresetDefs = map[KeywordPreset]map[string][]Attr{
KeywordPresetMobaLite: buildKeywordPresetMap(false),
KeywordPresetMobaFull: buildKeywordPresetMap(true),
}
func normalizeKeywordPreset(preset KeywordPreset) KeywordPreset {
name := strings.TrimSpace(strings.ToLower(string(preset)))
switch name {
case "moba-lite", "lite", "moba":
return KeywordPresetMobaLite
case "moba-full", "full", "mobaxterm":
return KeywordPresetMobaFull
default:
return KeywordPreset(name)
}
}
func keywordPresetMap(preset KeywordPreset) (map[string][]Attr, bool) {
normalized := normalizeKeywordPreset(preset)
mapping, ok := keywordPresetDefs[normalized]
if !ok {
return nil, false
}
return mapping, true
}
func GetKeywordPreset(preset KeywordPreset) map[string][]Attr {
mapping, ok := keywordPresetMap(preset)
if !ok {
return map[string][]Attr{}
}
return cloneColorMap(mapping)
}
func (logger *StarLogger) ApplyKeywordPreset(preset KeywordPreset) {
if logger == nil {
return
}
mapping, ok := keywordPresetMap(preset)
if !ok {
return
}
logger.SetKeywordColors(mapping)
}
func (logger *StarLogger) MergeKeywordPreset(preset KeywordPreset) {
if logger == nil {
return
}
mapping, ok := keywordPresetMap(preset)
if !ok {
return
}
logger.logcore.mu.Lock()
defer logger.logcore.mu.Unlock()
merged := cloneColorMap(mapping)
for keyword, attrs := range logger.logcore.keywordColors {
merged[keyword] = cloneAttrs(attrs)
}
logger.logcore.keywordColors = merged
logger.logcore.rebuildKeywordCachesLocked()
}
func buildKeywordPresetMap(full bool) map[string][]Attr {
colors := make(map[string][]Attr)
putKeywordGroup(colors, []string{
"error", "failed", "fatal", "panic", "critical", "timeout",
"denied", "refused", "invalid", "exception",
}, []Attr{FgHiRed, Bold})
putKeywordGroup(colors, []string{
"warn", "warning", "retry", "slow",
}, []Attr{FgHiYellow, Bold})
putKeywordGroup(colors, []string{
"ok", "success", "passed", "done", "connected", "ready",
}, []Attr{FgHiGreen, Bold})
putKeywordGroup(colors, []string{
"true", "yes", "enabled", "on",
}, []Attr{FgGreen, Bold})
putKeywordGroup(colors, []string{
"false", "no", "disabled", "off",
}, []Attr{FgHiMagenta, Bold})
putKeywordGroup(colors, []string{
"info", "notice", "debug",
}, []Attr{FgHiCyan})
if full {
putKeywordGroup(colors, []string{
"closed", "disconnect", "cancel", "drop", "overload",
}, []Attr{FgMagenta})
putKeywordGroup(colors, []string{
"start", "started", "ready", "healthy", "up",
}, []Attr{FgGreen})
putKeywordGroup(colors, []string{
"stop", "stopped", "down", "degraded",
}, []Attr{FgYellow})
}
return colors
}
func putKeywordGroup(colors map[string][]Attr, keywords []string, attrs []Attr) {
for _, keyword := range keywords {
for _, variant := range buildKeywordVariants(keyword) {
colors[variant] = cloneAttrs(attrs)
}
}
}
func buildKeywordVariants(keyword string) []string {
keyword = strings.TrimSpace(keyword)
if keyword == "" {
return nil
}
lower := strings.ToLower(keyword)
upper := strings.ToUpper(keyword)
title := strings.ToUpper(lower[:1]) + lower[1:]
variants := []string{lower}
if upper != lower {
variants = append(variants, upper)
}
if title != lower && title != upper {
variants = append(variants, title)
}
return variants
}

91
keyword_preset_test.go Normal file
View File

@ -0,0 +1,91 @@
package starlog
import "testing"
func attrsEqual(a []Attr, b []Attr) bool {
if len(a) != len(b) {
return false
}
for idx := range a {
if a[idx] != b[idx] {
return false
}
}
return true
}
func TestGetKeywordPresetMobaLite(t *testing.T) {
preset := GetKeywordPreset(KeywordPresetMobaLite)
if len(preset) == 0 {
t.Fatalf("moba lite preset should not be empty")
}
if _, ok := preset["error"]; !ok {
t.Fatalf("moba lite preset should include error")
}
if _, ok := preset["true"]; !ok {
t.Fatalf("moba lite preset should include true")
}
if _, ok := preset["ERROR"]; !ok {
t.Fatalf("moba lite preset should include ERROR variant")
}
preset["__mutated__"] = []Attr{FgBlue}
latest := GetKeywordPreset(KeywordPresetMobaLite)
if _, ok := latest["__mutated__"]; ok {
t.Fatalf("GetKeywordPreset should return a cloned map")
}
}
func TestApplyKeywordPreset(t *testing.T) {
logger := NewStarlog(nil)
logger.SetKeywordColor("custom", []Attr{FgBlue})
logger.ApplyKeywordPreset(KeywordPresetMobaLite)
colors := logger.GetKeywordColors()
if _, ok := colors["custom"]; ok {
t.Fatalf("ApplyKeywordPreset should replace existing keyword map")
}
if _, ok := colors["error"]; !ok {
t.Fatalf("ApplyKeywordPreset should include preset keywords")
}
}
func TestMergeKeywordPreset(t *testing.T) {
logger := NewStarlog(nil)
logger.SetKeywordColor("custom", []Attr{FgCyan})
logger.SetKeywordColor("error", []Attr{FgBlue})
logger.MergeKeywordPreset(KeywordPresetMobaLite)
colors := logger.GetKeywordColors()
if _, ok := colors["true"]; !ok {
t.Fatalf("MergeKeywordPreset should include preset keywords")
}
if _, ok := colors["custom"]; !ok {
t.Fatalf("MergeKeywordPreset should keep existing custom keyword")
}
got := colors["error"]
want := []Attr{FgBlue}
if !attrsEqual(got, want) {
t.Fatalf("existing keyword color should override preset, got %v", got)
}
}
func TestKeywordPresetUnknownNoOp(t *testing.T) {
logger := NewStarlog(nil)
logger.SetKeywordColor("keep", []Attr{FgGreen})
logger.ApplyKeywordPreset(KeywordPreset("unknown"))
afterApply := logger.GetKeywordColors()
if _, ok := afterApply["keep"]; !ok {
t.Fatalf("ApplyKeywordPreset unknown preset should not change map")
}
logger.MergeKeywordPreset(KeywordPreset("unknown"))
afterMerge := logger.GetKeywordColors()
if _, ok := afterMerge["keep"]; !ok {
t.Fatalf("MergeKeywordPreset unknown preset should not change map")
}
}

133
lifecycle.go Normal file
View File

@ -0,0 +1,133 @@
package starlog
import (
"context"
"fmt"
"io"
"time"
)
type sinkSyncer interface {
Sync() error
}
type writerSyncer interface {
Sync() error
}
func mergeLifecycleError(current error, next error) error {
if next == nil {
return current
}
if current == nil {
return next
}
return fmt.Errorf("%v; %w", current, next)
}
func WaitAsyncDrain(ctx context.Context) error {
if ctx == nil {
ctx = context.Background()
}
for {
stackMu.Lock()
current := stacks
started := stackStarted
stackMu.Unlock()
if !started || current == nil || current.Len() == 0 {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(5 * time.Millisecond):
}
}
}
func (logger *StarLogger) Flush() error {
if logger == nil || logger.logcore == nil {
return nil
}
logger.logcore.mu.Lock()
previousSwitching := logger.logcore.switching
logger.logcore.switching = false
logger.logcore.writePendingLocked()
logger.logcore.switching = previousSwitching
logger.logcore.mu.Unlock()
return nil
}
func (logger *StarLogger) Sync() error {
if logger == nil || logger.logcore == nil {
return nil
}
if err := logger.Flush(); err != nil {
return err
}
logger.logcore.mu.Lock()
sink := logger.logcore.sink
writer := logger.logcore.output
logger.logcore.mu.Unlock()
var err error
if sink != nil {
if syncer, ok := sink.(sinkSyncer); ok {
err = mergeLifecycleError(err, syncer.Sync())
}
return err
}
if writer != nil {
if syncer, ok := writer.(writerSyncer); ok {
err = mergeLifecycleError(err, syncer.Sync())
}
}
return err
}
// Close flushes/syncs and closes archive-managed file/sink/writer resources.
// It does not wait for async handler queue; use Shutdown for graceful app exit.
func (logger *StarLogger) Close() error {
if logger == nil || logger.logcore == nil {
return nil
}
var err error
StopArchive(logger)
err = mergeLifecycleError(err, logger.Sync())
err = mergeLifecycleError(err, CloseLogFile(logger))
logger.logcore.mu.Lock()
sink := logger.logcore.sink
writer := logger.logcore.output
entryHandler := logger.logcore.entryHandler
logger.logcore.mu.Unlock()
if sink != nil {
err = mergeLifecycleError(err, sink.Close())
} else if writer != nil {
if closer, ok := writer.(io.Closer); ok {
err = mergeLifecycleError(err, closer.Close())
}
}
if entryHandler != nil {
if closer, ok := entryHandler.(interface{ Close() error }); ok {
err = mergeLifecycleError(err, closer.Close())
}
}
logger.StopWrite()
return err
}
func (logger *StarLogger) Shutdown(ctx context.Context) error {
if logger == nil || logger.logcore == nil {
return nil
}
var err error
err = mergeLifecycleError(err, logger.Flush())
err = mergeLifecycleError(err, WaitAsyncDrain(ctx))
StopStacks()
err = mergeLifecycleError(err, logger.Close())
return err
}

170
lifecycle_test.go Normal file
View File

@ -0,0 +1,170 @@
package starlog
import (
"bytes"
"context"
"errors"
"path/filepath"
"sync/atomic"
"testing"
"time"
)
type syncWriteBuffer struct {
bytes.Buffer
syncCalls uint64
}
func (buffer *syncWriteBuffer) Sync() error {
atomic.AddUint64(&buffer.syncCalls, 1)
return nil
}
type closeSink struct {
closeCalls uint64
}
func (sink *closeSink) Write(data []byte) error {
_ = data
return nil
}
func (sink *closeSink) Close() error {
atomic.AddUint64(&sink.closeCalls, 1)
return nil
}
func TestFlushDrainsPendingWrites(t *testing.T) {
var output bytes.Buffer
logger := newStructuredTestLogger(&output)
logger.SetSwitching(true)
logger.Infoln("pending")
if output.Len() != 0 {
t.Fatalf("pending logs should not be written while switching=true")
}
if err := logger.Flush(); err != nil {
t.Fatalf("Flush failed: %v", err)
}
if output.Len() == 0 {
t.Fatalf("Flush should write buffered logs")
}
}
func TestSyncCallsWriterSync(t *testing.T) {
var writer syncWriteBuffer
logger := NewStarlog(&writer)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.Infoln("sync")
if err := logger.Sync(); err != nil {
t.Fatalf("Sync failed: %v", err)
}
if atomic.LoadUint64(&writer.syncCalls) == 0 {
t.Fatalf("Sync should call underlying writer Sync method")
}
}
func TestCloseClosesSinkAndStopsWrite(t *testing.T) {
sink := &closeSink{}
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.SetSink(sink)
if err := logger.Close(); err != nil {
t.Fatalf("Close failed: %v", err)
}
if atomic.LoadUint64(&sink.closeCalls) == 0 {
t.Fatalf("Close should call sink.Close")
}
if !logger.IsWriteStopped() {
t.Fatalf("Close should stop writer")
}
}
func TestWaitAsyncDrainContextTimeout(t *testing.T) {
stackMu.Lock()
stackStarted = true
stacks = newStarChanStack(1)
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
defer func() {
stackMu.Lock()
if stacks != nil {
_ = stacks.Close()
}
stackStarted = false
stacks = nil
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
}()
if err := stacks.Push("x"); err != nil {
t.Fatalf("prepare queue failed: %v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
defer cancel()
err := WaitAsyncDrain(ctx)
if !errors.Is(err, context.DeadlineExceeded) {
t.Fatalf("WaitAsyncDrain should return context deadline, got %v", err)
}
}
func TestShutdownStopsAsyncStacks(t *testing.T) {
resetAsyncMetricsForTest()
defer func() {
resetAsyncMetricsForTest()
StopStacks()
}()
logger := NewStarlog(nil)
logger.SetShowStd(false)
handled := make(chan struct{}, 1)
logger.SetHandler(func(LogData) {
select {
case handled <- struct{}{}:
default:
}
})
logger.Infoln("shutdown")
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := logger.Shutdown(ctx); err != nil {
t.Fatalf("Shutdown failed: %v", err)
}
select {
case <-handled:
case <-time.After(200 * time.Millisecond):
t.Fatalf("async handler should complete before shutdown")
}
stackMu.Lock()
started := stackStarted
stackMu.Unlock()
if started {
t.Fatalf("Shutdown should stop async stacks")
}
}
func TestCloseManagedLogFileNoDoubleCloseError(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "lifecycle_close.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
if err := logger.Close(); err != nil {
t.Fatalf("Close should not fail for managed log file: %v", err)
}
}

95
metrics.go Normal file
View File

@ -0,0 +1,95 @@
package starlog
import "time"
type AsyncMetrics struct {
Started bool
QueueLength uint64
QueueCapacity uint64
QueueFree uint64
Dropped uint64
FallbackToSync bool
HandlerTimeout time.Duration
}
type ErrorMetrics struct {
WriteErrors uint64
RedactionErrors uint64
}
type MetricsSnapshot struct {
Time time.Time
LoggerName string
Level int
HasWriter bool
HasSink bool
HasMultiSink bool
ArchiveRunning bool
Pending PendingStats
Sampling SamplingStats
Dedup DedupStats
RateLimit RateLimitStats
Async AsyncMetrics
Errors ErrorMetrics
MultiSink MultiSinkStats
}
func GetAsyncMetrics() AsyncMetrics {
stackMu.Lock()
started := stackStarted
current := stacks
stackMu.Unlock()
snapshot := AsyncMetrics{
Started: started,
Dropped: GetAsyncDropCount(),
FallbackToSync: GetAsyncFallbackToSync(),
HandlerTimeout: GetAsyncHandlerTimeout(),
}
if current != nil {
snapshot.QueueLength = current.Len()
snapshot.QueueCapacity = current.Cap()
snapshot.QueueFree = current.Free()
}
return snapshot
}
func (logger *StarLogger) GetMetricsSnapshot() MetricsSnapshot {
snapshot := MetricsSnapshot{
Time: time.Now(),
Async: GetAsyncMetrics(),
Errors: ErrorMetrics{
WriteErrors: GetWriteErrorCount(),
},
}
if logger == nil || logger.logcore == nil {
return snapshot
}
snapshot.Pending = logger.GetPendingStats()
snapshot.Sampling = logger.GetSamplingStats()
snapshot.Dedup = logger.GetDedupStats()
snapshot.RateLimit = logger.GetRateLimitStats()
snapshot.Errors.RedactionErrors = logger.GetRedactErrorCount()
snapshot.ArchiveRunning = IsArchiveRun(logger)
logger.logcore.mu.Lock()
snapshot.LoggerName = logger.logcore.name
snapshot.Level = logger.logcore.minLevel
snapshot.HasWriter = logger.logcore.output != nil
sink := logger.logcore.sink
snapshot.HasSink = sink != nil
logger.logcore.mu.Unlock()
if multi, ok := sink.(*MultiSink); ok && multi != nil {
snapshot.HasMultiSink = true
snapshot.MultiSink = multi.GetStats()
}
return snapshot
}
func (logger *StarLogger) GetAsyncMetrics() AsyncMetrics {
_ = logger
return GetAsyncMetrics()
}

99
metrics_test.go Normal file
View File

@ -0,0 +1,99 @@
package starlog
import (
"sync/atomic"
"testing"
"time"
)
type metricsSink struct {
writes uint64
}
func (sink *metricsSink) Write(data []byte) error {
_ = data
atomic.AddUint64(&sink.writes, 1)
return nil
}
func (sink *metricsSink) Close() error {
return nil
}
func TestMetricsSnapshotIncludesPendingAndMultiSink(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetPendingWriteLimit(8)
logger.SetPendingDropPolicy(PendingDropOldest)
logger.SetSamplingConfig(SamplingConfig{
Enable: true,
Levels: []int{LvInfo},
Rate: 0.5,
Scope: SamplingScopeGlobal,
})
logger.SetDedupConfig(DedupConfig{
Enable: true,
Levels: []int{LvInfo},
Window: time.Second,
Scope: DedupScopeByKey,
})
multi := NewMultiSink(&metricsSink{})
logger.SetSink(multi)
logger.SetSwitching(true)
logger.Infoln("pending")
snapshot := logger.GetMetricsSnapshot()
if !snapshot.HasSink {
t.Fatalf("snapshot should report sink")
}
if !snapshot.HasMultiSink {
t.Fatalf("snapshot should report multi sink")
}
if len(snapshot.MultiSink.Sinks) != 1 {
t.Fatalf("snapshot multi sink stats should include one sink")
}
if snapshot.Pending.Length == 0 {
t.Fatalf("snapshot should include pending queue length")
}
if snapshot.Pending.Policy != PendingDropOldest {
t.Fatalf("snapshot should include pending policy")
}
if snapshot.Errors.WriteErrors != GetWriteErrorCount() {
t.Fatalf("snapshot write error count should match global counter")
}
if !snapshot.Sampling.Enabled || snapshot.Sampling.Rate != 0.5 {
t.Fatalf("snapshot should include sampling stats, got %+v", snapshot.Sampling)
}
if !snapshot.Dedup.Enabled || snapshot.Dedup.Window != time.Second {
t.Fatalf("snapshot should include dedup stats, got %+v", snapshot.Dedup)
}
logger.SetSwitching(false)
}
func TestGetAsyncMetrics(t *testing.T) {
StopStacks()
StartStacks()
metrics := GetAsyncMetrics()
if !metrics.Started {
t.Fatalf("async metrics should show started state")
}
if metrics.QueueCapacity == 0 {
t.Fatalf("async metrics should expose queue capacity")
}
StopStacks()
metrics = GetAsyncMetrics()
if metrics.Started {
t.Fatalf("async metrics should show stopped state after StopStacks")
}
}
func TestMetricsSnapshotNilLogger(t *testing.T) {
var logger *StarLogger
snapshot := logger.GetMetricsSnapshot()
if snapshot.Time.IsZero() {
t.Fatalf("snapshot should contain timestamp")
}
}

145
multi_sink.go Normal file
View File

@ -0,0 +1,145 @@
package starlog
import "b612.me/starlog/internal/multisinkx"
type SinkState string
const (
SinkStateHealthy SinkState = SinkState(multisinkx.StateHealthy)
SinkStateDegraded SinkState = SinkState(multisinkx.StateDegraded)
SinkStateRecovered SinkState = SinkState(multisinkx.StateRecovered)
)
type SinkStats struct {
Index int
Writes uint64
WriteErrors uint64
Closes uint64
CloseErrors uint64
ConsecutiveWriteErrors uint64
ConsecutiveCloseErrors uint64
LastWriteError string
LastCloseError string
State SinkState
}
type MultiSinkStats struct {
ContinueOnError bool
Sinks []SinkStats
}
type MultiSink struct {
core *multisinkx.MultiSink
}
func wrapSinks(sinks []Sink) []multisinkx.Sink {
if len(sinks) == 0 {
return nil
}
result := make([]multisinkx.Sink, 0, len(sinks))
for _, sink := range sinks {
if sink == nil {
continue
}
result = append(result, sink)
}
return result
}
func toSinkStats(stats multisinkx.Stats) SinkStats {
return SinkStats{
Index: stats.Index,
Writes: stats.Writes,
WriteErrors: stats.WriteErrors,
Closes: stats.Closes,
CloseErrors: stats.CloseErrors,
ConsecutiveWriteErrors: stats.ConsecutiveWriteErrors,
ConsecutiveCloseErrors: stats.ConsecutiveCloseErrors,
LastWriteError: stats.LastWriteError,
LastCloseError: stats.LastCloseError,
State: SinkState(stats.State),
}
}
func toMultiSinkStats(snapshot multisinkx.Snapshot) MultiSinkStats {
result := MultiSinkStats{
ContinueOnError: snapshot.ContinueOnError,
Sinks: make([]SinkStats, 0, len(snapshot.Sinks)),
}
for _, item := range snapshot.Sinks {
result.Sinks = append(result.Sinks, toSinkStats(item))
}
return result
}
func NewMultiSink(sinks ...Sink) *MultiSink {
return &MultiSink{
core: multisinkx.New(wrapSinks(sinks)...),
}
}
func (sink *MultiSink) SetSinks(sinks ...Sink) {
if sink == nil || sink.core == nil {
return
}
sink.core.SetSinks(wrapSinks(sinks)...)
}
func (sink *MultiSink) AddSink(item Sink) {
if sink == nil || sink.core == nil || item == nil {
return
}
sink.core.AddSink(item)
}
func (sink *MultiSink) SetContinueOnError(continueOnError bool) {
if sink == nil || sink.core == nil {
return
}
sink.core.SetContinueOnError(continueOnError)
}
func (sink *MultiSink) ContinueOnError() bool {
if sink == nil || sink.core == nil {
return true
}
return sink.core.ContinueOnError()
}
func (sink *MultiSink) SinkCount() int {
if sink == nil || sink.core == nil {
return 0
}
return sink.core.SinkCount()
}
func (sink *MultiSink) GetStats() MultiSinkStats {
if sink == nil || sink.core == nil {
return MultiSinkStats{
ContinueOnError: true,
Sinks: nil,
}
}
return toMultiSinkStats(sink.core.GetStats())
}
func (sink *MultiSink) ResetStats() {
if sink == nil || sink.core == nil {
return
}
sink.core.ResetStats()
}
func (sink *MultiSink) Write(data []byte) error {
if sink == nil || sink.core == nil {
return nil
}
return sink.core.Write(data)
}
func (sink *MultiSink) Close() error {
if sink == nil || sink.core == nil {
return nil
}
return sink.core.Close()
}
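A minimal fan-out sketch for the API above (illustrative only; the stdout and in-memory audit sinks, and the module path taken from the internal imports, are assumptions rather than part of this commit):

package main

import (
	"bytes"
	"fmt"
	"os"

	"b612.me/starlog"
)

func main() {
	var audit bytes.Buffer
	// Every write is fanned out to stdout and the audit buffer.
	multi := starlog.NewMultiSink(
		starlog.NewWriterSink(os.Stdout),
		starlog.NewWriterSink(&audit),
	)
	logger := starlog.NewStarlog(nil)
	logger.SetShowStd(false)
	logger.SetSink(multi)
	logger.Infoln("hello fan-out")
	// Per-sink health is observable without extra wiring.
	for _, s := range multi.GetStats().Sinks {
		fmt.Printf("sink %d: writes=%d state=%s\n", s.Index, s.Writes, s.State)
	}
}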

221
multi_sink_test.go Normal file
View File

@ -0,0 +1,221 @@
package starlog
import (
"bytes"
"errors"
"sync/atomic"
"testing"
)
type sinkAlwaysFail struct{}
func (sink *sinkAlwaysFail) Write(data []byte) error {
_ = data
return errors.New("sink failed")
}
func (sink *sinkAlwaysFail) Close() error {
return nil
}
type sinkToggleFail struct {
fail uint32
}
func (sink *sinkToggleFail) SetFail(fail bool) {
if fail {
atomic.StoreUint32(&sink.fail, 1)
return
}
atomic.StoreUint32(&sink.fail, 0)
}
func (sink *sinkToggleFail) Write(data []byte) error {
_ = data
if atomic.LoadUint32(&sink.fail) == 1 {
return errors.New("toggle sink write failed")
}
return nil
}
func (sink *sinkToggleFail) Close() error {
if atomic.LoadUint32(&sink.fail) == 1 {
return errors.New("toggle sink close failed")
}
return nil
}
func TestMultiSinkWritesAll(t *testing.T) {
var a bytes.Buffer
var b bytes.Buffer
multi := NewMultiSink(NewWriterSink(&a), NewWriterSink(&b))
if err := multi.Write([]byte("hello")); err != nil {
t.Fatalf("multi sink write should succeed, got %v", err)
}
if a.String() != "hello" || b.String() != "hello" {
t.Fatalf("all sinks should receive data, got a=%q b=%q", a.String(), b.String())
}
}
func TestMultiSinkContinueOnError(t *testing.T) {
var out bytes.Buffer
multi := NewMultiSink(&sinkAlwaysFail{}, NewWriterSink(&out))
if err := multi.Write([]byte("x")); err == nil {
t.Fatalf("write should return error when one sink fails")
}
if out.String() != "x" {
t.Fatalf("healthy sink should still receive data when continueOnError=true, got %q", out.String())
}
}
func TestMultiSinkStopOnError(t *testing.T) {
var out bytes.Buffer
multi := NewMultiSink(&sinkAlwaysFail{}, NewWriterSink(&out))
multi.SetContinueOnError(false)
if err := multi.Write([]byte("x")); err == nil {
t.Fatalf("write should return error in stop-on-error mode")
}
if out.String() != "" {
t.Fatalf("later sinks should not run when continueOnError=false, got %q", out.String())
}
}
func TestLoggerSetSinks(t *testing.T) {
var a bytes.Buffer
var b bytes.Buffer
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.SetSinks(NewWriterSink(&a), NewWriterSink(&b))
logger.Info("fanout")
if !bytes.Contains(a.Bytes(), []byte("fanout")) || !bytes.Contains(b.Bytes(), []byte("fanout")) {
t.Fatalf("logger should write to all configured sinks")
}
}
func TestMultiSinkStatsPerSink(t *testing.T) {
var out bytes.Buffer
toggle := &sinkToggleFail{}
multi := NewMultiSink(toggle, NewWriterSink(&out))
if err := multi.Write([]byte("a")); err != nil {
t.Fatalf("first write should succeed, got %v", err)
}
stats := multi.GetStats()
if len(stats.Sinks) != 2 {
t.Fatalf("expected 2 sink stats, got %d", len(stats.Sinks))
}
first := stats.Sinks[0]
second := stats.Sinks[1]
if first.Writes != 1 || first.WriteErrors != 0 || first.State != SinkStateHealthy {
t.Fatalf("unexpected first sink stats after success: %+v", first)
}
if second.Writes != 1 || second.WriteErrors != 0 || second.State != SinkStateHealthy {
t.Fatalf("unexpected second sink stats after success: %+v", second)
}
toggle.SetFail(true)
if err := multi.Write([]byte("b")); err == nil {
t.Fatalf("write should fail when toggle sink is failing")
}
stats = multi.GetStats()
first = stats.Sinks[0]
second = stats.Sinks[1]
if first.Writes != 2 || first.WriteErrors != 1 {
t.Fatalf("unexpected first sink counters after failure: %+v", first)
}
if first.ConsecutiveWriteErrors == 0 || first.State != SinkStateDegraded {
t.Fatalf("first sink should become degraded on failure: %+v", first)
}
if first.LastWriteError == "" {
t.Fatalf("first sink should record last write error")
}
if second.Writes != 2 || second.WriteErrors != 0 || second.State != SinkStateHealthy {
t.Fatalf("healthy sink should continue receiving writes: %+v", second)
}
}
func TestMultiSinkStatsRecoveryAndReset(t *testing.T) {
toggle := &sinkToggleFail{}
multi := NewMultiSink(toggle)
toggle.SetFail(true)
if err := multi.Write([]byte("x")); err == nil {
t.Fatalf("write should fail when sink is failing")
}
toggle.SetFail(false)
if err := multi.Write([]byte("y")); err != nil {
t.Fatalf("write should recover when sink becomes healthy, got %v", err)
}
stats := multi.GetStats()
if len(stats.Sinks) != 1 {
t.Fatalf("expected 1 sink stats, got %d", len(stats.Sinks))
}
first := stats.Sinks[0]
if first.Writes != 2 || first.WriteErrors != 1 {
t.Fatalf("unexpected sink counters after recovery: %+v", first)
}
if first.ConsecutiveWriteErrors != 0 {
t.Fatalf("consecutive write errors should reset after recovery: %+v", first)
}
if first.State != SinkStateRecovered {
t.Fatalf("sink should be recovered after success following failures: %+v", first)
}
multi.ResetStats()
stats = multi.GetStats()
first = stats.Sinks[0]
if first.Writes != 0 || first.WriteErrors != 0 || first.Closes != 0 || first.CloseErrors != 0 {
t.Fatalf("reset should clear sink counters: %+v", first)
}
if first.LastWriteError != "" || first.LastCloseError != "" {
t.Fatalf("reset should clear last errors: %+v", first)
}
if first.State != SinkStateHealthy {
t.Fatalf("reset should set healthy state: %+v", first)
}
}
func TestMultiSinkCloseStats(t *testing.T) {
toggle := &sinkToggleFail{}
multi := NewMultiSink(toggle)
toggle.SetFail(true)
if err := multi.Close(); err == nil {
t.Fatalf("close should fail when sink close fails")
}
stats := multi.GetStats()
if len(stats.Sinks) != 1 {
t.Fatalf("expected 1 sink stats, got %d", len(stats.Sinks))
}
first := stats.Sinks[0]
if first.Closes != 1 || first.CloseErrors != 1 {
t.Fatalf("unexpected close counters after failure: %+v", first)
}
if first.ConsecutiveCloseErrors == 0 || first.State != SinkStateDegraded {
t.Fatalf("sink should become degraded on close failure: %+v", first)
}
toggle.SetFail(false)
if err := multi.Close(); err != nil {
t.Fatalf("close should recover when sink becomes healthy, got %v", err)
}
stats = multi.GetStats()
first = stats.Sinks[0]
if first.Closes != 2 || first.CloseErrors != 1 {
t.Fatalf("unexpected close counters after recovery: %+v", first)
}
if first.ConsecutiveCloseErrors != 0 || first.State != SinkStateRecovered {
t.Fatalf("close error state should recover after success: %+v", first)
}
}

71
newline_test.go Normal file
View File

@ -0,0 +1,71 @@
package starlog
import (
"bytes"
"strings"
"testing"
)
func TestAutoAppendNewlineDefaultOff(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.Infof("hello %d", 1)
got := buf.String()
if strings.HasSuffix(got, "\n") {
t.Fatalf("default behavior should keep no trailing newline for Infof, got %q", got)
}
}
func TestAutoAppendNewlineForInfof(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetAutoAppendNewline(true)
logger.Infof("hello %d", 2)
got := buf.String()
if !strings.HasSuffix(got, "\n") {
t.Fatalf("Infof should auto append trailing newline when enabled, got %q", got)
}
if strings.Count(got, "\n") != 1 {
t.Fatalf("Infof should append only one newline, got %q", got)
}
}
func TestAutoAppendNewlineNoDoubleForInfoln(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetAutoAppendNewline(true)
logger.Infoln("line")
got := buf.String()
if strings.Count(got, "\n") != 1 {
t.Fatalf("Infoln should keep single newline with auto append enabled, got %q", got)
}
}
func TestAutoAppendNewlineForWritef(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetAutoAppendNewline(true)
logger.Writef("raw-%d", 3)
got := buf.String()
if got != "raw-3\n" {
t.Fatalf("Writef should auto append newline when enabled, got %q", got)
}
}
func TestAutoAppendNewlineConfigApply(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
cfg := logger.GetConfig()
if cfg.AutoAppendNewline {
t.Fatalf("default AutoAppendNewline should be false")
}
cfg.AutoAppendNewline = true
logger.ApplyConfig(cfg)
if !logger.GetAutoAppendNewline() {
t.Fatalf("AutoAppendNewline should be true after ApplyConfig")
}
logger.Infof("cfg")
if !strings.HasSuffix(buf.String(), "\n") {
t.Fatalf("Infof should append newline after config apply")
}
}

268
observer.go Normal file
View File

@ -0,0 +1,268 @@
package starlog
import (
"context"
"b612.me/starlog/internal/observerx"
)
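// Observer is an entry Handler that records a bounded, in-memory copy of
// every entry it sees; once the limit is exceeded the oldest entries are
// dropped and counted.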
type Observer struct {
buffer *observerx.Buffer
}
func (observer *Observer) ensureBuffer() *observerx.Buffer {
if observer == nil {
return nil
}
if observer.buffer == nil {
observer.buffer = observerx.NewBuffer()
}
return observer.buffer
}
func NewObserver() *Observer {
return &Observer{
buffer: observerx.NewBuffer(),
}
}
func NewObserverWithLimit(limit int) *Observer {
observer := NewObserver()
observer.SetLimit(limit)
return observer
}
func (observer *Observer) Handle(ctx context.Context, entry *Entry) error {
_ = ctx
if observer == nil || entry == nil {
return nil
}
buffer := observer.ensureBuffer()
if buffer == nil {
return nil
}
entryCopy := *entry
entryCopy.Fields = cloneFields(entry.Fields)
buffer.Add(entryCopy)
return nil
}
func (observer *Observer) SetLimit(limit int) {
if observer == nil {
return
}
buffer := observer.ensureBuffer()
if buffer == nil {
return
}
buffer.SetLimit(limit)
}
func (observer *Observer) Limit() int {
if observer == nil {
return 0
}
buffer := observer.ensureBuffer()
if buffer == nil {
return 0
}
return buffer.Limit()
}
func (observer *Observer) Count() int {
if observer == nil {
return 0
}
buffer := observer.ensureBuffer()
if buffer == nil {
return 0
}
return buffer.Count()
}
func (observer *Observer) Dropped() uint64 {
if observer == nil {
return 0
}
buffer := observer.ensureBuffer()
if buffer == nil {
return 0
}
return buffer.Dropped()
}
func (observer *Observer) Entries() []Entry {
if observer == nil {
return nil
}
buffer := observer.ensureBuffer()
if buffer == nil {
return nil
}
items := buffer.Snapshot()
result := make([]Entry, 0, len(items))
for _, raw := range items {
entry, ok := raw.(Entry)
if !ok {
continue
}
item := entry
item.Fields = cloneFields(entry.Fields)
result = append(result, item)
}
return result
}
func (observer *Observer) Last() (Entry, bool) {
if observer == nil {
return Entry{}, false
}
buffer := observer.ensureBuffer()
if buffer == nil {
return Entry{}, false
}
raw, ok := buffer.Last()
if !ok {
return Entry{}, false
}
entry, ok := raw.(Entry)
if !ok {
return Entry{}, false
}
entry.Fields = cloneFields(entry.Fields)
return entry, true
}
func (observer *Observer) TakeAll() []Entry {
if observer == nil {
return nil
}
buffer := observer.ensureBuffer()
if buffer == nil {
return nil
}
items := buffer.TakeAll()
if len(items) == 0 {
return nil
}
result := make([]Entry, 0, len(items))
for _, raw := range items {
entry, ok := raw.(Entry)
if !ok {
continue
}
item := entry
item.Fields = cloneFields(entry.Fields)
result = append(result, item)
}
return result
}
func (observer *Observer) Reset() {
if observer == nil {
return
}
buffer := observer.ensureBuffer()
if buffer == nil {
return
}
buffer.Reset()
}
type testHookHandler struct {
observer *Observer
next Handler
}
func (handler *testHookHandler) Handle(ctx context.Context, entry *Entry) error {
if handler == nil {
return nil
}
var firstErr error
if handler.observer != nil {
if err := handler.observer.Handle(ctx, entry); err != nil && firstErr == nil {
firstErr = err
}
}
if handler.next != nil {
if err := handler.next.Handle(ctx, entry); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
type TestHook struct {
logger *StarLogger
observer *Observer
previous Handler
handler Handler
}
func NewTestHook(logger *StarLogger) *TestHook {
if logger == nil {
return nil
}
observer := NewObserver()
previous := logger.GetEntryHandler()
wrapper := &testHookHandler{
observer: observer,
next: previous,
}
logger.SetEntryHandler(wrapper)
return &TestHook{
logger: logger,
observer: observer,
previous: previous,
handler: wrapper,
}
}
func (hook *TestHook) Observer() *Observer {
if hook == nil {
return nil
}
return hook.observer
}
func (hook *TestHook) Entries() []Entry {
if hook == nil || hook.observer == nil {
return nil
}
return hook.observer.Entries()
}
func (hook *TestHook) Count() int {
if hook == nil || hook.observer == nil {
return 0
}
return hook.observer.Count()
}
func (hook *TestHook) Last() (Entry, bool) {
if hook == nil || hook.observer == nil {
return Entry{}, false
}
return hook.observer.Last()
}
func (hook *TestHook) Reset() {
if hook == nil || hook.observer == nil {
return
}
hook.observer.Reset()
}
// Close tries to restore the previous entry handler.
// It returns false when the current handler has been replaced externally;
// in that case the handler chain is left untouched.
func (hook *TestHook) Close() bool {
if hook == nil || hook.logger == nil {
return false
}
current := hook.logger.GetEntryHandler()
if current != hook.handler {
return false
}
hook.logger.SetEntryHandler(hook.previous)
return true
}
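A sketch of how a downstream test might use TestHook (hypothetical test; entry handlers can run asynchronously, so real tests should poll the way waitObserverCount does in observer_test.go below):

package starlog

import "testing"

func TestLoginEmitsMessage(t *testing.T) {
	logger := NewStarlog(nil)
	logger.SetShowStd(false)
	hook := NewTestHook(logger)
	defer hook.Close() // restore the previous handler chain

	logger.WithField("user_id", 42).Info("login ok")

	if last, ok := hook.Last(); ok && last.Message != "login ok" {
		t.Fatalf("unexpected message: %q", last.Message)
	}
}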

174
observer_test.go Normal file
View File

@ -0,0 +1,174 @@
package starlog
import (
"bytes"
"context"
"strings"
"sync/atomic"
"testing"
"time"
)
func waitObserverCount(t *testing.T, observer *Observer, want int, timeout time.Duration) {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if observer != nil && observer.Count() >= want {
return
}
time.Sleep(5 * time.Millisecond)
}
got := 0
if observer != nil {
got = observer.Count()
}
t.Fatalf("observer count timeout, want >= %d got %d", want, got)
}
func waitObserverCondition(t *testing.T, timeout time.Duration, cond func() bool, reason string) {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if cond() {
return
}
time.Sleep(5 * time.Millisecond)
}
t.Fatalf("observer condition timeout: %s", reason)
}
func TestObserverCollectsEntries(t *testing.T) {
defer StopStacks()
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
observer := NewObserver()
logger.AppendEntryHandler(observer)
logger.WithField("user_id", 42).Info("login ok")
waitObserverCount(t, observer, 1, 300*time.Millisecond)
entries := observer.Entries()
if len(entries) == 0 {
t.Fatalf("observer should collect entries")
}
last := entries[len(entries)-1]
if last.Message != "login ok" {
t.Fatalf("unexpected observed message: %q", last.Message)
}
if got, ok := last.Fields["user_id"]; !ok || got != 42 {
t.Fatalf("unexpected observed fields: %+v", last.Fields)
}
}
func TestObserverLimitAndDropped(t *testing.T) {
defer StopStacks()
observer := NewObserverWithLimit(2)
logger := newStructuredTestLogger(&bytes.Buffer{})
logger.AppendEntryHandler(observer)
logger.Info("one")
logger.Info("two")
logger.Info("three")
waitObserverCondition(t, 400*time.Millisecond, func() bool {
if observer.Dropped() == 0 {
return false
}
entries := observer.Entries()
if len(entries) != 2 {
return false
}
return entries[0].Message == "two" && entries[1].Message == "three"
}, "observer limit should keep newest two entries")
entries := observer.Entries()
if len(entries) != 2 {
t.Fatalf("observer should keep only limited entries, got %d", len(entries))
}
if entries[0].Message != "two" || entries[1].Message != "three" {
t.Fatalf("observer should keep newest entries, got %q %q", entries[0].Message, entries[1].Message)
}
if observer.Dropped() == 0 {
t.Fatalf("observer dropped count should increase when over limit")
}
}
func TestObserverTakeAllAndReset(t *testing.T) {
defer StopStacks()
observer := NewObserver()
logger := newStructuredTestLogger(&bytes.Buffer{})
logger.AppendEntryHandler(observer)
logger.Info("a")
logger.Info("b")
waitObserverCount(t, observer, 2, 300*time.Millisecond)
all := observer.TakeAll()
if len(all) != 2 {
t.Fatalf("take all should return all collected entries, got %d", len(all))
}
if observer.Count() != 0 {
t.Fatalf("take all should clear observer entries")
}
logger.Info("c")
waitObserverCount(t, observer, 1, 300*time.Millisecond)
observer.Reset()
if observer.Count() != 0 || observer.Dropped() != 0 {
t.Fatalf("reset should clear observer state")
}
}
func TestTestHookAttachAndRestore(t *testing.T) {
defer StopStacks()
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
var previousCount uint64
logger.SetEntryHandler(HandlerFunc(func(_ context.Context, _ *Entry) error {
atomic.AddUint64(&previousCount, 1)
return nil
}))
hook := NewTestHook(logger)
if hook == nil {
t.Fatalf("new test hook should not be nil")
}
logger.Info("hooked")
waitObserverCount(t, hook.Observer(), 1, 300*time.Millisecond)
if atomic.LoadUint64(&previousCount) == 0 {
t.Fatalf("test hook should keep previous handler in chain")
}
last, ok := hook.Last()
if !ok || last.Message != "hooked" {
t.Fatalf("unexpected hook last entry: ok=%v msg=%q", ok, last.Message)
}
if !hook.Close() {
t.Fatalf("hook close should restore previous handler when unchanged")
}
before := hook.Count()
logger.Info("after-close")
time.Sleep(30 * time.Millisecond)
if hook.Count() != before {
t.Fatalf("closed hook should stop collecting new entries")
}
if atomic.LoadUint64(&previousCount) < 2 {
t.Fatalf("previous handler should continue working after hook close")
}
if !strings.Contains(buf.String(), "after-close") {
t.Fatalf("logger output should still contain logs after hook close")
}
}
func TestTestHookCloseWhenHandlerReplaced(t *testing.T) {
defer StopStacks()
logger := newStructuredTestLogger(&bytes.Buffer{})
hook := NewTestHook(logger)
logger.SetEntryHandler(nil)
if hook.Close() {
t.Fatalf("hook close should fail when handler replaced externally")
}
}

432
p0_reliability_test.go Normal file
View File

@ -0,0 +1,432 @@
package starlog
import (
"bytes"
"context"
"errors"
"io"
"strings"
"sync/atomic"
"testing"
"time"
)
func TestWriteBufferFlushAfterSwitchingOff(t *testing.T) {
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetSwitching(true)
logger.Infoln("first")
logger.Infoln("second")
if got := buf.String(); strings.Contains(got, "first") || strings.Contains(got, "second") {
t.Fatalf("logs should stay buffered while switching=true, got: %q", got)
}
logger.SetSwitching(false)
got := buf.String()
if !strings.Contains(got, "first") || !strings.Contains(got, "second") {
t.Fatalf("buffered logs were not flushed after switching=false, got: %q", got)
}
}
func TestAsyncQueuePushFailureFallsBackToSyncHandler(t *testing.T) {
resetAsyncMetricsForTest()
defer func() {
resetAsyncMetricsForTest()
stackMu.Lock()
if stacks != nil {
_ = stacks.Close()
}
stackStarted = false
stacks = nil
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
}()
logger := NewStarlog(nil)
logger.SetShowStd(false)
var handled uint64
logger.handlerFunc = func(data LogData) {
atomic.AddUint64(&handled, 1)
}
alertCalled := make(chan struct{}, 1)
SetAsyncErrorHandler(func(err error, data LogData) {
select {
case alertCalled <- struct{}{}:
default:
}
})
stackMu.Lock()
stackStarted = true
stacks = nil
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
logger.Infoln("trigger async fallback")
if atomic.LoadUint64(&handled) != 1 {
t.Fatalf("handler should be called once via sync fallback, got: %d", handled)
}
if GetAsyncDropCount() == 0 {
t.Fatalf("async drop counter should increase on push failure")
}
select {
case <-alertCalled:
default:
t.Fatalf("async alert handler should be invoked on push failure")
}
}
func TestStarChanStackPushClosedReturnsEOF(t *testing.T) {
stack := newStarChanStack(1)
if err := stack.Close(); err != nil {
t.Fatalf("Close failed: %v", err)
}
if err := stack.Push("x"); err != io.EOF {
t.Fatalf("Push on closed stack should return io.EOF, got: %v", err)
}
}
func TestStarChanStackTryPushFullReturnsQueueFull(t *testing.T) {
stack := newStarChanStack(1)
if err := stack.Push("first"); err != nil {
t.Fatalf("Push failed: %v", err)
}
if err := stack.TryPush("second"); !errors.Is(err, errStackFull) {
t.Fatalf("TryPush on full queue should return errStackFull, got: %v", err)
}
}
func TestPendingWriteLimitDropOldest(t *testing.T) {
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetPendingWriteLimit(2)
logger.SetPendingDropPolicy(PendingDropOldest)
logger.SetSwitching(true)
logger.Infoln("one")
logger.Infoln("two")
logger.Infoln("three")
logger.SetSwitching(false)
got := buf.String()
if strings.Contains(got, "one") {
t.Fatalf("oldest pending log should be dropped, got %q", got)
}
if !strings.Contains(got, "two") || !strings.Contains(got, "three") {
t.Fatalf("newer pending logs should remain, got %q", got)
}
if logger.GetPendingDropCount() != 1 {
t.Fatalf("expected one dropped pending write, got %d", logger.GetPendingDropCount())
}
}
func TestPendingWriteLimitBlockPolicy(t *testing.T) {
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetPendingWriteLimit(1)
logger.SetPendingDropPolicy(PendingBlock)
logger.SetSwitching(true)
logger.Infoln("one")
done := make(chan struct{})
go func() {
logger.Infoln("two")
close(done)
}()
blocked := false
deadline := time.Now().Add(200 * time.Millisecond)
for time.Now().Before(deadline) {
select {
case <-done:
t.Fatalf("block policy should wait while queue is full and switching=true")
default:
}
if logger.GetPendingBlockCount() > 0 {
blocked = true
break
}
time.Sleep(5 * time.Millisecond)
}
if !blocked {
t.Fatalf("expected pending block count to increase")
}
logger.SetSwitching(false)
select {
case <-done:
case <-time.After(300 * time.Millisecond):
t.Fatalf("blocked write should continue after switching=false")
}
got := buf.String()
if !strings.Contains(got, "one") || !strings.Contains(got, "two") {
t.Fatalf("both logs should be persisted, got %q", got)
}
if logger.GetPendingDropCount() != 0 {
t.Fatalf("block policy should avoid drops, got %d", logger.GetPendingDropCount())
}
stats := logger.GetPendingStats()
if stats.BlockCount == 0 {
t.Fatalf("pending stats should expose block count")
}
}
func TestPendingStatsSnapshot(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetPendingWriteLimit(3)
logger.SetPendingDropPolicy(PendingDropNewest)
logger.SetSwitching(true)
logger.Infoln("one")
logger.Infoln("two")
stats := logger.GetPendingStats()
if stats.Limit != 3 {
t.Fatalf("unexpected limit: %d", stats.Limit)
}
if stats.Length != 2 {
t.Fatalf("unexpected pending length: %d", stats.Length)
}
if stats.Policy != PendingDropNewest {
t.Fatalf("unexpected policy: %v", stats.Policy)
}
if !stats.Switching {
t.Fatalf("expected switching flag true in stats snapshot")
}
if stats.PeakLength < 2 {
t.Fatalf("expected peak length >= 2, got %d", stats.PeakLength)
}
logger.SetSwitching(false)
}
type errWriter struct{}
func (w *errWriter) Write(p []byte) (int, error) {
return 0, errors.New("write failed")
}
func TestWriteErrorObservable(t *testing.T) {
resetAsyncMetricsForTest()
defer resetAsyncMetricsForTest()
logger := NewStarlog(&errWriter{})
logger.SetShowStd(false)
logger.SetShowColor(false)
observed := make(chan struct{}, 1)
SetWriteErrorHandler(func(err error, data LogData) {
if err != nil {
select {
case observed <- struct{}{}:
default:
}
}
})
logger.Infoln("write error check")
if GetWriteErrorCount() == 0 {
t.Fatalf("write error count should increase")
}
select {
case <-observed:
default:
t.Fatalf("write error handler should be invoked")
}
}
func TestAsyncHandlerPanicDoesNotCrash(t *testing.T) {
resetAsyncMetricsForTest()
defer func() {
resetAsyncMetricsForTest()
StopStacks()
}()
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetHandler(func(LogData) {
panic("boom")
})
logger.Infoln("panic safe")
time.Sleep(20 * time.Millisecond)
if GetAsyncDropCount() == 0 {
t.Fatalf("panic in async handler should be reported as drop")
}
}
func TestEntryHandlerTimeoutFallback(t *testing.T) {
resetAsyncMetricsForTest()
defer func() {
resetAsyncMetricsForTest()
StopStacks()
}()
logger := NewStarlog(nil)
logger.SetShowStd(false)
logger.SetEntryHandler(HandlerFunc(func(context.Context, *Entry) error {
time.Sleep(80 * time.Millisecond)
return nil
}))
logger.SetEntryHandlerTimeout(10 * time.Millisecond)
begin := time.Now()
logger.Infoln("entry timeout")
cost := time.Since(begin)
if cost > 60*time.Millisecond {
t.Fatalf("entry handler timeout should protect main path, took %v", cost)
}
deadline := time.Now().Add(300 * time.Millisecond)
for time.Now().Before(deadline) {
if GetAsyncDropCount() > 0 {
return
}
time.Sleep(5 * time.Millisecond)
}
t.Fatalf("entry handler timeout should be observable via async drop count")
}
func TestEntryHandlerQueueFullNoFallbackDoesNotBlock(t *testing.T) {
resetAsyncMetricsForTest()
defer func() {
resetAsyncMetricsForTest()
stackMu.Lock()
stackStarted = false
stacks = nil
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
}()
SetAsyncFallbackToSync(false)
logger := NewStarlog(nil)
logger.SetShowStd(false)
var handled uint64
logger.SetEntryHandler(HandlerFunc(func(context.Context, *Entry) error {
atomic.AddUint64(&handled, 1)
time.Sleep(80 * time.Millisecond)
return nil
}))
stackMu.Lock()
stackStarted = true
stacks = newStarChanStack(1)
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
if err := stacks.Push(logTransfer{
handlerFunc: func(LogData) {
time.Sleep(80 * time.Millisecond)
},
}); err != nil {
t.Fatalf("prepare full async queue failed: %v", err)
}
begin := time.Now()
logger.Infoln("entry queue full")
cost := time.Since(begin)
if cost > 60*time.Millisecond {
t.Fatalf("entry handler should not block when queue is full, took %v", cost)
}
if atomic.LoadUint64(&handled) != 0 {
t.Fatalf("entry handler should be dropped when queue is full and fallback disabled, got %d", handled)
}
if GetAsyncDropCount() == 0 {
t.Fatalf("queue-full drop should be observable")
}
}
func TestWriteMethodNameCompatibility(t *testing.T) {
logger := NewStarlog(nil)
logger.StopWrite()
if !logger.IsWriteStopped() || !logger.IsWriteStoed() {
t.Fatalf("both write-stop getters should report true")
}
logger.EnableWrite()
if logger.IsWriteStopped() || logger.IsWriteStoed() {
t.Fatalf("EnableWrite should resume writer for both getter names")
}
}
func TestLevelFilterAPI(t *testing.T) {
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
if !logger.IsLevelEnabled(LvDebug) {
t.Fatalf("debug level should be enabled by default")
}
logger.SetLevel(LvWarning)
if logger.GetLevel() != LvWarning {
t.Fatalf("unexpected level threshold: %d", logger.GetLevel())
}
if logger.IsLevelEnabled(LvInfo) {
t.Fatalf("info should be filtered by warning threshold")
}
if !logger.IsLevelEnabled(LvError) {
t.Fatalf("error should be enabled by warning threshold")
}
logger.Infoln("filtered")
logger.Warningln("visible")
logStr := buf.String()
if strings.Contains(logStr, "filtered") {
t.Fatalf("info log should be filtered, got %q", logStr)
}
if !strings.Contains(logStr, "visible") {
t.Fatalf("warning log should remain, got %q", logStr)
}
}
func TestParseLevel(t *testing.T) {
tests := map[string]int{
"debug": LvDebug,
"INFO": LvInfo,
"notice": LvNotice,
"warn": LvWarning,
"warning": LvWarning,
"err": LvError,
"critical": LvCritical,
"panic": LvPanic,
"fatal": LvFatal,
"7": 7,
}
for input, expected := range tests {
parsed, err := ParseLevel(input)
if err != nil {
t.Fatalf("ParseLevel(%q) returned error: %v", input, err)
}
if parsed != expected {
t.Fatalf("ParseLevel(%q)=%d, want %d", input, parsed, expected)
}
}
_, err := ParseLevel("unknown-level")
if !errors.Is(err, ErrInvalidLevel) {
t.Fatalf("ParseLevel invalid input should return ErrInvalidLevel, got %v", err)
}
}

204
p1_concurrency_test.go Normal file
View File

@ -0,0 +1,204 @@
package starlog
import (
"context"
"io"
"sync"
"testing"
"time"
)
type lockedWriter struct {
mu sync.Mutex
buf []byte
}
func (writer *lockedWriter) Write(data []byte) (int, error) {
writer.mu.Lock()
defer writer.mu.Unlock()
writer.buf = append(writer.buf, data...)
return len(data), nil
}
func TestConcurrentConfigAndLogging(t *testing.T) {
writer := &lockedWriter{}
logger := NewStarlog(writer)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.SetPendingWriteLimit(256)
stopCtx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
defer cancel()
var wait sync.WaitGroup
wait.Add(4)
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.WithField("gid", 1).Info("hello")
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.SetColorMode(ColorModeOff)
logger.SetColorMode(ColorModeLevelOnly)
logger.SetColorMode(ColorModeFullLine)
logger.SetShowFieldColor(true)
logger.SetShowFieldColor(false)
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.SetFormatter(NewTextFormatter())
logger.SetFormatter(NewJSONFormatter())
logger.SetFormatter(nil)
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.StopWrite()
logger.EnableWrite()
logger.SetSwitching(true)
logger.SetSwitching(false)
}
}
}()
wait.Wait()
if logger.GetPendingDropCount() > 0 && logger.GetPendingWriteLimit() == 0 {
t.Fatalf("pending drop count should not increase when limit disabled")
}
}
func TestConcurrentSinkSwitchAndWrite(t *testing.T) {
logger := NewStarlog(io.Discard)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetPendingWriteLimit(64)
logger.SetPendingDropPolicy(PendingDropOldest)
stopCtx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
defer cancel()
var wait sync.WaitGroup
wait.Add(3)
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.WithField("k", "v").Infoln("line")
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.SetWriter(io.Discard)
logger.SetSink(nil)
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.SetSwitching(true)
logger.SetSwitching(false)
}
}
}()
wait.Wait()
}
func TestConcurrentUpdateConfigAndLogging(t *testing.T) {
writer := &lockedWriter{}
logger := NewStarlog(writer)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
stopCtx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
defer cancel()
var wait sync.WaitGroup
wait.Add(2)
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.WithField("k", "v").Info("line")
}
}
}()
go func() {
defer wait.Done()
for {
select {
case <-stopCtx.Done():
return
default:
logger.UpdateConfig(func(cfg *Config) {
cfg.Level = LvDebug
cfg.ShowColor = false
cfg.OnlyColorLevel = false
cfg.ShowFieldColor = false
cfg.PendingWriteLimit = 128
cfg.PendingDropPolicy = PendingDropOldest
})
}
}
}()
wait.Wait()
}

322
pipeline.go Normal file
View File

@ -0,0 +1,322 @@
package starlog
import (
"context"
"errors"
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"b612.me/starlog/internal/pipelinex"
)
type FileInfo = os.FileInfo
type Fields map[string]interface{}
type Entry struct {
Time time.Time
Level int
LevelName string
LoggerName string
Thread string
File string
Line int
Func string
Message string
Fields Fields
Err error
Context context.Context
}
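// Handler consumes assembled entries; install one via SetEntryHandler or
// chain several with AppendEntryHandler.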
type Handler interface {
Handle(context.Context, *Entry) error
}
type Formatter interface {
Format(*Entry) ([]byte, error)
}
type Sink interface {
Write([]byte) error
Close() error
}
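// RotatePolicy decides when the active log file should rotate and which
// path the next (or archived) file should use.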
type RotatePolicy interface {
ShouldRotate(FileInfo, *Entry) bool
NextPath(string, time.Time) string
}
// RotateArchivePathProvider is an optional extension for RotatePolicy.
// If implemented, ArchivePath is preferred over NextPath when resolving
// the archived file destination path.
type RotateArchivePathProvider interface {
ArchivePath(string, time.Time) string
}
func resolveRotateArchivePath(policy RotatePolicy, current string, now time.Time) string {
if policy == nil {
return ""
}
if provider, ok := policy.(RotateArchivePathProvider); ok {
if path := provider.ArchivePath(current, now); path != "" {
return path
}
}
return policy.NextPath(current, now)
}
type Redactor interface {
Redact(context.Context, *Entry) error
}
type RedactRule interface {
Apply(context.Context, *Entry) (bool, error)
}
type HandlerFunc func(context.Context, *Entry) error
func (f HandlerFunc) Handle(ctx context.Context, entry *Entry) error {
if f == nil {
return nil
}
return f(ctx, entry)
}
type RedactorFunc func(context.Context, *Entry) error
func (f RedactorFunc) Redact(ctx context.Context, entry *Entry) error {
if f == nil {
return nil
}
return f(ctx, entry)
}
type RedactRuleFunc func(context.Context, *Entry) (bool, error)
func (f RedactRuleFunc) Apply(ctx context.Context, entry *Entry) (bool, error) {
if f == nil {
return false, nil
}
return f(ctx, entry)
}
func cloneFields(fields Fields) Fields {
if len(fields) == 0 {
return nil
}
cloned := make(Fields, len(fields))
for k, v := range fields {
cloned[k] = v
}
return cloned
}
func mergeFields(base Fields, extra Fields) Fields {
switch {
case len(base) == 0 && len(extra) == 0:
return nil
case len(base) == 0:
return cloneFields(extra)
case len(extra) == 0:
return cloneFields(base)
default:
merged := make(Fields, len(base)+len(extra))
for k, v := range base {
merged[k] = v
}
for k, v := range extra {
merged[k] = v
}
return merged
}
}
func renderFields(fields Fields) string {
if len(fields) == 0 {
return ""
}
keys := make([]string, 0, len(fields))
for key := range fields {
keys = append(keys, key)
}
sort.Strings(keys)
pairs := make([]string, 0, len(keys))
for _, key := range keys {
pairs = append(pairs, fmt.Sprintf("%s=%v", key, fields[key]))
}
return strings.Join(pairs, " ")
}
type TextFormatter struct {
IncludeTimestamp bool
IncludeLevel bool
IncludeSource bool
IncludeThread bool
IncludeLogger bool
}
func NewTextFormatter() *TextFormatter {
return &TextFormatter{
IncludeTimestamp: true,
IncludeLevel: true,
IncludeSource: true,
IncludeThread: true,
IncludeLogger: false,
}
}
func (formatter *TextFormatter) Format(entry *Entry) ([]byte, error) {
if entry == nil {
return []byte(""), nil
}
options := pipelinex.TextOptions{
IncludeTimestamp: formatter == nil || formatter.IncludeTimestamp,
IncludeLevel: formatter == nil || formatter.IncludeLevel,
IncludeSource: formatter == nil || formatter.IncludeSource,
IncludeThread: formatter == nil || formatter.IncludeThread,
IncludeLogger: formatter != nil && formatter.IncludeLogger,
}
pipeEntry := pipelinex.Entry{
Time: entry.Time,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Thread: entry.Thread,
File: entry.File,
Line: entry.Line,
Func: entry.Func,
Message: entry.Message,
Fields: cloneFields(entry.Fields),
}
if entry.Err != nil {
pipeEntry.Error = entry.Err.Error()
}
return pipelinex.FormatText(pipeEntry, options)
}
type JSONFormatter struct {
Pretty bool
}
func NewJSONFormatter() *JSONFormatter {
return &JSONFormatter{}
}
func (formatter *JSONFormatter) Format(entry *Entry) ([]byte, error) {
if entry == nil {
return []byte("{}"), nil
}
pipeEntry := pipelinex.Entry{
Time: entry.Time,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Thread: entry.Thread,
File: entry.File,
Line: entry.Line,
Func: entry.Func,
Message: entry.Message,
Fields: cloneFields(entry.Fields),
}
if entry.Err != nil {
pipeEntry.Error = entry.Err.Error()
}
return pipelinex.FormatJSON(pipeEntry, formatter != nil && formatter.Pretty)
}
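// WriterSink adapts an io.Writer to the Sink interface; Close is forwarded
// only when the wrapped writer is also an io.Closer.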
type WriterSink struct {
writer io.Writer
closer io.Closer
}
func NewWriterSink(writer io.Writer) *WriterSink {
sink := &WriterSink{writer: writer}
if closer, ok := writer.(io.Closer); ok {
sink.closer = closer
}
return sink
}
func (sink *WriterSink) Write(data []byte) error {
if sink == nil || sink.writer == nil {
return errors.New("sink writer is nil")
}
_, err := sink.writer.Write(data)
return err
}
func (sink *WriterSink) Close() error {
if sink == nil || sink.closer == nil {
return nil
}
return sink.closer.Close()
}
type RotatePolicyArchive struct {
policy RotatePolicy
checkInterval int64
hookBefore func(*StarLogger, string, string, os.FileInfo) error
hookAfter func(*StarLogger, string, string, os.FileInfo) error
}
func NewRotatePolicyArchive(policy RotatePolicy, checkInterval int64) *RotatePolicyArchive {
return &RotatePolicyArchive{
policy: policy,
checkInterval: checkInterval,
}
}
func (archive *RotatePolicyArchive) ShouldArchiveNow(logger *StarLogger, fullpath string, info os.FileInfo) bool {
if archive == nil || archive.policy == nil {
return false
}
return archive.policy.ShouldRotate(info, nil)
}
func (archive *RotatePolicyArchive) NextLogFilePath(logger *StarLogger, oldpath string, info os.FileInfo) string {
return oldpath
}
func (archive *RotatePolicyArchive) ArchiveLogFilePath(logger *StarLogger, oldpath string, info os.FileInfo) string {
if archive == nil || archive.policy == nil {
return oldpath
}
return resolveRotateArchivePath(archive.policy, oldpath, time.Now())
}
func (archive *RotatePolicyArchive) Interval() int64 {
if archive == nil || archive.checkInterval <= 0 {
return 1
}
return archive.checkInterval
}
// HookBeforArchive is a misspelled legacy alias kept for API compatibility;
// new code should call HookBeforeArchive.
func (archive *RotatePolicyArchive) HookBeforArchive() func(*StarLogger, string, string, os.FileInfo) error {
return archive.HookBeforeArchive()
}
func (archive *RotatePolicyArchive) HookBeforeArchive() func(*StarLogger, string, string, os.FileInfo) error {
if archive == nil {
return nil
}
return archive.hookBefore
}
func (archive *RotatePolicyArchive) HookAfterArchive() func(*StarLogger, string, string, os.FileInfo) error {
if archive == nil {
return nil
}
return archive.hookAfter
}
func (archive *RotatePolicyArchive) DoArchive() func(*StarLogger, string, string, os.FileInfo) error {
return nil
}
func (archive *RotatePolicyArchive) SetHookBeforeArchive(hook func(*StarLogger, string, string, os.FileInfo) error) {
if archive == nil {
return
}
archive.hookBefore = hook
}
// SetHookBeforArchive is the misspelled legacy alias of SetHookBeforeArchive.
func (archive *RotatePolicyArchive) SetHookBeforArchive(hook func(*StarLogger, string, string, os.FileInfo) error) {
archive.SetHookBeforeArchive(hook)
}
func (archive *RotatePolicyArchive) SetHookAfterArchive(hook func(*StarLogger, string, string, os.FileInfo) error) {
if archive == nil {
return
}
archive.hookAfter = hook
}
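A minimal RotatePolicy sketch that also implements the optional RotateArchivePathProvider extension (the size threshold and timestamp naming are assumptions):

package starlog

import (
	"fmt"
	"os"
	"time"
)

// sizePolicy rotates once the active file reaches maxBytes.
type sizePolicy struct {
	maxBytes int64
}

func (p sizePolicy) ShouldRotate(info os.FileInfo, _ *Entry) bool {
	return info != nil && info.Size() >= p.maxBytes
}

// NextPath keeps writing to the same active file name.
func (p sizePolicy) NextPath(current string, _ time.Time) string {
	return current
}

// ArchivePath is preferred by resolveRotateArchivePath over NextPath, so
// archives get timestamped names while the live file path stays stable.
func (p sizePolicy) ArchivePath(current string, now time.Time) string {
	return fmt.Sprintf("%s.%s", current, now.Format("20060102-150405"))
}

Wrapped as NewRotatePolicyArchive(sizePolicy{maxBytes: 64 << 20}, 5), the archive adapter delegates ShouldArchiveNow and ArchiveLogFilePath to this policy.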

91
presets.go Normal file
View File

@ -0,0 +1,91 @@
package starlog
import "io"
func applyProductionPreset(cfg *Config) {
if cfg == nil {
return
}
cfg.Level = LvInfo
cfg.StdErrLevel = LvError
cfg.ShowColor = false
cfg.OnlyColorLevel = false
cfg.ShowStd = false
cfg.ShowOriginFile = false
cfg.ShowFuncName = false
cfg.ShowFlag = false
cfg.ShowLevel = true
cfg.ShowFieldColor = false
cfg.Formatter = NewJSONFormatter()
cfg.PendingWriteLimit = 4096
cfg.PendingDropPolicy = PendingDropOldest
cfg.RedactFailMode = RedactFailMaskAll
cfg.RedactMaskToken = "[REDACTED]"
cfg.Sampling = normalizeSamplingConfig(DefaultSamplingConfig())
cfg.Dedup = normalizeDedupConfig(DefaultDedupConfig())
}
func applyDevelopmentPreset(cfg *Config) {
if cfg == nil {
return
}
cfg.Level = LvDebug
cfg.StdErrLevel = LvError
cfg.ShowColor = true
cfg.OnlyColorLevel = true
cfg.ShowStd = false
cfg.ShowOriginFile = true
cfg.ShowFuncName = true
cfg.ShowFlag = true
cfg.ShowLevel = true
cfg.ShowFieldColor = true
cfg.Formatter = nil
cfg.PendingWriteLimit = 1024
cfg.PendingDropPolicy = PendingDropOldest
cfg.RedactFailMode = RedactFailMaskAll
cfg.RedactMaskToken = "[REDACTED]"
cfg.Sampling = normalizeSamplingConfig(DefaultSamplingConfig())
cfg.Dedup = normalizeDedupConfig(DefaultDedupConfig())
}
func NewProductionConfig() Config {
cfg := DefaultConfig()
applyProductionPreset(&cfg)
return cfg
}
func NewDevelopmentConfig() Config {
cfg := DefaultConfig()
applyDevelopmentPreset(&cfg)
return cfg
}
func (logger *StarLogger) ApplyProductionConfig() {
if logger == nil {
return
}
logger.UpdateConfig(func(cfg *Config) {
applyProductionPreset(cfg)
})
}
func (logger *StarLogger) ApplyDevelopmentConfig() {
if logger == nil {
return
}
logger.UpdateConfig(func(cfg *Config) {
applyDevelopmentPreset(cfg)
})
}
func NewProduction(out io.Writer) *StarLogger {
logger := NewStarlog(out)
logger.ApplyProductionConfig()
return logger
}
func NewDevelopment(out io.Writer) *StarLogger {
logger := NewStarlog(out)
logger.ApplyDevelopmentConfig()
return logger
}
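Choosing a preset at startup might look like the sketch below (the environment switch is an assumption; both constructors are defined above):

package main

import (
	"os"

	"b612.me/starlog"
)

func main() {
	var logger *starlog.StarLogger
	if os.Getenv("APP_ENV") == "production" {
		// JSON formatter, info threshold, bounded pending queue.
		logger = starlog.NewProduction(os.Stdout)
	} else {
		// Colored text, debug threshold, rich source fields.
		logger = starlog.NewDevelopment(os.Stdout)
	}
	logger.Infoln("service started")
}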

83
presets_test.go Normal file
View File

@ -0,0 +1,83 @@
package starlog
import (
"bytes"
"io"
"testing"
)
func TestNewProductionConfig(t *testing.T) {
cfg := NewProductionConfig()
if cfg.Level != LvInfo {
t.Fatalf("production level should be info, got %d", cfg.Level)
}
if cfg.ShowColor {
t.Fatalf("production should disable color")
}
if cfg.ShowStd {
t.Fatalf("production should disable direct std output")
}
if cfg.Formatter == nil {
t.Fatalf("production should set formatter")
}
if _, ok := cfg.Formatter.(*JSONFormatter); !ok {
t.Fatalf("production formatter should be JSONFormatter")
}
}
func TestNewDevelopmentConfig(t *testing.T) {
cfg := NewDevelopmentConfig()
if cfg.Level != LvDebug {
t.Fatalf("development level should be debug, got %d", cfg.Level)
}
if !cfg.ShowColor || !cfg.OnlyColorLevel {
t.Fatalf("development should enable level-only color mode")
}
if !cfg.ShowFuncName || !cfg.ShowOriginFile || !cfg.ShowFlag {
t.Fatalf("development should keep rich source fields")
}
if cfg.Formatter != nil {
t.Fatalf("development preset should use built-in text rendering")
}
}
func TestApplyProductionConfigPreservesWriterAndName(t *testing.T) {
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetName("svc")
logger.ApplyProductionConfig()
cfg := logger.GetConfig()
if cfg.Name != "svc" {
t.Fatalf("apply production should preserve logger name")
}
if cfg.Writer == nil {
t.Fatalf("apply production should preserve writer")
}
if _, ok := cfg.Formatter.(*JSONFormatter); !ok {
t.Fatalf("apply production should set JSON formatter")
}
}
func TestPresetConstructors(t *testing.T) {
prod := NewProduction(io.Discard)
if prod == nil {
t.Fatalf("NewProduction should return logger")
}
prodCfg := prod.GetConfig()
if prodCfg.Level != LvInfo {
t.Fatalf("NewProduction should apply production config")
}
if prodCfg.Writer == nil {
t.Fatalf("NewProduction should keep output writer")
}
dev := NewDevelopment(io.Discard)
if dev == nil {
t.Fatalf("NewDevelopment should return logger")
}
devCfg := dev.GetConfig()
if devCfg.Level != LvDebug {
t.Fatalf("NewDevelopment should apply development config")
}
}

561
rate_limit.go Normal file
View File

@ -0,0 +1,561 @@
package starlog
import (
"math"
"strconv"
"strings"
"sync"
"time"
)
type RateLimitScope int
const (
RateLimitScopeGlobal RateLimitScope = iota
RateLimitScopeByKey
)
type RateLimitDropPolicy int
const (
RateLimitDrop RateLimitDropPolicy = iota
RateLimitPassThrough
)
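// RateLimitDrop discards over-limit entries outright; RateLimitPassThrough
// still emits them while counting and reporting the overflow.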
type RateLimitDropData struct {
Time time.Time
Key string
Reason string
Level int
LevelName string
LoggerName string
Message string
PassThrough bool
DroppedCount uint64
PassedThroughCount uint64
Suppressed uint64
Summary bool
SummarySuppressed uint64
}
type RateLimitStats struct {
Enabled bool
Rate float64
Burst int
Scope RateLimitScope
Allowed uint64
Dropped uint64
PassedThrough uint64
Suppressed uint64
Summaries uint64
LastDropTime time.Time
LastDropKey string
LastReason string
CurrentKeys int
}
type RateLimitConfig struct {
Enable bool
Levels []int
Rate float64
Burst int
Scope RateLimitScope
KeyFunc func(*Entry) string
MaxKeys int
KeyTTL time.Duration
DropPolicy RateLimitDropPolicy
OnDrop func(RateLimitDropData)
ExemptLevels []int
ExemptMatcher func(*Entry) bool
SummaryInterval time.Duration
}
type rateLimitBucket struct {
tokens float64
lastRefill time.Time
lastSeen time.Time
}
type rateLimiter struct {
mu sync.Mutex
cfg RateLimitConfig
limitedLevel map[int]struct{}
exemptLevel map[int]struct{}
buckets map[string]*rateLimitBucket
nowFunc func() time.Time
lastCleanupTime time.Time
lastSummaryTime time.Time
allowedCount uint64
droppedCount uint64
passedThroughCount uint64
suppressedCount uint64
summaryCount uint64
lastDropTime time.Time
lastDropKey string
lastDropReason string
}
func DefaultRateLimitConfig() RateLimitConfig {
return RateLimitConfig{
Enable: false,
Levels: nil,
Rate: 0,
Burst: 1,
Scope: RateLimitScopeGlobal,
KeyFunc: nil,
MaxKeys: 4096,
KeyTTL: 10 * time.Minute,
DropPolicy: RateLimitDrop,
OnDrop: nil,
ExemptLevels: []int{LvError, LvCritical, LvPanic, LvFatal},
ExemptMatcher: nil,
SummaryInterval: 0,
}
}
func cloneIntSlice(values []int) []int {
if len(values) == 0 {
return nil
}
result := make([]int, len(values))
copy(result, values)
return result
}
func cloneRateLimitConfig(cfg RateLimitConfig) RateLimitConfig {
cloned := cfg
cloned.Levels = cloneIntSlice(cfg.Levels)
cloned.ExemptLevels = cloneIntSlice(cfg.ExemptLevels)
return cloned
}
func normalizeRateLimitConfig(cfg RateLimitConfig) RateLimitConfig {
if cfg.Rate < 0 {
cfg.Rate = 0
}
if cfg.Burst <= 0 {
if cfg.Rate > 0 {
cfg.Burst = int(math.Ceil(cfg.Rate))
}
if cfg.Burst <= 0 {
cfg.Burst = 1
}
}
switch cfg.Scope {
case RateLimitScopeGlobal, RateLimitScopeByKey:
default:
cfg.Scope = RateLimitScopeGlobal
}
switch cfg.DropPolicy {
case RateLimitDrop, RateLimitPassThrough:
default:
cfg.DropPolicy = RateLimitDrop
}
if cfg.MaxKeys <= 0 {
cfg.MaxKeys = 4096
}
if cfg.KeyTTL <= 0 {
cfg.KeyTTL = 10 * time.Minute
}
if cfg.SummaryInterval < 0 {
cfg.SummaryInterval = 0
}
return cloneRateLimitConfig(cfg)
}
func buildLevelSet(levels []int) map[int]struct{} {
result := make(map[int]struct{}, len(levels))
for _, level := range levels {
result[level] = struct{}{}
}
return result
}
func newRateLimiter() *rateLimiter {
cfg := normalizeRateLimitConfig(DefaultRateLimitConfig())
return &rateLimiter{
cfg: cfg,
limitedLevel: buildLevelSet(cfg.Levels),
exemptLevel: buildLevelSet(cfg.ExemptLevels),
buckets: make(map[string]*rateLimitBucket),
nowFunc: time.Now,
}
}
func (limiter *rateLimiter) setNowFuncForTest(nowFunc func() time.Time) {
if limiter == nil {
return
}
limiter.mu.Lock()
if nowFunc == nil {
limiter.nowFunc = time.Now
} else {
limiter.nowFunc = nowFunc
}
limiter.mu.Unlock()
}
func (limiter *rateLimiter) now() time.Time {
if limiter == nil || limiter.nowFunc == nil {
return time.Now()
}
return limiter.nowFunc()
}
func (limiter *rateLimiter) SetConfig(cfg RateLimitConfig) {
if limiter == nil {
return
}
normalized := normalizeRateLimitConfig(cfg)
limiter.mu.Lock()
limiter.cfg = normalized
limiter.limitedLevel = buildLevelSet(normalized.Levels)
limiter.exemptLevel = buildLevelSet(normalized.ExemptLevels)
limiter.buckets = make(map[string]*rateLimitBucket)
limiter.lastCleanupTime = time.Time{}
limiter.lastSummaryTime = time.Time{}
limiter.mu.Unlock()
}
func (limiter *rateLimiter) Config() RateLimitConfig {
if limiter == nil {
return normalizeRateLimitConfig(DefaultRateLimitConfig())
}
limiter.mu.Lock()
defer limiter.mu.Unlock()
return cloneRateLimitConfig(limiter.cfg)
}
func (limiter *rateLimiter) Stats() RateLimitStats {
if limiter == nil {
return RateLimitStats{}
}
limiter.mu.Lock()
defer limiter.mu.Unlock()
return RateLimitStats{
Enabled: limiter.cfg.Enable,
Rate: limiter.cfg.Rate,
Burst: limiter.cfg.Burst,
Scope: limiter.cfg.Scope,
Allowed: limiter.allowedCount,
Dropped: limiter.droppedCount,
PassedThrough: limiter.passedThroughCount,
Suppressed: limiter.suppressedCount,
Summaries: limiter.summaryCount,
LastDropTime: limiter.lastDropTime,
LastDropKey: limiter.lastDropKey,
LastReason: limiter.lastDropReason,
CurrentKeys: len(limiter.buckets),
}
}
func (limiter *rateLimiter) ResetStats() {
if limiter == nil {
return
}
limiter.mu.Lock()
limiter.allowedCount = 0
limiter.droppedCount = 0
limiter.passedThroughCount = 0
limiter.suppressedCount = 0
limiter.summaryCount = 0
limiter.lastDropTime = time.Time{}
limiter.lastDropKey = ""
limiter.lastDropReason = ""
limiter.mu.Unlock()
}
func (limiter *rateLimiter) isLimitedLevel(level int) bool {
if len(limiter.limitedLevel) == 0 {
return true
}
_, ok := limiter.limitedLevel[level]
return ok
}
func (limiter *rateLimiter) isExempt(entry *Entry) bool {
if entry == nil {
return false
}
if _, ok := limiter.exemptLevel[entry.Level]; ok {
return true
}
if limiter.cfg.ExemptMatcher != nil && limiter.cfg.ExemptMatcher(entry) {
return true
}
return false
}
func (limiter *rateLimiter) resolveKey(entry *Entry) string {
if limiter.cfg.Scope == RateLimitScopeGlobal {
return "__global__"
}
if limiter.cfg.KeyFunc != nil {
key := strings.TrimSpace(limiter.cfg.KeyFunc(entry))
if key != "" {
return key
}
}
if entry == nil {
return "__empty__"
}
message := strings.TrimSpace(entry.Message)
if message == "" {
return strconv.Itoa(entry.Level)
}
return strconv.Itoa(entry.Level) + ":" + message
}
func (limiter *rateLimiter) cleanupBucketsLocked(now time.Time) {
if limiter.cfg.Scope != RateLimitScopeByKey || limiter.cfg.KeyTTL <= 0 {
return
}
if !limiter.lastCleanupTime.IsZero() && now.Sub(limiter.lastCleanupTime) < time.Second {
return
}
for key, bucket := range limiter.buckets {
if bucket == nil {
delete(limiter.buckets, key)
continue
}
if now.Sub(bucket.lastSeen) > limiter.cfg.KeyTTL {
delete(limiter.buckets, key)
}
}
limiter.lastCleanupTime = now
}
func (limiter *rateLimiter) getBucketLocked(key string, now time.Time) *rateLimitBucket {
if bucket, ok := limiter.buckets[key]; ok && bucket != nil {
return bucket
}
if limiter.cfg.Scope == RateLimitScopeByKey && limiter.cfg.MaxKeys > 0 && len(limiter.buckets) >= limiter.cfg.MaxKeys {
overflowKey := "__overflow__"
if bucket, ok := limiter.buckets[overflowKey]; ok && bucket != nil {
return bucket
}
oldestKey := ""
oldestTime := now
for existingKey, bucket := range limiter.buckets {
if bucket == nil {
oldestKey = existingKey
break
}
if oldestKey == "" || bucket.lastSeen.Before(oldestTime) {
oldestKey = existingKey
oldestTime = bucket.lastSeen
}
}
if oldestKey != "" {
delete(limiter.buckets, oldestKey)
}
key = overflowKey
}
bucket := &rateLimitBucket{
tokens: float64(limiter.cfg.Burst),
lastRefill: now,
lastSeen: now,
}
limiter.buckets[key] = bucket
return bucket
}
func (limiter *rateLimiter) takeTokenLocked(bucket *rateLimitBucket, now time.Time) bool {
if bucket == nil {
return true
}
if bucket.lastRefill.IsZero() {
bucket.lastRefill = now
}
elapsed := now.Sub(bucket.lastRefill).Seconds()
if elapsed > 0 && limiter.cfg.Rate > 0 {
bucket.tokens += elapsed * limiter.cfg.Rate
maxTokens := float64(limiter.cfg.Burst)
if bucket.tokens > maxTokens {
bucket.tokens = maxTokens
}
bucket.lastRefill = now
}
bucket.lastSeen = now
if bucket.tokens >= 1 {
bucket.tokens -= 1
return true
}
return false
}
func cloneEntryForDrop(entry *Entry) Entry {
if entry == nil {
return Entry{}
}
cloned := *entry
cloned.Fields = cloneFields(entry.Fields)
return cloned
}
func (limiter *rateLimiter) Allow(entry *Entry) bool {
if limiter == nil || entry == nil {
return true
}
now := limiter.now()
var callback func(RateLimitDropData)
dropData := RateLimitDropData{}
allow := true
limiter.mu.Lock()
if !limiter.cfg.Enable || limiter.cfg.Rate <= 0 {
limiter.allowedCount++
limiter.mu.Unlock()
return true
}
if !limiter.isLimitedLevel(entry.Level) || limiter.isExempt(entry) {
limiter.allowedCount++
limiter.mu.Unlock()
return true
}
limiter.cleanupBucketsLocked(now)
key := limiter.resolveKey(entry)
bucket := limiter.getBucketLocked(key, now)
if limiter.takeTokenLocked(bucket, now) {
limiter.allowedCount++
limiter.mu.Unlock()
return true
}
limiter.suppressedCount++
limiter.lastDropTime = now
limiter.lastDropKey = key
limiter.lastDropReason = "rate_limit_exceeded"
if limiter.cfg.DropPolicy == RateLimitPassThrough {
limiter.passedThroughCount++
limiter.allowedCount++
allow = true
} else {
limiter.droppedCount++
allow = false
}
dropData = RateLimitDropData{
Time: now,
Key: key,
Reason: limiter.lastDropReason,
Level: entry.Level,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Message: entry.Message,
PassThrough: limiter.cfg.DropPolicy == RateLimitPassThrough,
DroppedCount: limiter.droppedCount,
PassedThroughCount: limiter.passedThroughCount,
Suppressed: limiter.suppressedCount,
}
if limiter.cfg.SummaryInterval > 0 {
if limiter.lastSummaryTime.IsZero() {
limiter.lastSummaryTime = now
} else if now.Sub(limiter.lastSummaryTime) >= limiter.cfg.SummaryInterval {
dropData.Summary = true
dropData.SummarySuppressed = limiter.suppressedCount
limiter.summaryCount++
limiter.suppressedCount = 0
limiter.lastSummaryTime = now
}
}
callback = limiter.cfg.OnDrop
limiter.mu.Unlock()
if callback != nil {
entryCopy := cloneEntryForDrop(entry)
dropData.Level = entryCopy.Level
dropData.LevelName = entryCopy.LevelName
dropData.LoggerName = entryCopy.LoggerName
dropData.Message = entryCopy.Message
func() {
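// Swallow panics from the user-supplied OnDrop callback so a faulty
// handler cannot break the logging path.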
defer func() {
recover()
}()
callback(dropData)
}()
}
return allow
}
func (logger *starlog) allowByRateLimit(entry *Entry) bool {
if logger == nil || logger.rateLimiter == nil {
return true
}
return logger.rateLimiter.Allow(entry)
}
func (logger *StarLogger) SetRateLimitConfig(cfg RateLimitConfig) {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
if logger.logcore.rateLimiter == nil {
logger.logcore.rateLimiter = newRateLimiter()
}
limiter := logger.logcore.rateLimiter
logger.logcore.mu.Unlock()
limiter.SetConfig(cfg)
}
func (logger *StarLogger) GetRateLimitConfig() RateLimitConfig {
if logger == nil || logger.logcore == nil {
return normalizeRateLimitConfig(DefaultRateLimitConfig())
}
logger.logcore.mu.Lock()
limiter := logger.logcore.rateLimiter
logger.logcore.mu.Unlock()
if limiter == nil {
return normalizeRateLimitConfig(DefaultRateLimitConfig())
}
return limiter.Config()
}
func (logger *StarLogger) EnableRateLimit(enable bool) {
cfg := logger.GetRateLimitConfig()
cfg.Enable = enable
logger.SetRateLimitConfig(cfg)
}
func (logger *StarLogger) SetRateLimitDropHandler(handler func(RateLimitDropData)) {
cfg := logger.GetRateLimitConfig()
cfg.OnDrop = handler
logger.SetRateLimitConfig(cfg)
}
func (logger *StarLogger) GetRateLimitStats() RateLimitStats {
if logger == nil || logger.logcore == nil {
return RateLimitStats{}
}
logger.logcore.mu.Lock()
limiter := logger.logcore.rateLimiter
logger.logcore.mu.Unlock()
if limiter == nil {
return RateLimitStats{}
}
return limiter.Stats()
}
func (logger *StarLogger) ResetRateLimitStats() {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
limiter := logger.logcore.rateLimiter
logger.logcore.mu.Unlock()
if limiter == nil {
return
}
limiter.ResetStats()
}
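Wiring the limiter with per-key scope and periodic drop summaries might look like this sketch (the rates, key field, and summary output are assumptions):

package starlog

import (
	"fmt"
	"time"
)

func configureRateLimit(logger *StarLogger) {
	cfg := DefaultRateLimitConfig()
	cfg.Enable = true
	cfg.Levels = []int{LvInfo} // throttle info logs only
	cfg.Rate = 100             // token refill per second
	cfg.Burst = 200            // short spikes pass untouched
	cfg.Scope = RateLimitScopeByKey
	cfg.KeyFunc = func(entry *Entry) string {
		if entry == nil || entry.Fields == nil {
			return ""
		}
		if user, ok := entry.Fields["user"].(string); ok {
			return user
		}
		return "" // empty keys fall back to level:message keying
	}
	cfg.SummaryInterval = time.Minute
	cfg.OnDrop = func(data RateLimitDropData) {
		if data.Summary {
			fmt.Printf("rate limit: suppressed %d logs for key %q\n",
				data.SummarySuppressed, data.Key)
		}
	}
	logger.SetRateLimitConfig(cfg)
}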

311
rate_limit_test.go Normal file
View File

@ -0,0 +1,311 @@
package starlog
import (
"bytes"
"strings"
"sync"
"testing"
"time"
)
func setRateLimiterNowForTest(logger *StarLogger, nowFunc func() time.Time) {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
limiter := logger.logcore.rateLimiter
logger.logcore.mu.Unlock()
if limiter == nil {
return
}
limiter.setNowFuncForTest(nowFunc)
}
func TestRateLimitDrop(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
dropped := make(chan RateLimitDropData, 1)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvInfo}
cfg.Rate = 1
cfg.Burst = 1
cfg.DropPolicy = RateLimitDrop
cfg.OnDrop = func(data RateLimitDropData) {
select {
case dropped <- data:
default:
}
}
logger.SetRateLimitConfig(cfg)
now := time.Date(2026, 3, 19, 10, 0, 0, 0, time.UTC)
setRateLimiterNowForTest(logger, func() time.Time { return now })
logger.Info("first")
logger.Info("second")
got := buf.String()
if !strings.Contains(got, "first") {
t.Fatalf("first log should pass, got %q", got)
}
if strings.Contains(got, "second") {
t.Fatalf("second log should be dropped, got %q", got)
}
select {
case data := <-dropped:
if data.PassThrough {
t.Fatalf("drop policy should not pass through, got %+v", data)
}
if data.Key == "" {
t.Fatalf("drop callback should provide key")
}
default:
t.Fatalf("drop callback should be invoked")
}
stats := logger.GetRateLimitStats()
if stats.Allowed != 1 || stats.Dropped != 1 || stats.Suppressed != 1 {
t.Fatalf("unexpected rate limit stats: %+v", stats)
}
}
func TestRateLimitPassThrough(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
dropped := make(chan RateLimitDropData, 1)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvInfo}
cfg.Rate = 1
cfg.Burst = 1
cfg.DropPolicy = RateLimitPassThrough
cfg.OnDrop = func(data RateLimitDropData) {
select {
case dropped <- data:
default:
}
}
logger.SetRateLimitConfig(cfg)
now := time.Date(2026, 3, 19, 10, 1, 0, 0, time.UTC)
setRateLimiterNowForTest(logger, func() time.Time { return now })
logger.Info("one")
logger.Info("two")
got := buf.String()
if !strings.Contains(got, "one") || !strings.Contains(got, "two") {
t.Fatalf("pass-through mode should keep both logs, got %q", got)
}
select {
case data := <-dropped:
if !data.PassThrough {
t.Fatalf("drop callback should mark pass-through mode, got %+v", data)
}
default:
t.Fatalf("drop callback should be invoked in pass-through mode")
}
stats := logger.GetRateLimitStats()
if stats.Allowed != 2 || stats.Dropped != 0 || stats.PassedThrough != 1 || stats.Suppressed != 1 {
t.Fatalf("unexpected pass-through stats: %+v", stats)
}
}
func TestRateLimitLevelsAndExempt(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvInfo}
cfg.ExemptLevels = []int{LvError}
cfg.Rate = 1
cfg.Burst = 1
logger.SetRateLimitConfig(cfg)
now := time.Date(2026, 3, 19, 10, 2, 0, 0, time.UTC)
setRateLimiterNowForTest(logger, func() time.Time { return now })
logger.Debug("d1")
logger.Debug("d2")
logger.Info("i1")
logger.Info("i2")
logger.Error("e1")
logger.Error("e2")
got := buf.String()
if !strings.Contains(got, "d1") || !strings.Contains(got, "d2") {
t.Fatalf("debug logs should not be limited, got %q", got)
}
if !strings.Contains(got, "i1") || strings.Contains(got, "i2") {
t.Fatalf("info logs should be limited, got %q", got)
}
if !strings.Contains(got, "e1") || !strings.Contains(got, "e2") {
t.Fatalf("error logs should be exempt, got %q", got)
}
stats := logger.GetRateLimitStats()
if stats.Dropped != 1 || stats.Allowed != 5 {
t.Fatalf("unexpected level/exempt stats: %+v", stats)
}
}
func TestRateLimitByKey(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvInfo}
cfg.Scope = RateLimitScopeByKey
cfg.Rate = 1
cfg.Burst = 1
cfg.MaxKeys = 16
cfg.KeyFunc = func(entry *Entry) string {
if entry == nil || entry.Fields == nil {
return ""
}
if user, ok := entry.Fields["user"].(string); ok {
return user
}
return ""
}
logger.SetRateLimitConfig(cfg)
now := time.Date(2026, 3, 19, 10, 3, 0, 0, time.UTC)
setRateLimiterNowForTest(logger, func() time.Time { return now })
logger.WithField("user", "a").Info("key-a-one")
logger.WithField("user", "a").Info("key-a-two")
logger.WithField("user", "b").Info("key-b-one")
got := buf.String()
if !strings.Contains(got, "key-a-one") || strings.Contains(got, "key-a-two") {
t.Fatalf("same key should be limited, got %q", got)
}
if !strings.Contains(got, "key-b-one") {
t.Fatalf("different key should have independent bucket, got %q", got)
}
stats := logger.GetRateLimitStats()
if stats.Dropped != 1 || stats.Allowed != 2 || stats.CurrentKeys < 2 {
t.Fatalf("unexpected by-key stats: %+v", stats)
}
}
func TestRateLimitSummary(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
var mu sync.Mutex
callbacks := make([]RateLimitDropData, 0, 4)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvInfo}
cfg.Rate = 0.001
cfg.Burst = 1
cfg.SummaryInterval = 2 * time.Second
cfg.OnDrop = func(data RateLimitDropData) {
mu.Lock()
callbacks = append(callbacks, data)
mu.Unlock()
}
logger.SetRateLimitConfig(cfg)
now := time.Date(2026, 3, 19, 10, 4, 0, 0, time.UTC)
setRateLimiterNowForTest(logger, func() time.Time { return now })
logger.Info("one")
logger.Info("two")
now = now.Add(3 * time.Second)
logger.Info("three")
mu.Lock()
defer mu.Unlock()
if len(callbacks) != 2 {
t.Fatalf("expected 2 drop callbacks, got %d", len(callbacks))
}
if callbacks[0].Summary {
t.Fatalf("first drop should not be summary, got %+v", callbacks[0])
}
if !callbacks[1].Summary {
t.Fatalf("second drop should trigger summary, got %+v", callbacks[1])
}
if callbacks[1].SummarySuppressed != 2 {
t.Fatalf("summary should include suppressed count, got %+v", callbacks[1])
}
stats := logger.GetRateLimitStats()
if stats.Summaries != 1 {
t.Fatalf("expected one summary event, got %+v", stats)
}
}
func TestRateLimitConfigSnapshotApply(t *testing.T) {
logger := NewStarlog(nil)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Levels = []int{LvWarning, LvError}
cfg.Rate = 5
cfg.Burst = 10
cfg.Scope = RateLimitScopeByKey
cfg.MaxKeys = 99
cfg.KeyTTL = time.Minute
cfg.DropPolicy = RateLimitPassThrough
logger.SetRateLimitConfig(cfg)
snapshot := logger.GetConfig()
if !snapshot.RateLimit.Enable || snapshot.RateLimit.Rate != 5 || snapshot.RateLimit.Burst != 10 {
t.Fatalf("config snapshot should include rate limit config, got %+v", snapshot.RateLimit)
}
if snapshot.RateLimit.Scope != RateLimitScopeByKey || snapshot.RateLimit.DropPolicy != RateLimitPassThrough {
t.Fatalf("unexpected rate limit snapshot details: %+v", snapshot.RateLimit)
}
snapshot.RateLimit.Enable = false
snapshot.RateLimit.Rate = 1
snapshot.RateLimit.Burst = 1
logger.ApplyConfig(snapshot)
current := logger.GetRateLimitConfig()
if current.Enable {
t.Fatalf("apply config should update rate limit enable flag")
}
if current.Rate != 1 || current.Burst != 1 {
t.Fatalf("apply config should update rate limit values, got %+v", current)
}
}
func TestStdRateLimitBridge(t *testing.T) {
backup := GetConfig()
defer ApplyConfig(backup)
cfg := DefaultRateLimitConfig()
cfg.Enable = true
cfg.Rate = 2
cfg.Burst = 2
SetRateLimitConfig(cfg)
got := GetRateLimitConfig()
if !got.Enable || got.Rate != 2 || got.Burst != 2 {
t.Fatalf("std bridge should set/get rate limit config, got %+v", got)
}
EnableRateLimit(false)
got = GetRateLimitConfig()
if got.Enable {
t.Fatalf("EnableRateLimit(false) should disable limiter")
}
ResetRateLimitStats()
stats := GetRateLimitStats()
if stats.Allowed != 0 || stats.Dropped != 0 || stats.Suppressed != 0 {
t.Fatalf("reset stats should clear counters, got %+v", stats)
}
}

166
redaction.go Normal file

@ -0,0 +1,166 @@
package starlog
import (
"context"
"fmt"
"regexp"
"sync/atomic"
"b612.me/starlog/internal/redactutil"
)
func cloneRedactRules(source []RedactRule) []RedactRule {
if len(source) == 0 {
return nil
}
cloned := make([]RedactRule, len(source))
copy(cloned, source)
return cloned
}
func normalizeRedactMask(mask string) string {
return redactutil.NormalizeMask(mask)
}
func maskEntry(entry *Entry, token string) {
if entry == nil {
return
}
token = normalizeRedactMask(token)
entry.Message = token
entry.Err = nil
if len(entry.Fields) == 0 {
return
}
maskedFields := redactutil.MaskFields(entry.Fields, token)
if len(maskedFields) == 0 {
entry.Fields = nil
return
}
entry.Fields = Fields(maskedFields)
}
func (logger *starlog) applyRedaction(snapshot *starlog, entry *Entry) bool {
if entry == nil {
return true
}
ctx := entry.Context
if ctx == nil {
ctx = context.Background()
}
for _, rule := range snapshot.redactRules {
if rule == nil {
continue
}
if _, err := rule.Apply(ctx, entry); err != nil {
return logger.handleRedactionFailure(snapshot, entry, err)
}
}
if snapshot.redactor != nil {
if err := snapshot.redactor.Redact(ctx, entry); err != nil {
return logger.handleRedactionFailure(snapshot, entry, err)
}
}
return true
}
func (logger *starlog) handleRedactionFailure(snapshot *starlog, entry *Entry, err error) bool {
reportWriteError(fmt.Errorf("%w: %v", ErrRedactionFailed, err), LogData{
Name: snapshot.name,
Log: entry.Message,
})
atomic.AddUint64(&logger.redactErrorCount, 1)
switch snapshot.redactFailMode {
case RedactFailOpen:
return true
case RedactFailDrop:
return false
default:
maskEntry(entry, snapshot.redactMaskToken)
return true
}
}
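// RuleRedactor adapts an ordered list of RedactRule values into a single Redactor; rules run in the order given and the first error aborts.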
type RuleRedactor struct {
rules []RedactRule
}
func NewRuleRedactor(rules ...RedactRule) *RuleRedactor {
return &RuleRedactor{
rules: cloneRedactRules(rules),
}
}
func (redactor *RuleRedactor) Redact(ctx context.Context, entry *Entry) error {
if redactor == nil || entry == nil {
return nil
}
for _, rule := range redactor.rules {
if rule == nil {
continue
}
if _, err := rule.Apply(ctx, entry); err != nil {
return err
}
}
return nil
}
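// SensitiveFieldRule replaces the values of the configured field names with a fixed mask token.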
type SensitiveFieldRule struct {
fields map[string]struct{}
mask string
}
func NewSensitiveFieldRule(mask string, fields ...string) *SensitiveFieldRule {
return &SensitiveFieldRule{
fields: redactutil.BuildFieldSet(fields...),
mask: normalizeRedactMask(mask),
}
}
func (rule *SensitiveFieldRule) Apply(ctx context.Context, entry *Entry) (bool, error) {
_ = ctx
if rule == nil || entry == nil || len(entry.Fields) == 0 {
return false, nil
}
changed := false
for key, value := range entry.Fields {
lookup := redactutil.LookupFieldKey(key)
if _, ok := rule.fields[lookup]; !ok {
continue
}
if !redactutil.IsMasked(value, rule.mask) {
changed = true
}
entry.Fields[key] = rule.mask
}
return changed, nil
}
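// MessageRegexRule rewrites regexp matches in the entry message with a replacement string ("[REDACTED]" when none is given).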
type MessageRegexRule struct {
pattern *regexp.Regexp
replacement string
}
func NewMessageRegexRule(pattern *regexp.Regexp, replacement string) *MessageRegexRule {
if replacement == "" {
replacement = "[REDACTED]"
}
return &MessageRegexRule{
pattern: pattern,
replacement: replacement,
}
}
func (rule *MessageRegexRule) Apply(ctx context.Context, entry *Entry) (bool, error) {
_ = ctx
if rule == nil || rule.pattern == nil || entry == nil || entry.Message == "" {
return false, nil
}
redacted, changed := redactutil.ReplaceRegex(rule.pattern, entry.Message, rule.replacement)
if !changed {
return false, nil
}
entry.Message = redacted
return true, nil
}
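A sketch of how the redaction pieces compose, again assuming the b612.me/starlog import path; the rule behavior and fail-mode semantics follow the tests below.

package main

import (
	"regexp"

	"b612.me/starlog"
)

func main() {
	logger := starlog.NewStarlog(nil)
	// Field rule: password/token values become the default "[REDACTED]" token
	// (an empty mask falls back to the default).
	logger.AddRedactRule(starlog.NewSensitiveFieldRule("", "password", "token"))
	// Message rule: hide 11-digit numbers anywhere in the message text.
	logger.AddRedactRule(starlog.NewMessageRegexRule(regexp.MustCompile(`\d{11}`), "***"))
	// If redaction itself fails, drop the entry instead of leaking raw data.
	logger.SetRedactFailMode(starlog.RedactFailDrop)
	logger.WithField("password", "p@ssw0rd").Info("phone=13812345678")
}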

181
redaction_test.go Normal file

@ -0,0 +1,181 @@
package starlog
import (
"bytes"
"context"
"errors"
"regexp"
"strings"
"testing"
)
func TestCustomRedactor(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactor(RedactorFunc(func(ctx context.Context, entry *Entry) error {
_ = ctx
entry.Message = "masked-message"
if entry.Fields == nil {
entry.Fields = make(Fields)
}
entry.Fields["token"] = "***"
entry.Err = nil
return nil
}))
logger.WithField("token", "raw-token").WithError(errors.New("boom")).Error("origin message")
got := buf.String()
if !strings.Contains(got, "masked-message") || !strings.Contains(got, "token=***") {
t.Fatalf("expected custom redactor output, got %q", got)
}
if strings.Contains(got, "origin message") || strings.Contains(got, "raw-token") || strings.Contains(got, "boom") {
t.Fatalf("expected original sensitive values to be hidden, got %q", got)
}
}
func TestSensitiveFieldRule(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.AddRedactRule(NewSensitiveFieldRule("", "password", "token"))
logger.WithFields(Fields{
"password": "p@ssw0rd",
"token": "abc123",
"user": "alice",
}).Info("login")
got := buf.String()
if !strings.Contains(got, "password=[REDACTED]") || !strings.Contains(got, "token=[REDACTED]") {
t.Fatalf("expected sensitive fields to be redacted, got %q", got)
}
if !strings.Contains(got, "user=alice") {
t.Fatalf("non-sensitive field should remain, got %q", got)
}
if strings.Contains(got, "p@ssw0rd") || strings.Contains(got, "abc123") {
t.Fatalf("sensitive values leaked in output: %q", got)
}
}
func TestMessageRegexRule(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.AddRedactRule(NewMessageRegexRule(regexp.MustCompile(`\d{11}`), "***"))
logger.Info("phone=13812345678")
got := buf.String()
if !strings.Contains(got, "phone=***") {
t.Fatalf("expected phone number to be masked, got %q", got)
}
if strings.Contains(got, "13812345678") {
t.Fatalf("phone number should not appear in output: %q", got)
}
}
func TestRedactFailMaskAllDefault(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactor(RedactorFunc(func(context.Context, *Entry) error {
return errors.New("redactor failed")
}))
logger.WithField("password", "secret").Info("hello")
got := buf.String()
if !strings.Contains(got, "[REDACTED]") {
t.Fatalf("expected fallback mask token in output, got %q", got)
}
if strings.Contains(got, "hello") || strings.Contains(got, "secret") {
t.Fatalf("raw content should be masked on redaction failure, got %q", got)
}
if logger.GetRedactErrorCount() == 0 {
t.Fatalf("redaction error count should increase")
}
}
func TestRedactFailDrop(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactFailMode(RedactFailDrop)
logger.SetRedactor(RedactorFunc(func(context.Context, *Entry) error {
return errors.New("drop this log")
}))
logger.Info("should disappear")
if got := buf.String(); got != "" {
t.Fatalf("log should be dropped on redaction failure, got %q", got)
}
if logger.GetRedactErrorCount() == 0 {
t.Fatalf("redaction error count should increase")
}
}
func TestRedactFailOpen(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactFailMode(RedactFailOpen)
logger.SetRedactor(RedactorFunc(func(context.Context, *Entry) error {
return errors.New("open mode")
}))
logger.Info("keep raw")
got := buf.String()
if !strings.Contains(got, "keep raw") {
t.Fatalf("log should keep original content on open mode, got %q", got)
}
if logger.GetRedactErrorCount() == 0 {
t.Fatalf("redaction error count should increase")
}
}
func TestSetRedactMaskToken(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactMaskToken("***")
logger.SetRedactor(RedactorFunc(func(context.Context, *Entry) error {
return errors.New("mask token test")
}))
logger.WithField("token", "v").Info("raw")
got := buf.String()
if !strings.Contains(got, "***") {
t.Fatalf("expected custom mask token in output, got %q", got)
}
if strings.Contains(got, "raw") || strings.Contains(got, "v") {
t.Fatalf("expected raw values to be hidden by custom mask token, got %q", got)
}
}
func TestRedactionFailureReportsWriteError(t *testing.T) {
resetAsyncMetricsForTest()
defer resetAsyncMetricsForTest()
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetRedactor(RedactorFunc(func(context.Context, *Entry) error {
return errors.New("redactor failed")
}))
observed := make(chan error, 1)
SetWriteErrorHandler(func(err error, data LogData) {
if err == nil {
return
}
select {
case observed <- err:
default:
}
})
logger.Info("check redaction error report")
if GetWriteErrorCount() == 0 {
t.Fatalf("write error count should increase when redaction fails")
}
select {
case err := <-observed:
if !errors.Is(err, ErrRedactionFailed) {
t.Fatalf("expected ErrRedactionFailed, got %v", err)
}
default:
t.Fatalf("write error handler should be invoked on redaction failure")
}
}

53
rotate_manage.go Normal file

@ -0,0 +1,53 @@
package starlog
import (
"os"
"time"
"b612.me/starlog/internal/rotatemanage"
)
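// RotateManageOptions controls housekeeping after a rotation: MaxBackups caps
// how many managed backups are kept, MaxAge prunes backups by age, Compress
// gzips fresh archives, and Pattern is the glob deciding which files count as
// managed backups.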
type RotateManageOptions struct {
MaxBackups int
MaxAge time.Duration
Compress bool
Pattern string
}
func NewManagedRotateArchive(policy RotatePolicy, checkInterval int64, options RotateManageOptions) *RotatePolicyArchive {
archive := NewRotatePolicyArchive(policy, checkInterval)
return WithRotateManageOptions(archive, options)
}
func WithRotateManageOptions(archive *RotatePolicyArchive, options RotateManageOptions) *RotatePolicyArchive {
if archive == nil {
return nil
}
previous := archive.HookAfterArchive()
archive.SetHookAfterArchive(func(logger *StarLogger, archivePath string, currentPath string, info os.FileInfo) error {
if previous != nil {
if err := previous(logger, archivePath, currentPath, info); err != nil {
return err
}
}
return ApplyRotateManageOptions(archivePath, currentPath, options)
})
return archive
}
func ApplyRotateManageOptions(archivePath string, currentPath string, options RotateManageOptions) error {
return rotatemanage.Apply(archivePath, currentPath, toInternalRotateManageOptions(options))
}
func toInternalRotateManageOptions(options RotateManageOptions) rotatemanage.Options {
return rotatemanage.Options{
MaxBackups: options.MaxBackups,
MaxAge: options.MaxAge,
Compress: options.Compress,
Pattern: options.Pattern,
}
}
func isManagedBackupName(name string, base string, stem string, pattern string) (bool, error) {
return rotatemanage.IsManagedBackupName(name, base, stem, pattern)
}
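ApplyRotateManageOptions is also usable on its own, e.g. after an externally triggered rotation. A hypothetical invocation (paths are illustrative; the tests below show the live file itself is never removed):

package main

import (
	"time"

	"b612.me/starlog"
)

func main() {
	// Something else has just renamed app.log to app.log.1. Gzip the fresh
	// archive and prune matching backups older than 24h.
	err := starlog.ApplyRotateManageOptions(
		"./logs/app.log.1", // archive just produced
		"./logs/app.log",   // live file
		starlog.RotateManageOptions{
			MaxAge:   24 * time.Hour,
			Compress: true,
			Pattern:  "app.log.*",
		},
	)
	if err != nil {
		panic(err)
	}
}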

149
rotate_manage_test.go Normal file

@ -0,0 +1,149 @@
package starlog
import (
"os"
"path/filepath"
"strconv"
"testing"
"time"
)
func writeFileWithModTime(t *testing.T, path string, modTime time.Time) {
t.Helper()
if err := os.WriteFile(path, []byte("x"), 0644); err != nil {
t.Fatalf("write file failed: %v", err)
}
if err := os.Chtimes(path, modTime, modTime); err != nil {
t.Fatalf("set mod time failed: %v", err)
}
}
func TestApplyRotateManageOptionsCompress(t *testing.T) {
dir := testBinDir(t)
current := filepath.Join(dir, "app.log")
archive := filepath.Join(dir, "app.log.1")
writeFileWithModTime(t, current, time.Now())
writeFileWithModTime(t, archive, time.Now())
if err := ApplyRotateManageOptions(archive, current, RotateManageOptions{
Compress: true,
Pattern: "app.log.*",
}); err != nil {
t.Fatalf("ApplyRotateManageOptions compress failed: %v", err)
}
if _, err := os.Stat(archive + ".gz"); err != nil {
t.Fatalf("compressed file should exist: %v", err)
}
if _, err := os.Stat(archive); !os.IsNotExist(err) {
t.Fatalf("original archive file should be removed after compression")
}
}
func TestApplyRotateManageOptionsMaxBackups(t *testing.T) {
dir := testBinDir(t)
current := filepath.Join(dir, "app.log")
writeFileWithModTime(t, current, time.Now())
baseTime := time.Now().Add(-4 * time.Hour)
for i := 1; i <= 4; i++ {
path := filepath.Join(dir, "app.log."+strconv.Itoa(i))
writeFileWithModTime(t, path, baseTime.Add(time.Duration(i)*time.Hour))
}
if err := ApplyRotateManageOptions(filepath.Join(dir, "app.log.4"), current, RotateManageOptions{
MaxBackups: 2,
Pattern: "app.log.*",
}); err != nil {
t.Fatalf("ApplyRotateManageOptions max backups failed: %v", err)
}
matches, err := filepath.Glob(filepath.Join(dir, "app.log.*"))
if err != nil {
t.Fatalf("glob failed: %v", err)
}
if len(matches) != 2 {
t.Fatalf("should keep only 2 backup files, got %d (%v)", len(matches), matches)
}
}
func TestApplyRotateManageOptionsMaxAge(t *testing.T) {
dir := testBinDir(t)
current := filepath.Join(dir, "app.log")
oldBackup := filepath.Join(dir, "app.log.old")
newBackup := filepath.Join(dir, "app.log.new")
writeFileWithModTime(t, current, time.Now())
writeFileWithModTime(t, oldBackup, time.Now().Add(-4*time.Hour))
writeFileWithModTime(t, newBackup, time.Now().Add(-10*time.Minute))
if err := ApplyRotateManageOptions(newBackup, current, RotateManageOptions{
MaxAge: time.Hour,
Pattern: "app.log.*",
}); err != nil {
t.Fatalf("ApplyRotateManageOptions max age failed: %v", err)
}
if _, err := os.Stat(oldBackup); !os.IsNotExist(err) {
t.Fatalf("old backup should be removed by max age")
}
if _, err := os.Stat(newBackup); err != nil {
t.Fatalf("new backup should be kept: %v", err)
}
}
func TestWithRotateManageOptionsKeepsPreviousHook(t *testing.T) {
archive := NewRotatePolicyArchive(&rotateWhenNonEmptyPolicy{}, 1)
var called bool
archive.SetHookAfterArchive(func(*StarLogger, string, string, os.FileInfo) error {
called = true
return nil
})
WithRotateManageOptions(archive, RotateManageOptions{})
dir := testBinDir(t)
current := filepath.Join(dir, "app.log")
archived := filepath.Join(dir, "app.log.1")
writeFileWithModTime(t, current, time.Now())
writeFileWithModTime(t, archived, time.Now())
hook := archive.HookAfterArchive()
if hook == nil {
t.Fatalf("hook after archive should not be nil")
}
if err := hook(nil, archived, current, nil); err != nil {
t.Fatalf("hook execution failed: %v", err)
}
if !called {
t.Fatalf("previous hook should still run after wrapping")
}
}
func TestIsManagedBackupNameDefaultSafeMatch(t *testing.T) {
base := "app.log"
stem := "app"
tests := []struct {
name string
matched bool
}{
{name: "app.log.20260318", matched: true},
{name: "app.log-1", matched: true},
{name: "app.log.1.gz", matched: true},
{name: "app.log.bak", matched: true},
{name: "app_20260318.log", matched: true},
{name: "app.log.current", matched: false},
{name: "app.log.backup", matched: false},
{name: "app-config.log", matched: false},
{name: "other.log.1", matched: false},
}
for _, tc := range tests {
matched, err := isManagedBackupName(tc.name, base, stem, "")
if err != nil {
t.Fatalf("isManagedBackupName(%q) returned err: %v", tc.name, err)
}
if matched != tc.matched {
t.Fatalf("isManagedBackupName(%q)=%v, want %v", tc.name, matched, tc.matched)
}
}
}

217
rotate_templates.go Normal file

@ -0,0 +1,217 @@
package starlog
import (
"errors"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
const defaultRotateNamePattern = "20060102-150405"
func normalizeRotateNamePattern(pattern string) string {
pattern = strings.TrimSpace(pattern)
if pattern == "" {
return defaultRotateNamePattern
}
return pattern
}
func buildRotatePathWithPattern(current string, now time.Time, pattern string) string {
pattern = normalizeRotateNamePattern(pattern)
dir := filepath.Dir(current)
base := filepath.Base(current)
ext := filepath.Ext(base)
stem := strings.TrimSuffix(base, ext)
suffix := now.Format(pattern)
if ext == "" {
return filepath.Join(dir, stem+"."+suffix)
}
return filepath.Join(dir, stem+"."+suffix+ext)
}
func resolveEntryTime(entry *Entry) time.Time {
if entry != nil && !entry.Time.IsZero() {
return entry.Time
}
return time.Now()
}
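// RotateByTimePolicy rotates once interval has elapsed since the last
// rotation. The first check only seeds the reference time (file creation time
// when available) and never rotates.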
type RotateByTimePolicy struct {
interval time.Duration
pattern string
mu sync.Mutex
lastRotate time.Time
}
func NewRotateByTimePolicy(interval time.Duration) *RotateByTimePolicy {
return NewRotateByTimePolicyWithPattern(interval, "")
}
func NewRotateByTimePolicyWithPattern(interval time.Duration, pattern string) *RotateByTimePolicy {
if interval <= 0 {
interval = 24 * time.Hour
}
return &RotateByTimePolicy{
interval: interval,
pattern: normalizeRotateNamePattern(pattern),
}
}
func (policy *RotateByTimePolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
if policy == nil || info == nil {
return false
}
now := resolveEntryTime(entry)
policy.mu.Lock()
defer policy.mu.Unlock()
if policy.lastRotate.IsZero() {
policy.lastRotate = GetFileCreationTime(info)
if policy.lastRotate.IsZero() {
policy.lastRotate = now
}
return false
}
if now.Sub(policy.lastRotate) >= policy.interval {
policy.lastRotate = now
return true
}
return false
}
func (policy *RotateByTimePolicy) NextPath(current string, now time.Time) string {
if policy == nil {
return buildRotatePathWithPattern(current, now, "")
}
return buildRotatePathWithPattern(current, now, policy.pattern)
}
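// RotateBySizePolicy rotates once the live file reaches maxSize bytes (non-positive sizes default to 100 MiB).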
type RotateBySizePolicy struct {
maxSize int64
pattern string
}
func NewRotateBySizePolicy(maxSizeBytes int64) *RotateBySizePolicy {
return NewRotateBySizePolicyWithPattern(maxSizeBytes, "")
}
func NewRotateBySizePolicyWithPattern(maxSizeBytes int64, pattern string) *RotateBySizePolicy {
if maxSizeBytes <= 0 {
maxSizeBytes = 100 * 1024 * 1024
}
return &RotateBySizePolicy{
maxSize: maxSizeBytes,
pattern: normalizeRotateNamePattern(pattern),
}
}
func (policy *RotateBySizePolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
_ = entry
if policy == nil || info == nil {
return false
}
return info.Size() >= policy.maxSize
}
func (policy *RotateBySizePolicy) NextPath(current string, now time.Time) string {
if policy == nil {
return buildRotatePathWithPattern(current, now, "")
}
return buildRotatePathWithPattern(current, now, policy.pattern)
}
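// RotateByTimeSizePolicy rotates when either the size threshold is reached or the interval has elapsed, whichever comes first.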
type RotateByTimeSizePolicy struct {
interval time.Duration
maxSize int64
pattern string
mu sync.Mutex
lastRotate time.Time
}
func NewRotateByTimeSizePolicy(interval time.Duration, maxSizeBytes int64) *RotateByTimeSizePolicy {
return NewRotateByTimeSizePolicyWithPattern(interval, maxSizeBytes, "")
}
func NewRotateByTimeSizePolicyWithPattern(interval time.Duration, maxSizeBytes int64, pattern string) *RotateByTimeSizePolicy {
if interval <= 0 {
interval = 24 * time.Hour
}
if maxSizeBytes <= 0 {
maxSizeBytes = 100 * 1024 * 1024
}
return &RotateByTimeSizePolicy{
interval: interval,
maxSize: maxSizeBytes,
pattern: normalizeRotateNamePattern(pattern),
}
}
func (policy *RotateByTimeSizePolicy) ShouldRotate(info FileInfo, entry *Entry) bool {
if policy == nil || info == nil {
return false
}
now := resolveEntryTime(entry)
policy.mu.Lock()
defer policy.mu.Unlock()
if policy.lastRotate.IsZero() {
policy.lastRotate = GetFileCreationTime(info)
if policy.lastRotate.IsZero() {
policy.lastRotate = now
}
}
if info.Size() >= policy.maxSize {
policy.lastRotate = now
return true
}
if now.Sub(policy.lastRotate) >= policy.interval {
policy.lastRotate = now
return true
}
return false
}
func (policy *RotateByTimeSizePolicy) NextPath(current string, now time.Time) string {
if policy == nil {
return buildRotatePathWithPattern(current, now, "")
}
return buildRotatePathWithPattern(current, now, policy.pattern)
}
func StartRotateByTime(logger *StarLogger, interval time.Duration, checkInterval int64) error {
return StartRotatePolicy(logger, NewRotateByTimePolicy(interval), checkInterval)
}
func StartRotateBySize(logger *StarLogger, maxSizeBytes int64, checkInterval int64) error {
return StartRotatePolicy(logger, NewRotateBySizePolicy(maxSizeBytes), checkInterval)
}
func StartRotateByTimeSize(logger *StarLogger, interval time.Duration, maxSizeBytes int64, checkInterval int64) error {
return StartRotatePolicy(logger, NewRotateByTimeSizePolicy(interval, maxSizeBytes), checkInterval)
}
func StartManagedRotatePolicy(logger *StarLogger, policy RotatePolicy, checkInterval int64, options RotateManageOptions) error {
if policy == nil {
return errors.New("rotate policy is nil")
}
strategy := buildRotateStrategy(policy, checkInterval)
strategy.afterHook = func(logger *StarLogger, archivePath string, currentPath string, info os.FileInfo) error {
return ApplyRotateManageOptions(archivePath, currentPath, options)
}
return startArchiveWithStrategy(logger, strategy)
}
func StartManagedRotateByTime(logger *StarLogger, interval time.Duration, checkInterval int64, options RotateManageOptions) error {
return StartManagedRotatePolicy(logger, NewRotateByTimePolicy(interval), checkInterval, options)
}
func StartManagedRotateBySize(logger *StarLogger, maxSizeBytes int64, checkInterval int64, options RotateManageOptions) error {
return StartManagedRotatePolicy(logger, NewRotateBySizePolicy(maxSizeBytes), checkInterval, options)
}
func StartManagedRotateByTimeSize(logger *StarLogger, interval time.Duration, maxSizeBytes int64, checkInterval int64, options RotateManageOptions) error {
return StartManagedRotatePolicy(logger, NewRotateByTimeSizePolicy(interval, maxSizeBytes), checkInterval, options)
}
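A sketch wiring the managed helpers to a logger. Two assumptions here: the b612.me/starlog import path, and that checkInterval is in seconds; its unit is not visible in this excerpt, but the tests pass 1 and observe a rotation within a few seconds.

package main

import (
	"time"

	"b612.me/starlog"
)

func main() {
	logger := starlog.NewStarlog(nil)
	if err := starlog.SetLogFile("./logs/app.log", logger, false); err != nil {
		panic(err)
	}
	defer logger.Close()
	// Rotate when 24h elapse or 100 MiB is reached, whichever comes first;
	// keep 7 gzipped backups matching the default "stem.<timestamp>.ext" names.
	err := starlog.StartManagedRotateByTimeSize(logger, 24*time.Hour, 100<<20, 1,
		starlog.RotateManageOptions{
			MaxBackups: 7,
			Compress:   true,
			Pattern:    "app.*.log*",
		})
	if err != nil {
		panic(err)
	}
	logger.Infoln("rotation armed")
}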

124
rotate_templates_test.go Normal file

@ -0,0 +1,124 @@
package starlog
import (
"path/filepath"
"strings"
"testing"
"time"
)
func TestRotateTemplatePathBuilder(t *testing.T) {
path := buildRotatePathWithPattern("./logs/app.log", time.Date(2026, 3, 19, 10, 11, 12, 0, time.UTC), "20060102")
base := filepath.Base(path)
if base != "app.20260319.log" {
t.Fatalf("unexpected rotated path: %s", base)
}
}
func TestRotateBySizePolicyShouldRotate(t *testing.T) {
p := NewRotateBySizePolicy(1)
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "tpl_size.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer func() { _ = logger.Close() }()
logger.Infoln("x")
info, err := GetLogFileInfo(logger)
if err != nil {
t.Fatalf("GetLogFileInfo failed: %v", err)
}
if !p.ShouldRotate(info, &Entry{Time: time.Now()}) {
t.Fatalf("size policy should rotate when file size threshold reached")
}
}
func TestRotateByTimePolicyShouldRotate(t *testing.T) {
p := NewRotateByTimePolicy(time.Second)
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "tpl_time.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer func() { _ = logger.Close() }()
logger.Infoln("time")
info, err := GetLogFileInfo(logger)
if err != nil {
t.Fatalf("GetLogFileInfo failed: %v", err)
}
base := resolveEntryTime(&Entry{Time: time.Now()})
if p.ShouldRotate(info, &Entry{Time: base}) {
t.Fatalf("first check should initialize and not rotate")
}
if p.ShouldRotate(info, &Entry{Time: base.Add(500 * time.Millisecond)}) {
t.Fatalf("within interval should not rotate")
}
if !p.ShouldRotate(info, &Entry{Time: base.Add(1100 * time.Millisecond)}) {
t.Fatalf("after interval should rotate")
}
}
func TestRotateByTimeSizePolicyShouldRotate(t *testing.T) {
p := NewRotateByTimeSizePolicy(time.Hour, 1024)
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "tpl_timesize.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer func() { _ = logger.Close() }()
logger.Infoln("small")
info, err := GetLogFileInfo(logger)
if err != nil {
t.Fatalf("GetLogFileInfo failed: %v", err)
}
base := resolveEntryTime(&Entry{Time: time.Now()})
if p.ShouldRotate(info, &Entry{Time: base}) {
t.Fatalf("first check should initialize and not rotate")
}
if p.ShouldRotate(info, &Entry{Time: base.Add(2 * time.Second)}) {
t.Fatalf("before interval and size threshold should not rotate")
}
p2 := NewRotateByTimeSizePolicy(time.Hour, 1)
if !p2.ShouldRotate(info, &Entry{Time: base.Add(3 * time.Second)}) {
t.Fatalf("size threshold should rotate for time-size policy")
}
}
func TestStartManagedRotatePolicyAndHelpers(t *testing.T) {
logger := NewStarlog(nil)
logger.SetShowStd(false)
logPath := filepath.Join(testBinDir(t), "tpl_helper.log")
if err := SetLogFile(logPath, logger, false); err != nil {
t.Fatalf("SetLogFile failed: %v", err)
}
defer func() { _ = logger.Close() }()
defer StopArchive(logger)
if err := StartManagedRotatePolicy(logger, nil, 1, RotateManageOptions{}); err == nil {
t.Fatalf("nil policy should return error")
}
if err := StartManagedRotateBySize(logger, 1, 1, RotateManageOptions{Pattern: "tpl_helper.*.log"}); err != nil {
t.Fatalf("StartManagedRotateBySize failed: %v", err)
}
logger.Infoln(strings.Repeat("a", 64))
found := false
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
matches, _ := filepath.Glob(filepath.Join(filepath.Dir(logPath), "tpl_helper.*.log"))
if len(matches) > 0 {
found = true
break
}
time.Sleep(100 * time.Millisecond)
}
if !found {
t.Fatalf("managed rotate helper should create backup files")
}
}

228
rotating_sink.go Normal file

@ -0,0 +1,228 @@
package starlog
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
var ErrRotatingFileSinkClosed = errors.New("rotating file sink closed")
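// RotatingFileSink is a mutex-guarded file Sink that consults its RotatePolicy
// at most once per checkInterval, renames the live file aside when the policy
// fires, and then applies its RotateManageOptions to the fresh archive.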
type RotatingFileSink struct {
mu sync.Mutex
path string
policy RotatePolicy
checkInterval time.Duration
options RotateManageOptions
appendMode bool
file *os.File
lastCheck time.Time
closed bool
}
func normalizeRotateCheckInterval(interval time.Duration) time.Duration {
if interval <= 0 {
return time.Second
}
return interval
}
func ensureLogDir(path string) error {
dir := filepath.Dir(path)
if dir == "" || dir == "." {
return nil
}
return os.MkdirAll(dir, 0755)
}
func newRotatePolicySink(path string, appendMode bool, policy RotatePolicy, checkInterval time.Duration, options RotateManageOptions) (*RotatingFileSink, error) {
if policy == nil {
return nil, errors.New("rotate policy is nil")
}
fullpath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
sink := &RotatingFileSink{
path: fullpath,
policy: policy,
checkInterval: normalizeRotateCheckInterval(checkInterval),
options: options,
appendMode: appendMode,
}
if err = sink.openFileLocked(appendMode); err != nil {
return nil, err
}
return sink, nil
}
func NewRotatePolicySink(path string, appendMode bool, policy RotatePolicy, checkInterval time.Duration) (*RotatingFileSink, error) {
return newRotatePolicySink(path, appendMode, policy, checkInterval, RotateManageOptions{})
}
func NewManagedRotatePolicySink(path string, appendMode bool, policy RotatePolicy, checkInterval time.Duration, options RotateManageOptions) (*RotatingFileSink, error) {
return newRotatePolicySink(path, appendMode, policy, checkInterval, options)
}
func NewRotateByTimeSink(path string, appendMode bool, interval time.Duration, checkInterval time.Duration) (*RotatingFileSink, error) {
return NewRotatePolicySink(path, appendMode, NewRotateByTimePolicy(interval), checkInterval)
}
func NewManagedRotateByTimeSink(path string, appendMode bool, interval time.Duration, checkInterval time.Duration, options RotateManageOptions) (*RotatingFileSink, error) {
return NewManagedRotatePolicySink(path, appendMode, NewRotateByTimePolicy(interval), checkInterval, options)
}
func NewRotateBySizeSink(path string, appendMode bool, maxSizeBytes int64, checkInterval time.Duration) (*RotatingFileSink, error) {
return NewRotatePolicySink(path, appendMode, NewRotateBySizePolicy(maxSizeBytes), checkInterval)
}
func NewManagedRotateBySizeSink(path string, appendMode bool, maxSizeBytes int64, checkInterval time.Duration, options RotateManageOptions) (*RotatingFileSink, error) {
return NewManagedRotatePolicySink(path, appendMode, NewRotateBySizePolicy(maxSizeBytes), checkInterval, options)
}
func NewRotateByTimeSizeSink(path string, appendMode bool, interval time.Duration, maxSizeBytes int64, checkInterval time.Duration) (*RotatingFileSink, error) {
return NewRotatePolicySink(path, appendMode, NewRotateByTimeSizePolicy(interval, maxSizeBytes), checkInterval)
}
func NewManagedRotateByTimeSizeSink(path string, appendMode bool, interval time.Duration, maxSizeBytes int64, checkInterval time.Duration, options RotateManageOptions) (*RotatingFileSink, error) {
return NewManagedRotatePolicySink(path, appendMode, NewRotateByTimeSizePolicy(interval, maxSizeBytes), checkInterval, options)
}
func (sink *RotatingFileSink) openFileLocked(appendMode bool) error {
if sink == nil {
return nil
}
if err := ensureLogDir(sink.path); err != nil {
return err
}
flags := os.O_CREATE | os.O_WRONLY
if appendMode {
flags |= os.O_APPEND
} else {
flags |= os.O_TRUNC
}
fp, err := os.OpenFile(sink.path, flags, 0644)
if err != nil {
return err
}
sink.file = fp
return nil
}
func (sink *RotatingFileSink) shouldCheckRotateLocked(now time.Time) bool {
if sink == nil {
return false
}
if sink.lastCheck.IsZero() || now.Sub(sink.lastCheck) >= sink.checkInterval {
sink.lastCheck = now
return true
}
return false
}
func (sink *RotatingFileSink) rotateIfNeededLocked(now time.Time) error {
if sink == nil || sink.file == nil || sink.policy == nil {
return nil
}
info, err := sink.file.Stat()
if err != nil {
return err
}
entry := &Entry{Time: now}
if !sink.policy.ShouldRotate(info, entry) {
return nil
}
archivePath := strings.TrimSpace(resolveRotateArchivePath(sink.policy, sink.path, now))
if archivePath == "" || archivePath == sink.path {
return nil
}
if err = ensureLogDir(archivePath); err != nil {
return err
}
if err = sink.file.Close(); err != nil {
return err
}
sink.file = nil
if err = os.Rename(sink.path, archivePath); err != nil {
reopenErr := sink.openFileLocked(true)
if reopenErr != nil {
return fmt.Errorf("rotate rename failed: %v; reopen failed: %w", err, reopenErr)
}
return err
}
if err = sink.openFileLocked(false); err != nil {
return err
}
if err = ApplyRotateManageOptions(archivePath, sink.path, sink.options); err != nil {
return err
}
return nil
}
func (sink *RotatingFileSink) Write(data []byte) error {
if sink == nil {
return nil
}
sink.mu.Lock()
defer sink.mu.Unlock()
if sink.closed {
return ErrRotatingFileSinkClosed
}
if sink.file == nil {
if err := sink.openFileLocked(true); err != nil {
return err
}
}
now := time.Now()
if sink.shouldCheckRotateLocked(now) {
if err := sink.rotateIfNeededLocked(now); err != nil {
return err
}
}
_, err := sink.file.Write(data)
return err
}
func (sink *RotatingFileSink) Sync() error {
if sink == nil {
return nil
}
sink.mu.Lock()
defer sink.mu.Unlock()
if sink.file == nil {
return nil
}
return sink.file.Sync()
}
func (sink *RotatingFileSink) Close() error {
if sink == nil {
return nil
}
sink.mu.Lock()
defer sink.mu.Unlock()
if sink.closed {
return nil
}
sink.closed = true
if sink.file == nil {
return nil
}
err := sink.file.Close()
sink.file = nil
return err
}
func (sink *RotatingFileSink) Path() string {
if sink == nil {
return ""
}
sink.mu.Lock()
defer sink.mu.Unlock()
return sink.path
}
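The sink can be used standalone, independent of any logger. A minimal sketch under the same import-path assumption:

package main

import (
	"time"

	"b612.me/starlog"
)

func main() {
	// Size-based rotation at 10 MiB; the policy is consulted at most once
	// per second, and at most 5 managed backups are retained.
	sink, err := starlog.NewManagedRotateBySizeSink(
		"./logs/svc.log", true, 10<<20, time.Second,
		starlog.RotateManageOptions{MaxBackups: 5, Pattern: "svc.*.log"},
	)
	if err != nil {
		panic(err)
	}
	defer sink.Close()
	if err := sink.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	_ = sink.Sync()
}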

96
rotating_sink_test.go Normal file

@ -0,0 +1,96 @@
package starlog
import (
"errors"
"path/filepath"
"strings"
"testing"
"time"
)
func waitForRotate(t *testing.T, timeout time.Duration, cond func() bool, reason string) {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if cond() {
return
}
time.Sleep(10 * time.Millisecond)
}
t.Fatalf("timeout waiting for condition: %s", reason)
}
func TestNewRotatePolicySinkNilPolicy(t *testing.T) {
sink, err := NewRotatePolicySink(filepath.Join(testBinDir(t), "a.log"), true, nil, time.Second)
if err == nil {
t.Fatalf("expected error for nil rotate policy")
}
if sink != nil {
t.Fatalf("sink should be nil when create fails")
}
}
func TestManagedRotateBySizeSinkCreatesBackups(t *testing.T) {
path := filepath.Join(testBinDir(t), "route.log")
sink, err := NewManagedRotateBySizeSink(path, true, 64, 5*time.Millisecond, RotateManageOptions{
MaxBackups: 10,
Pattern: "route.*.log",
})
if err != nil {
t.Fatalf("create sink failed: %v", err)
}
defer sink.Close()
for idx := 0; idx < 40; idx++ {
if err = sink.Write([]byte(strings.Repeat("x", 16))); err != nil {
t.Fatalf("sink write failed: %v", err)
}
time.Sleep(3 * time.Millisecond)
}
waitForRotate(t, 2*time.Second, func() bool {
matches, _ := filepath.Glob(filepath.Join(filepath.Dir(path), "route.*.log"))
return len(matches) > 0
}, "rotating sink backups")
}
func TestRotatingFileSinkClose(t *testing.T) {
path := filepath.Join(testBinDir(t), "close.log")
sink, err := NewRotateBySizeSink(path, true, 1024, time.Second)
if err != nil {
t.Fatalf("create sink failed: %v", err)
}
if err = sink.Close(); err != nil {
t.Fatalf("close failed: %v", err)
}
if err = sink.Write([]byte("after-close")); !errors.Is(err, ErrRotatingFileSinkClosed) {
t.Fatalf("write after close should return ErrRotatingFileSinkClosed, got %v", err)
}
}
func TestRotatingFileSinkPrefersArchivePathProvider(t *testing.T) {
path := filepath.Join(testBinDir(t), "sink_provider.log")
sink, err := NewRotatePolicySink(path, true, &rotatePreferArchivePathPolicy{}, 5*time.Millisecond)
if err != nil {
t.Fatalf("create sink failed: %v", err)
}
defer sink.Close()
if err = sink.Write([]byte("first")); err != nil {
t.Fatalf("first write failed: %v", err)
}
time.Sleep(10 * time.Millisecond)
if err = sink.Write([]byte("second")); err != nil {
t.Fatalf("second write failed: %v", err)
}
waitForRotate(t, 2*time.Second, func() bool {
matches, _ := filepath.Glob(path + ".*.archive.bak")
return len(matches) > 0
}, "rotating sink archive path provider")
nextMatches, _ := filepath.Glob(path + ".*.next.bak")
if len(nextMatches) > 0 {
t.Fatalf("rotating sink should not use NextPath when ArchivePath is available")
}
}

245
route_handler.go Normal file

@ -0,0 +1,245 @@
package starlog
import (
"context"
"fmt"
"reflect"
"sync/atomic"
"b612.me/starlog/internal/routerx"
)
type LevelMatcher = routerx.Matcher
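// Route binds a level matcher to a formatter and a sink. A nil Formatter falls back to NewTextFormatter; a nil Sink disables the route.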
type Route struct {
Name string
Match LevelMatcher
Formatter Formatter
Sink Sink
}
type routeSnapshot struct {
name string
match LevelMatcher
formatter Formatter
sink Sink
}
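// RouteHandler fans entries out to a set of routes held in an atomic.Value,
// so SetRoutes/ReplaceRoutes can swap the table while Handle runs concurrently.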
type RouteHandler struct {
routes atomic.Value
}
func NewRouteHandler(routes ...Route) *RouteHandler {
handler := &RouteHandler{}
handler.routes.Store([]routeSnapshot{})
handler.SetRoutes(routes)
return handler
}
func sinkIdentity(sink Sink) (string, bool) {
if sink == nil {
return "", false
}
val := reflect.ValueOf(sink)
if !val.IsValid() {
return "", false
}
switch val.Kind() {
case reflect.Ptr, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.UnsafePointer:
if val.IsNil() {
return "", false
}
return fmt.Sprintf("%T:%x", sink, val.Pointer()), true
default:
return "", false
}
}
func (handler *RouteHandler) SetRoutes(routes []Route) {
if handler == nil {
return
}
handler.routes.Store(normalizeRoutes(routes))
}
func (handler *RouteHandler) ReplaceRoutes(routes ...Route) {
handler.SetRoutes(routes)
}
func (handler *RouteHandler) Handle(ctx context.Context, entry *Entry) error {
if handler == nil || entry == nil {
return nil
}
rawRoutes := handler.routes.Load()
snapshots, ok := rawRoutes.([]routeSnapshot)
if !ok || len(snapshots) == 0 {
return nil
}
var firstErr error
for _, route := range snapshots {
if route.match != nil && !route.match(entry.Level) {
continue
}
formatter := route.formatter
if formatter == nil {
formatter = NewTextFormatter()
}
formatted, err := formatter.Format(entry)
if err != nil {
wrapErr := fmt.Errorf("route %s format failed: %w", route.name, err)
reportWriteError(wrapErr, LogData{
Name: route.name,
Log: entry.Message,
})
if firstErr == nil {
firstErr = wrapErr
}
continue
}
if route.sink == nil {
continue
}
if err = route.sink.Write(formatted); err != nil {
wrapErr := fmt.Errorf("route %s write failed: %w", route.name, err)
reportWriteError(wrapErr, LogData{
Name: route.name,
Log: string(formatted),
})
if firstErr == nil {
firstErr = wrapErr
}
continue
}
}
return firstErr
}
func (handler *RouteHandler) Close() error {
if handler == nil {
return nil
}
rawRoutes := handler.routes.Load()
snapshots, ok := rawRoutes.([]routeSnapshot)
if !ok || len(snapshots) == 0 {
return nil
}
closed := make(map[string]struct{}, len(snapshots))
var firstErr error
for _, route := range snapshots {
if route.sink == nil {
continue
}
if id, ok := sinkIdentity(route.sink); ok {
if _, exists := closed[id]; exists {
continue
}
closed[id] = struct{}{}
}
if err := route.sink.Close(); err != nil {
wrapErr := fmt.Errorf("route %s close failed: %w", route.name, err)
reportWriteError(wrapErr, LogData{
Name: route.name,
})
if firstErr == nil {
firstErr = wrapErr
}
}
}
return firstErr
}
func MatchAllLevels() LevelMatcher {
return routerx.MatchAllLevels()
}
func MatchLevels(levels ...int) LevelMatcher {
return routerx.MatchLevels(levels...)
}
func MatchAtLeast(minLevel int) LevelMatcher {
return routerx.MatchAtLeast(minLevel)
}
type chainedHandler struct {
handlers []Handler
}
func NewChainHandler(handlers ...Handler) Handler {
filtered := make([]Handler, 0, len(handlers))
for _, handler := range handlers {
if handler == nil {
continue
}
filtered = append(filtered, handler)
}
return &chainedHandler{
handlers: filtered,
}
}
func ChainHandler(handlers ...Handler) Handler {
return NewChainHandler(handlers...)
}
func (handler *chainedHandler) Handle(ctx context.Context, entry *Entry) error {
if handler == nil {
return nil
}
var firstErr error
for _, item := range handler.handlers {
if item == nil {
continue
}
if err := item.Handle(ctx, entry); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func (handler *chainedHandler) Close() error {
if handler == nil {
return nil
}
var firstErr error
for _, item := range handler.handlers {
if item == nil {
continue
}
closer, ok := item.(interface{ Close() error })
if !ok {
continue
}
if err := closer.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func normalizeRoutes(routes []Route) []routeSnapshot {
if len(routes) == 0 {
return nil
}
baseRoutes := make([]routerx.Route, 0, len(routes))
for index, route := range routes {
baseRoutes = append(baseRoutes, routerx.Route{
Index: index,
Name: route.Name,
Match: route.Match,
Enabled: route.Sink != nil,
})
}
normalized := routerx.Normalize(baseRoutes)
result := make([]routeSnapshot, 0, len(normalized))
for _, item := range normalized {
route := routes[item.Index]
result = append(result, routeSnapshot{
name: item.Name,
match: item.Match,
formatter: route.Formatter,
sink: route.Sink,
})
}
return result
}
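A sketch of routing by level, relying on the nil-Formatter fallback in Handle above; import path assumed as before:

package main

import (
	"os"

	"b612.me/starlog"
)

func main() {
	logger := starlog.NewStarlog(nil)
	defer logger.Close()
	router := starlog.NewRouteHandler(
		starlog.Route{
			Name:      "errors",
			Match:     starlog.MatchLevels(starlog.LvError),
			Formatter: starlog.NewTextFormatter(),
			Sink:      starlog.NewWriterSink(os.Stderr),
		},
		starlog.Route{
			Name:  "all",
			Match: starlog.MatchAllLevels(),
			// Formatter left nil: Handle falls back to NewTextFormatter.
			Sink: starlog.NewWriterSink(os.Stdout),
		},
	)
	logger.AppendEntryHandler(router)
	logger.Info("reaches the all route only")
	logger.Error("reaches both routes")
}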

306
route_handler_test.go Normal file

@ -0,0 +1,306 @@
package starlog
import (
"bytes"
"context"
"errors"
"io/ioutil"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
type messageOnlyFormatter struct{}
func (formatter *messageOnlyFormatter) Format(entry *Entry) ([]byte, error) {
if entry == nil {
return []byte{}, nil
}
return []byte(entry.Message + "\n"), nil
}
type failSink struct{}
func (sink *failSink) Write(data []byte) error {
_ = data
return errors.New("route sink write failed")
}
func (sink *failSink) Close() error {
return nil
}
type closeCountSink struct {
closeCount uint64
}
func (sink *closeCountSink) Write(data []byte) error {
_ = data
return nil
}
func (sink *closeCountSink) Close() error {
atomic.AddUint64(&sink.closeCount, 1)
return nil
}
type safeBuffer struct {
mu sync.Mutex
buf bytes.Buffer
}
func (buffer *safeBuffer) Write(p []byte) (int, error) {
buffer.mu.Lock()
defer buffer.mu.Unlock()
return buffer.buf.Write(p)
}
func (buffer *safeBuffer) String() string {
buffer.mu.Lock()
defer buffer.mu.Unlock()
return buffer.buf.String()
}
func waitFor(t *testing.T, timeout time.Duration, cond func() bool, reason string) {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if cond() {
return
}
time.Sleep(5 * time.Millisecond)
}
t.Fatalf("timeout waiting for condition: %s", reason)
}
func TestRouteHandlerSplitByLevel(t *testing.T) {
var totalBuf safeBuffer
var briefBuf safeBuffer
var errBuf safeBuffer
logger := newStructuredTestLogger(&totalBuf)
router := NewRouteHandler(
Route{
Name: "brief",
Match: MatchLevels(LvInfo, LvNotice),
Formatter: &messageOnlyFormatter{},
Sink: NewWriterSink(&briefBuf),
},
Route{
Name: "err",
Match: MatchLevels(LvError),
Formatter: &messageOnlyFormatter{},
Sink: NewWriterSink(&errBuf),
},
)
logger.AppendEntryHandler(router)
logger.Info("i1")
logger.Notice("n1")
logger.Error("e1")
waitFor(t, 300*time.Millisecond, func() bool {
return strings.Contains(briefBuf.String(), "i1") &&
strings.Contains(briefBuf.String(), "n1") &&
strings.Contains(errBuf.String(), "e1")
}, "route handler split outputs")
total := totalBuf.String()
if !strings.Contains(total, "i1") || !strings.Contains(total, "n1") || !strings.Contains(total, "e1") {
t.Fatalf("total log should keep all levels, got %q", total)
}
brief := briefBuf.String()
if !strings.Contains(brief, "i1") || !strings.Contains(brief, "n1") {
t.Fatalf("brief route should contain info+notice, got %q", brief)
}
if strings.Contains(brief, "e1") {
t.Fatalf("brief route should not contain error log, got %q", brief)
}
errLog := errBuf.String()
if !strings.Contains(errLog, "e1") {
t.Fatalf("err route should contain error log, got %q", errLog)
}
if strings.Contains(errLog, "i1") || strings.Contains(errLog, "n1") {
t.Fatalf("err route should only contain error log, got %q", errLog)
}
}
func TestRouteHandlerDynamicReplaceRoutes(t *testing.T) {
var totalBuf safeBuffer
var briefBuf safeBuffer
var errBuf safeBuffer
logger := newStructuredTestLogger(&totalBuf)
router := NewRouteHandler(
Route{
Name: "brief",
Match: MatchLevels(LvInfo),
Formatter: &messageOnlyFormatter{},
Sink: NewWriterSink(&briefBuf),
},
)
logger.SetEntryHandler(router)
logger.Info("brief-1")
waitFor(t, 300*time.Millisecond, func() bool {
return strings.Contains(briefBuf.String(), "brief-1")
}, "initial brief route output")
router.ReplaceRoutes(
Route{
Name: "err",
Match: MatchLevels(LvError),
Formatter: &messageOnlyFormatter{},
Sink: NewWriterSink(&errBuf),
},
)
logger.Info("brief-2")
logger.Error("err-1")
waitFor(t, 300*time.Millisecond, func() bool {
return strings.Contains(errBuf.String(), "err-1")
}, "replaced error route output")
brief := briefBuf.String()
if !strings.Contains(brief, "brief-1") {
t.Fatalf("brief route should keep previous matching log, got %q", brief)
}
if strings.Contains(brief, "brief-2") || strings.Contains(brief, "err-1") {
t.Fatalf("brief route should be replaced and stop receiving new logs, got %q", brief)
}
errLog := errBuf.String()
if !strings.Contains(errLog, "err-1") {
t.Fatalf("err route should receive error log after replace, got %q", errLog)
}
if strings.Contains(errLog, "brief-1") || strings.Contains(errLog, "brief-2") {
t.Fatalf("err route should not receive info logs, got %q", errLog)
}
}
func TestChainHandlerRunsAll(t *testing.T) {
var c1 uint64
var c2 uint64
handler := ChainHandler(
HandlerFunc(func(context.Context, *Entry) error {
atomic.AddUint64(&c1, 1)
return nil
}),
HandlerFunc(func(context.Context, *Entry) error {
atomic.AddUint64(&c2, 1)
return nil
}),
)
if err := handler.Handle(context.Background(), &Entry{}); err != nil {
t.Fatalf("chain handler should not return error, got %v", err)
}
if atomic.LoadUint64(&c1) != 1 || atomic.LoadUint64(&c2) != 1 {
t.Fatalf("all handlers should run once, got c1=%d c2=%d", c1, c2)
}
}
func TestRouteHandlerWriteErrorObservable(t *testing.T) {
resetAsyncMetricsForTest()
defer resetAsyncMetricsForTest()
var totalBuf safeBuffer
logger := newStructuredTestLogger(&totalBuf)
logger.SetEntryHandler(NewRouteHandler(
Route{
Name: "failed-route",
Match: MatchAllLevels(),
Formatter: &messageOnlyFormatter{},
Sink: &failSink{},
},
))
logger.Info("route write error")
waitFor(t, 300*time.Millisecond, func() bool {
return GetWriteErrorCount() > 0
}, "route sink write error observable")
}
func TestRouteHandlerCloseDeduplicatesSameSink(t *testing.T) {
sink := &closeCountSink{}
handler := NewRouteHandler(
Route{
Name: "r1",
Match: MatchLevels(LvInfo),
Formatter: &messageOnlyFormatter{},
Sink: sink,
},
Route{
Name: "r2",
Match: MatchLevels(LvError),
Formatter: &messageOnlyFormatter{},
Sink: sink,
},
)
if err := handler.Close(); err != nil {
t.Fatalf("route handler close failed: %v", err)
}
if atomic.LoadUint64(&sink.closeCount) != 1 {
t.Fatalf("same sink should be closed once, got %d", sink.closeCount)
}
}
func TestRouteHandlerWithRotatingFileSink(t *testing.T) {
var totalBuf safeBuffer
logger := newStructuredTestLogger(&totalBuf)
logger.SetShowStd(false)
logger.SetShowColor(false)
debugPath := filepath.Join(testBinDir(t), "debug.log")
debugSink, err := NewManagedRotateBySizeSink(
debugPath,
true,
128,
10*time.Millisecond,
RotateManageOptions{
MaxBackups: 5,
Pattern: "debug.*.log",
},
)
if err != nil {
t.Fatalf("create rotating sink failed: %v", err)
}
router := NewRouteHandler(
Route{
Name: "debug-info",
Match: MatchLevels(LvDebug, LvInfo),
Formatter: &messageOnlyFormatter{},
Sink: debugSink,
},
)
logger.SetEntryHandler(router)
for idx := 0; idx < 20; idx++ {
logger.Infof("debug payload %02d %s", idx, strings.Repeat("x", 24))
time.Sleep(3 * time.Millisecond)
}
waitFor(t, 2*time.Second, func() bool {
matches, _ := filepath.Glob(filepath.Join(filepath.Dir(debugPath), "debug.*.log"))
return len(matches) > 0
}, "rotating route sink archive creation")
content, readErr := os.ReadFile(debugPath)
if readErr != nil {
t.Fatalf("read debug current log failed: %v", readErr)
}
if len(content) == 0 {
t.Fatalf("debug log should contain routed logs")
}
if err = logger.Close(); err != nil {
t.Fatalf("logger close failed: %v", err)
}
if err = debugSink.Write([]byte("after close")); !errors.Is(err, ErrRotatingFileSinkClosed) {
t.Fatalf("rotating sink should be closed by logger close, got %v", err)
}
}

865
sample_dedup.go Normal file

@ -0,0 +1,865 @@
package starlog
import (
"strconv"
"strings"
"sync"
"time"
)
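// SamplingScope selects whether sampling draws from one global budget or from
// an independent budget per key.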
type SamplingScope int
const (
SamplingScopeGlobal SamplingScope = iota
SamplingScopeByKey
)
type SamplingDropData struct {
Time time.Time
Key string
Reason string
Level int
LevelName string
LoggerName string
Message string
Rate float64
Allowed uint64
Dropped uint64
CurrentKeys int
}
type SamplingStats struct {
Enabled bool
Rate float64
Scope SamplingScope
Allowed uint64
Dropped uint64
LastDropTime time.Time
LastDropKey string
LastReason string
CurrentKeys int
}
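// SamplingConfig drives probabilistic sampling: Rate is the fraction of
// matching entries to keep (clamped to [0,1]), Levels restricts which levels
// are sampled (empty means all), and MaxKeys/KeyTTL bound the per-key state
// used when Scope is SamplingScopeByKey.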
type SamplingConfig struct {
Enable bool
Levels []int
Rate float64
Scope SamplingScope
KeyFunc func(*Entry) string
MaxKeys int
KeyTTL time.Duration
OnDrop func(SamplingDropData)
}
type samplingBucket struct {
allowance float64
lastSeen time.Time
}
type sampler struct {
mu sync.Mutex
cfg SamplingConfig
limitedLevel map[int]struct{}
buckets map[string]*samplingBucket
nowFunc func() time.Time
allowedCount uint64
droppedCount uint64
lastDropTime time.Time
lastDropKey string
lastDropReason string
lastCleanupTime time.Time
}
func DefaultSamplingConfig() SamplingConfig {
return SamplingConfig{
Enable: false,
Levels: nil,
Rate: 1,
Scope: SamplingScopeGlobal,
KeyFunc: nil,
MaxKeys: 4096,
KeyTTL: 10 * time.Minute,
OnDrop: nil,
}
}
func cloneSamplingConfig(cfg SamplingConfig) SamplingConfig {
cloned := cfg
cloned.Levels = cloneIntSlice(cfg.Levels)
return cloned
}
func normalizeSamplingConfig(cfg SamplingConfig) SamplingConfig {
if cfg.Rate < 0 {
cfg.Rate = 0
}
if cfg.Rate > 1 {
cfg.Rate = 1
}
switch cfg.Scope {
case SamplingScopeGlobal, SamplingScopeByKey:
default:
cfg.Scope = SamplingScopeGlobal
}
if cfg.MaxKeys <= 0 {
cfg.MaxKeys = 4096
}
if cfg.KeyTTL <= 0 {
cfg.KeyTTL = 10 * time.Minute
}
return cloneSamplingConfig(cfg)
}
func newSampler() *sampler {
cfg := normalizeSamplingConfig(DefaultSamplingConfig())
return &sampler{
cfg: cfg,
limitedLevel: buildLevelSet(cfg.Levels),
buckets: make(map[string]*samplingBucket),
nowFunc: time.Now,
}
}
func (s *sampler) setNowFuncForTest(nowFunc func() time.Time) {
if s == nil {
return
}
s.mu.Lock()
if nowFunc == nil {
s.nowFunc = time.Now
} else {
s.nowFunc = nowFunc
}
s.mu.Unlock()
}
func (s *sampler) now() time.Time {
if s == nil || s.nowFunc == nil {
return time.Now()
}
return s.nowFunc()
}
func (s *sampler) SetConfig(cfg SamplingConfig) {
if s == nil {
return
}
normalized := normalizeSamplingConfig(cfg)
s.mu.Lock()
s.cfg = normalized
s.limitedLevel = buildLevelSet(normalized.Levels)
s.buckets = make(map[string]*samplingBucket)
s.lastCleanupTime = time.Time{}
s.mu.Unlock()
}
func (s *sampler) Config() SamplingConfig {
if s == nil {
return normalizeSamplingConfig(DefaultSamplingConfig())
}
s.mu.Lock()
defer s.mu.Unlock()
return cloneSamplingConfig(s.cfg)
}
func (s *sampler) Stats() SamplingStats {
if s == nil {
return SamplingStats{}
}
s.mu.Lock()
defer s.mu.Unlock()
return SamplingStats{
Enabled: s.cfg.Enable,
Rate: s.cfg.Rate,
Scope: s.cfg.Scope,
Allowed: s.allowedCount,
Dropped: s.droppedCount,
LastDropTime: s.lastDropTime,
LastDropKey: s.lastDropKey,
LastReason: s.lastDropReason,
CurrentKeys: len(s.buckets),
}
}
func (s *sampler) ResetStats() {
if s == nil {
return
}
s.mu.Lock()
s.allowedCount = 0
s.droppedCount = 0
s.lastDropTime = time.Time{}
s.lastDropKey = ""
s.lastDropReason = ""
s.mu.Unlock()
}
func (s *sampler) isLimitedLevel(level int) bool {
if len(s.limitedLevel) == 0 {
return true
}
_, ok := s.limitedLevel[level]
return ok
}
func (s *sampler) resolveKey(entry *Entry) string {
if s.cfg.Scope == SamplingScopeGlobal {
return "__global__"
}
if s.cfg.KeyFunc != nil {
key := strings.TrimSpace(s.cfg.KeyFunc(entry))
if key != "" {
return key
}
}
if entry == nil {
return "__empty__"
}
message := strings.TrimSpace(entry.Message)
if message == "" {
return strconv.Itoa(entry.Level)
}
return strconv.Itoa(entry.Level) + ":" + message
}
func (s *sampler) cleanupBucketsLocked(now time.Time) {
if s.cfg.Scope != SamplingScopeByKey || s.cfg.KeyTTL <= 0 {
return
}
if !s.lastCleanupTime.IsZero() && now.Sub(s.lastCleanupTime) < time.Second {
return
}
for key, bucket := range s.buckets {
if bucket == nil {
delete(s.buckets, key)
continue
}
if now.Sub(bucket.lastSeen) > s.cfg.KeyTTL {
delete(s.buckets, key)
}
}
s.lastCleanupTime = now
}
func (s *sampler) getBucketLocked(key string, now time.Time) *samplingBucket {
if bucket, ok := s.buckets[key]; ok && bucket != nil {
return bucket
}
if s.cfg.Scope == SamplingScopeByKey && s.cfg.MaxKeys > 0 && len(s.buckets) >= s.cfg.MaxKeys {
oldestKey := ""
oldestTime := now
for existingKey, bucket := range s.buckets {
if bucket == nil {
oldestKey = existingKey
break
}
if oldestKey == "" || bucket.lastSeen.Before(oldestTime) {
oldestKey = existingKey
oldestTime = bucket.lastSeen
}
}
if oldestKey != "" {
delete(s.buckets, oldestKey)
}
}
bucket := &samplingBucket{
allowance: 1,
lastSeen: now,
}
s.buckets[key] = bucket
return bucket
}
func (s *sampler) Allow(entry *Entry) bool {
if s == nil || entry == nil {
return true
}
now := s.now()
var callback func(SamplingDropData)
dropData := SamplingDropData{}
allow := true
s.mu.Lock()
if !s.cfg.Enable {
s.allowedCount++
s.mu.Unlock()
return true
}
if !s.isLimitedLevel(entry.Level) {
s.allowedCount++
s.mu.Unlock()
return true
}
if s.cfg.Rate >= 1 {
s.allowedCount++
s.mu.Unlock()
return true
}
key := s.resolveKey(entry)
if s.cfg.Rate <= 0 {
s.droppedCount++
s.lastDropTime = now
s.lastDropKey = key
s.lastDropReason = "sampling_rate_zero"
allow = false
dropData = SamplingDropData{
Time: now,
Key: key,
Reason: s.lastDropReason,
Level: entry.Level,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Message: entry.Message,
Rate: s.cfg.Rate,
Allowed: s.allowedCount,
Dropped: s.droppedCount,
CurrentKeys: len(s.buckets),
}
callback = s.cfg.OnDrop
s.mu.Unlock()
if callback != nil {
entryCopy := cloneEntryForDrop(entry)
dropData.Level = entryCopy.Level
dropData.LevelName = entryCopy.LevelName
dropData.LoggerName = entryCopy.LoggerName
dropData.Message = entryCopy.Message
func() {
defer func() {
recover()
}()
callback(dropData)
}()
}
return allow
}
s.cleanupBucketsLocked(now)
bucket := s.getBucketLocked(key, now)
if bucket.allowance >= 1 {
allow = true
bucket.allowance -= 1
s.allowedCount++
} else {
allow = false
s.droppedCount++
s.lastDropTime = now
s.lastDropKey = key
s.lastDropReason = "sampling_rate_exceeded"
dropData = SamplingDropData{
Time: now,
Key: key,
Reason: s.lastDropReason,
Level: entry.Level,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Message: entry.Message,
Rate: s.cfg.Rate,
Allowed: s.allowedCount,
Dropped: s.droppedCount,
CurrentKeys: len(s.buckets),
}
callback = s.cfg.OnDrop
}
bucket.allowance += s.cfg.Rate
if bucket.allowance > 1 {
bucket.allowance = 1
}
bucket.lastSeen = now
s.mu.Unlock()
if !allow && callback != nil {
entryCopy := cloneEntryForDrop(entry)
dropData.Level = entryCopy.Level
dropData.LevelName = entryCopy.LevelName
dropData.LoggerName = entryCopy.LoggerName
dropData.Message = entryCopy.Message
func() {
defer func() {
recover()
}()
callback(dropData)
}()
}
return allow
}
func (logger *starlog) allowBySampling(entry *Entry) bool {
if logger == nil || logger.sampler == nil {
return true
}
return logger.sampler.Allow(entry)
}
func (logger *StarLogger) SetSamplingConfig(cfg SamplingConfig) {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
if logger.logcore.sampler == nil {
logger.logcore.sampler = newSampler()
}
s := logger.logcore.sampler
logger.logcore.mu.Unlock()
s.SetConfig(cfg)
}
func (logger *StarLogger) GetSamplingConfig() SamplingConfig {
if logger == nil || logger.logcore == nil {
return normalizeSamplingConfig(DefaultSamplingConfig())
}
logger.logcore.mu.Lock()
s := logger.logcore.sampler
logger.logcore.mu.Unlock()
if s == nil {
return normalizeSamplingConfig(DefaultSamplingConfig())
}
return s.Config()
}
func (logger *StarLogger) EnableSampling(enable bool) {
cfg := logger.GetSamplingConfig()
cfg.Enable = enable
logger.SetSamplingConfig(cfg)
}
func (logger *StarLogger) SetSamplingDropHandler(handler func(SamplingDropData)) {
cfg := logger.GetSamplingConfig()
cfg.OnDrop = handler
logger.SetSamplingConfig(cfg)
}
func (logger *StarLogger) GetSamplingStats() SamplingStats {
if logger == nil || logger.logcore == nil {
return SamplingStats{}
}
logger.logcore.mu.Lock()
s := logger.logcore.sampler
logger.logcore.mu.Unlock()
if s == nil {
return SamplingStats{}
}
return s.Stats()
}
func (logger *StarLogger) ResetSamplingStats() {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
s := logger.logcore.sampler
logger.logcore.mu.Unlock()
if s == nil {
return
}
s.ResetStats()
}
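
Taken together, the sampler is a per-key token bucket: each key starts with one token, an allowed entry spends it, and every call refills Rate tokens up to a cap of one, so Rate=0.5 yields a strict allow/drop alternation from a fresh bucket. The drop callback fires after the lock is released, on copied entry data, so it can safely log or export metrics. A minimal usage sketch from an external caller, assuming only the API added in this commit:

package main

import (
	"bytes"
	"fmt"

	"b612.me/starlog"
)

func main() {
	var buf bytes.Buffer
	logger := starlog.NewStarlog(&buf)
	logger.SetShowStd(false) // keep the demo off the console
	logger.SetSamplingConfig(starlog.SamplingConfig{
		Enable: true,
		Rate:   0.5, // token bucket: allow, drop, allow, drop, ...
		Scope:  starlog.SamplingScopeGlobal,
	})
	logger.SetSamplingDropHandler(func(d starlog.SamplingDropData) {
		fmt.Printf("dropped %q key=%s reason=%s\n", d.Message, d.Key, d.Reason)
	})
	for i := 0; i < 4; i++ {
		logger.Infoln("tick")
	}
	stats := logger.GetSamplingStats()
	fmt.Printf("allowed=%d dropped=%d\n", stats.Allowed, stats.Dropped) // allowed=2 dropped=2
}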
type DedupScope int
const (
DedupScopeGlobal DedupScope = iota
DedupScopeByKey
)
type DedupDropData struct {
Time time.Time
Key string
Reason string
Level int
LevelName string
LoggerName string
Message string
Window time.Duration
Allowed uint64
Dropped uint64
CurrentKeys int
}
type DedupStats struct {
Enabled bool
Window time.Duration
Scope DedupScope
Allowed uint64
Dropped uint64
LastDropTime time.Time
LastDropKey string
LastReason string
CurrentKeys int
}
type DedupConfig struct {
Enable bool
Levels []int
Window time.Duration
Scope DedupScope
KeyFunc func(*Entry) string
MaxKeys int
KeyTTL time.Duration
OnDrop func(DedupDropData)
}
type dedupItem struct {
lastSeen time.Time
}
type deduper struct {
mu sync.Mutex
cfg DedupConfig
limitedLevel map[int]struct{}
items map[string]*dedupItem
nowFunc func() time.Time
allowedCount uint64
droppedCount uint64
lastDropTime time.Time
lastDropKey string
lastDropReason string
lastCleanupTime time.Time
}
func DefaultDedupConfig() DedupConfig {
return DedupConfig{
Enable: false,
Levels: nil,
Window: 2 * time.Second,
Scope: DedupScopeByKey,
KeyFunc: nil,
MaxKeys: 4096,
KeyTTL: 10 * time.Second,
OnDrop: nil,
}
}
func cloneDedupConfig(cfg DedupConfig) DedupConfig {
cloned := cfg
cloned.Levels = cloneIntSlice(cfg.Levels)
return cloned
}
func normalizeDedupConfig(cfg DedupConfig) DedupConfig {
if cfg.Window <= 0 {
cfg.Window = 2 * time.Second
}
switch cfg.Scope {
case DedupScopeGlobal, DedupScopeByKey:
default:
cfg.Scope = DedupScopeByKey
}
if cfg.MaxKeys <= 0 {
cfg.MaxKeys = 4096
}
if cfg.KeyTTL <= 0 {
cfg.KeyTTL = cfg.Window * 4
if cfg.KeyTTL < 10*time.Second {
cfg.KeyTTL = 10 * time.Second
}
}
return cloneDedupConfig(cfg)
}
func newDeduper() *deduper {
cfg := normalizeDedupConfig(DefaultDedupConfig())
return &deduper{
cfg: cfg,
limitedLevel: buildLevelSet(cfg.Levels),
items: make(map[string]*dedupItem),
nowFunc: time.Now,
}
}
func (d *deduper) setNowFuncForTest(nowFunc func() time.Time) {
if d == nil {
return
}
d.mu.Lock()
if nowFunc == nil {
d.nowFunc = time.Now
} else {
d.nowFunc = nowFunc
}
d.mu.Unlock()
}
func (d *deduper) now() time.Time {
if d == nil || d.nowFunc == nil {
return time.Now()
}
return d.nowFunc()
}
func (d *deduper) SetConfig(cfg DedupConfig) {
if d == nil {
return
}
normalized := normalizeDedupConfig(cfg)
d.mu.Lock()
d.cfg = normalized
d.limitedLevel = buildLevelSet(normalized.Levels)
d.items = make(map[string]*dedupItem)
d.lastCleanupTime = time.Time{}
d.mu.Unlock()
}
func (d *deduper) Config() DedupConfig {
if d == nil {
return normalizeDedupConfig(DefaultDedupConfig())
}
d.mu.Lock()
defer d.mu.Unlock()
return cloneDedupConfig(d.cfg)
}
func (d *deduper) Stats() DedupStats {
if d == nil {
return DedupStats{}
}
d.mu.Lock()
defer d.mu.Unlock()
return DedupStats{
Enabled: d.cfg.Enable,
Window: d.cfg.Window,
Scope: d.cfg.Scope,
Allowed: d.allowedCount,
Dropped: d.droppedCount,
LastDropTime: d.lastDropTime,
LastDropKey: d.lastDropKey,
LastReason: d.lastDropReason,
CurrentKeys: len(d.items),
}
}
func (d *deduper) ResetStats() {
if d == nil {
return
}
d.mu.Lock()
d.allowedCount = 0
d.droppedCount = 0
d.lastDropTime = time.Time{}
d.lastDropKey = ""
d.lastDropReason = ""
d.mu.Unlock()
}
func (d *deduper) isLimitedLevel(level int) bool {
if len(d.limitedLevel) == 0 {
return true
}
_, ok := d.limitedLevel[level]
return ok
}
func (d *deduper) resolveKey(entry *Entry) string {
if d.cfg.Scope == DedupScopeGlobal {
return "__global__"
}
if d.cfg.KeyFunc != nil {
key := strings.TrimSpace(d.cfg.KeyFunc(entry))
if key != "" {
return key
}
}
if entry == nil {
return "__empty__"
}
message := strings.TrimSpace(entry.Message)
if message == "" {
return strconv.Itoa(entry.Level)
}
return strconv.Itoa(entry.Level) + ":" + message
}
func (d *deduper) cleanupItemsLocked(now time.Time) {
if d.cfg.Scope != DedupScopeByKey || d.cfg.KeyTTL <= 0 {
return
}
if !d.lastCleanupTime.IsZero() && now.Sub(d.lastCleanupTime) < time.Second {
return
}
for key, item := range d.items {
if item == nil {
delete(d.items, key)
continue
}
if now.Sub(item.lastSeen) > d.cfg.KeyTTL {
delete(d.items, key)
}
}
d.lastCleanupTime = now
}
func (d *deduper) getItemLocked(key string, now time.Time) *dedupItem {
if item, ok := d.items[key]; ok && item != nil {
return item
}
if d.cfg.Scope == DedupScopeByKey && d.cfg.MaxKeys > 0 && len(d.items) >= d.cfg.MaxKeys {
oldestKey := ""
oldestTime := now
for existingKey, item := range d.items {
if item == nil {
oldestKey = existingKey
break
}
if oldestKey == "" || item.lastSeen.Before(oldestTime) {
oldestKey = existingKey
oldestTime = item.lastSeen
}
}
if oldestKey != "" {
delete(d.items, oldestKey)
}
}
item := &dedupItem{}
d.items[key] = item
return item
}
func (d *deduper) Allow(entry *Entry) bool {
if d == nil || entry == nil {
return true
}
now := d.now()
var callback func(DedupDropData)
dropData := DedupDropData{}
allow := true
d.mu.Lock()
if !d.cfg.Enable {
d.allowedCount++
d.mu.Unlock()
return true
}
if !d.isLimitedLevel(entry.Level) {
d.allowedCount++
d.mu.Unlock()
return true
}
key := d.resolveKey(entry)
d.cleanupItemsLocked(now)
item := d.getItemLocked(key, now)
if !item.lastSeen.IsZero() && now.Sub(item.lastSeen) < d.cfg.Window {
item.lastSeen = now
d.droppedCount++
d.lastDropTime = now
d.lastDropKey = key
d.lastDropReason = "dedup_window"
allow = false
dropData = DedupDropData{
Time: now,
Key: key,
Reason: d.lastDropReason,
Level: entry.Level,
LevelName: entry.LevelName,
LoggerName: entry.LoggerName,
Message: entry.Message,
Window: d.cfg.Window,
Allowed: d.allowedCount,
Dropped: d.droppedCount,
CurrentKeys: len(d.items),
}
callback = d.cfg.OnDrop
} else {
item.lastSeen = now
d.allowedCount++
}
d.mu.Unlock()
if !allow && callback != nil {
entryCopy := cloneEntryForDrop(entry)
dropData.Level = entryCopy.Level
dropData.LevelName = entryCopy.LevelName
dropData.LoggerName = entryCopy.LoggerName
dropData.Message = entryCopy.Message
func() {
defer func() {
recover()
}()
callback(dropData)
}()
}
return allow
}
func (logger *starlog) allowByDedup(entry *Entry) bool {
if logger == nil || logger.deduper == nil {
return true
}
return logger.deduper.Allow(entry)
}
func (logger *StarLogger) SetDedupConfig(cfg DedupConfig) {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
if logger.logcore.deduper == nil {
logger.logcore.deduper = newDeduper()
}
d := logger.logcore.deduper
logger.logcore.mu.Unlock()
d.SetConfig(cfg)
}
func (logger *StarLogger) GetDedupConfig() DedupConfig {
if logger == nil || logger.logcore == nil {
return normalizeDedupConfig(DefaultDedupConfig())
}
logger.logcore.mu.Lock()
d := logger.logcore.deduper
logger.logcore.mu.Unlock()
if d == nil {
return normalizeDedupConfig(DefaultDedupConfig())
}
return d.Config()
}
func (logger *StarLogger) EnableDedup(enable bool) {
cfg := logger.GetDedupConfig()
cfg.Enable = enable
logger.SetDedupConfig(cfg)
}
func (logger *StarLogger) SetDedupDropHandler(handler func(DedupDropData)) {
cfg := logger.GetDedupConfig()
cfg.OnDrop = handler
logger.SetDedupConfig(cfg)
}
func (logger *StarLogger) GetDedupStats() DedupStats {
if logger == nil || logger.logcore == nil {
return DedupStats{}
}
logger.logcore.mu.Lock()
d := logger.logcore.deduper
logger.logcore.mu.Unlock()
if d == nil {
return DedupStats{}
}
return d.Stats()
}
func (logger *StarLogger) ResetDedupStats() {
if logger == nil || logger.logcore == nil {
return
}
logger.logcore.mu.Lock()
d := logger.logcore.deduper
logger.logcore.mu.Unlock()
if d == nil {
return
}
d.ResetStats()
}
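
The deduper is the sampler's sibling: instead of a token bucket it keeps a last-seen timestamp per key and suppresses repeats inside Window. Note that a suppressed entry still refreshes lastSeen, so a steady stream of identical messages stays suppressed until it pauses for a full window. A sketch with a custom key function (the KeyFunc shown is a hypothetical example; any func(*Entry) string works):

package main

import (
	"bytes"
	"time"

	"b612.me/starlog"
)

func main() {
	var buf bytes.Buffer
	logger := starlog.NewStarlog(&buf)
	logger.SetShowStd(false)
	logger.SetDedupConfig(starlog.DedupConfig{
		Enable: true,
		Window: 5 * time.Second,
		Scope:  starlog.DedupScopeByKey,
		// Hypothetical key: collapse by message text only, so the same
		// message repeated at different levels dedups together.
		KeyFunc: func(e *starlog.Entry) string { return e.Message },
	})
	logger.Error("disk full") // written
	logger.Error("disk full") // suppressed: same key within the window
	logger.Info("disk full")  // also suppressed under this KeyFunc
}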

109
sample_dedup_test.go Normal file
View File

@ -0,0 +1,109 @@
package starlog
import (
"bytes"
"sync/atomic"
"testing"
"time"
)
func TestSamplingRateDeterministic(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetSamplingConfig(SamplingConfig{
Enable: true,
Levels: []int{LvInfo},
Rate: 0.5,
Scope: SamplingScopeGlobal,
})
for idx := 0; idx < 5; idx++ {
logger.Infoln("sample")
}
stats := logger.GetSamplingStats()
if stats.Allowed != 3 || stats.Dropped != 2 {
t.Fatalf("unexpected sampling stats: %+v", stats)
}
if got := bytes.Count(buf.Bytes(), []byte("sample")); got != 3 {
t.Fatalf("expected 3 sampled logs in output, got %d", got)
}
}
func TestSamplingDropCallback(t *testing.T) {
logger := newStructuredTestLogger(&bytes.Buffer{})
var callbackCount uint64
logger.SetSamplingConfig(SamplingConfig{
Enable: true,
Levels: []int{LvInfo},
Rate: 0,
Scope: SamplingScopeGlobal,
OnDrop: func(data SamplingDropData) {
if data.Reason == "" {
t.Errorf("drop callback reason should not be empty")
}
atomic.AddUint64(&callbackCount, 1)
},
})
logger.Infoln("drop-all")
logger.Infoln("drop-all")
if atomic.LoadUint64(&callbackCount) != 2 {
t.Fatalf("sampling drop callback should be called twice, got %d", callbackCount)
}
}
func TestDedupWindow(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetDedupConfig(DedupConfig{
Enable: true,
Levels: []int{LvInfo},
Window: time.Hour,
Scope: DedupScopeByKey,
})
logger.Infoln("dup")
logger.Infoln("dup")
logger.Infoln("dup")
logger.Infoln("other")
stats := logger.GetDedupStats()
if stats.Allowed != 2 || stats.Dropped != 2 {
t.Fatalf("unexpected dedup stats: %+v", stats)
}
if got := bytes.Count(buf.Bytes(), []byte("dup")); got != 1 {
t.Fatalf("expected one deduped dup log in output, got %d", got)
}
if got := bytes.Count(buf.Bytes(), []byte("other")); got != 1 {
t.Fatalf("expected one other log in output, got %d", got)
}
}
func TestSamplingDedupConfigSnapshotApply(t *testing.T) {
logger := newStructuredTestLogger(&bytes.Buffer{})
cfg := logger.GetConfig()
cfg.Sampling = SamplingConfig{
Enable: true,
Levels: []int{LvInfo},
Rate: 0.25,
Scope: SamplingScopeByKey,
}
cfg.Dedup = DedupConfig{
Enable: true,
Levels: []int{LvWarning},
Window: 3 * time.Second,
Scope: DedupScopeByKey,
}
logger.ApplyConfig(cfg)
samplingCfg := logger.GetSamplingConfig()
if !samplingCfg.Enable || samplingCfg.Rate != 0.25 || samplingCfg.Scope != SamplingScopeByKey {
t.Fatalf("unexpected sampling config after ApplyConfig: %+v", samplingCfg)
}
dedupCfg := logger.GetDedupConfig()
if !dedupCfg.Enable || dedupCfg.Window != 3*time.Second || dedupCfg.Scope != DedupScopeByKey {
t.Fatalf("unexpected dedup config after ApplyConfig: %+v", dedupCfg)
}
}

75
scripts/test-local.ps1 Normal file
View File

@ -0,0 +1,75 @@
param(
[switch]$SkipRace,
[switch]$SkipFuzz
)
$ErrorActionPreference = "Stop"
if (Get-Variable -Name PSNativeCommandUseErrorActionPreference -ErrorAction SilentlyContinue) {
$PSNativeCommandUseErrorActionPreference = $false
}
function Invoke-Go {
param(
[string[]]$GoArgs,
[switch]$CaptureOutput
)
if ($CaptureOutput) {
$output = & go @GoArgs 2>&1
return @{
Code = $LASTEXITCODE
Output = $output
}
}
& go @GoArgs
if ($LASTEXITCODE -ne 0) {
throw "go $(($GoArgs) -join ' ') failed with exit code $LASTEXITCODE"
}
}
function Run-Step {
param(
[string]$Name,
[scriptblock]$Script
)
Write-Host "==> $Name"
& $Script
Write-Host "OK: $Name"
}
Run-Step "Unit Tests" {
Invoke-Go -GoArgs @("test", "./...")
}
if (-not $SkipRace) {
Write-Host "==> Race Precheck"
$raceResult = Invoke-Go -GoArgs @("test", "-race", "fmt") -CaptureOutput
if ($raceResult.Code -ne 0) {
$raceText = ($raceResult.Output | Out-String)
if ($raceText -match "runtime/race: package testmain: cannot find package") {
Write-Warning "Race environment issue detected: runtime/race cannot build testmain."
Write-Warning "Skip race on this machine. CI race-linux job remains the source of truth."
} else {
Write-Output $raceText
throw "Race precheck failed."
}
} else {
Run-Step "Race Tests" {
Invoke-Go -GoArgs @("test", "-race", "./...")
}
}
}
Run-Step "Benchmark Smoke" {
Invoke-Go -GoArgs @("test", ".", "-run", "^$", "-bench", "Benchmark", "-benchmem", "-benchtime=100x")
}
if (-not $SkipFuzz) {
Run-Step "Fuzz Smoke (Text/JSON)" {
Invoke-Go -GoArgs @("test", ".", "-run", "^$", "-fuzz=FuzzTextAndJSONFormatter", "-fuzztime=2s")
}
Run-Step "Fuzz Smoke (Keyword)" {
Invoke-Go -GoArgs @("test", ".", "-run", "^$", "-fuzz=FuzzKeywordHighlight", "-fuzztime=2s")
}
}
Write-Host "All selected checks completed."

149
stacks.go
View File

@ -1,149 +1,20 @@
package starlog
import (
"errors"
"io"
"os"
"sync"
"sync/atomic"
import "b612.me/starlog/internal/runtimex"
var (
errStackClosed = runtimex.ErrStackClosed
errStackFull = runtimex.ErrStackFull
)
type starMapKV struct {
kvMap map[interface{}]interface{}
mu sync.RWMutex
}
type starMapKV = runtimex.MapKV
type starChanStack = runtimex.ChanStack
func newStarMap() starMapKV {
var mp starMapKV
mp.kvMap = make(map[interface{}]interface{})
return mp
}
func (m *starMapKV) Get(key interface{}) (interface{}, error) {
var err error
m.mu.RLock()
defer m.mu.RUnlock()
data, ok := m.kvMap[key]
if !ok {
err = os.ErrNotExist
}
return data, err
}
func (m *starMapKV) MustGet(key interface{}) interface{} {
result, _ := m.Get(key)
return result
}
func (m *starMapKV) Store(key interface{}, value interface{}) error {
m.mu.Lock()
defer m.mu.Unlock()
m.kvMap[key] = value
return nil
}
func (m *starMapKV) Exists(key interface{}) bool {
m.mu.RLock()
defer m.mu.RUnlock()
_, ok := m.kvMap[key]
return ok
}
func (m *starMapKV) Delete(key interface{}) error {
m.mu.Lock()
defer m.mu.Unlock()
delete(m.kvMap, key)
return nil
}
func (m *starMapKV) Range(run func(k interface{}, v interface{}) bool) error {
for k, v := range m.kvMap {
if !run(k, v) {
break
}
}
return nil
}
type starChanStack struct {
data chan interface{}
cap uint64
current uint64
isClose atomic.Value
return runtimex.NewMapKV()
}
func newStarChanStack(cap uint64) *starChanStack {
rtnBuffer := new(starChanStack)
rtnBuffer.cap = cap
rtnBuffer.isClose.Store(false)
rtnBuffer.data = make(chan interface{}, cap)
return rtnBuffer
}
func (s *starChanStack) init() {
s.cap = 1024
s.data = make(chan interface{}, s.cap)
s.isClose.Store(false)
}
func (s *starChanStack) Free() uint64 {
return s.cap - s.current
}
func (s *starChanStack) Cap() uint64 {
return s.cap
}
func (s *starChanStack) Len() uint64 {
return s.current
}
func (s *starChanStack) Pop() (interface{}, error) {
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return 0, io.EOF
}
data, ok := <-s.data
if !ok {
s.isClose.Store(true)
return 0, errors.New("channel read error")
}
for {
current := atomic.LoadUint64(&s.current)
if atomic.CompareAndSwapUint64(&s.current, current, current-1) {
break
}
}
return data, nil
}
func (s *starChanStack) Push(data interface{}) error {
defer func() {
recover()
}()
if s.isClose.Load() == nil {
s.init()
}
if s.isClose.Load().(bool) {
return io.EOF
}
s.data <- data
for {
current := atomic.LoadUint64(&s.current)
if atomic.CompareAndSwapUint64(&s.current, current, current+1) {
break
}
}
return nil
}
func (s *starChanStack) Close() error {
if s.isClose.Load() == nil {
s.init()
}
s.isClose.Store(true)
close(s.data)
return nil
return runtimex.NewChanStack(cap)
}
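
The net effect of this hunk: the concrete KV-map and channel-stack implementations move to b612.me/starlog/internal/runtimex, and starMapKV/starChanStack become type aliases, so every in-package call site (newStarMap().Store/Get, newStarChanStack(n).Push/Pop) keeps compiling unchanged, while the error sentinels errStackClosed/errStackFull now point at the runtimex definitions.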

View File

@ -1,8 +1,10 @@
package starlog
import (
"context"
"fmt"
"io"
"log"
"math/rand"
"sync"
"time"
@ -13,7 +15,6 @@ var stdmu sync.Mutex
func init() {
rand.Seed(time.Now().UnixNano())
stackStopChan = make(chan int)
Std = NewStarlog(nil)
}
@ -33,6 +34,360 @@ func GetLevelColor(level int) []Attr {
return Std.GetLevelColor(level)
}
func SetLevel(level int) {
Std.SetLevel(level)
}
func GetLevel() int {
return Std.GetLevel()
}
func IsLevelEnabled(level int) bool {
return Std.IsLevelEnabled(level)
}
func SetColorMode(mode ColorMode) {
Std.SetColorMode(mode)
}
func GetColorMode() ColorMode {
return Std.GetColorMode()
}
func SetKeywordColor(keyword string, color []Attr) {
Std.SetKeywordColor(keyword, color)
}
func RemoveKeywordColor(keyword string) {
Std.RemoveKeywordColor(keyword)
}
func SetKeywordColors(colors map[string][]Attr) {
Std.SetKeywordColors(colors)
}
func GetKeywordColors() map[string][]Attr {
return Std.GetKeywordColors()
}
func ClearKeywordColors() {
Std.ClearKeywordColors()
}
func SetKeywordMatchOptions(opts KeywordMatchOptions) {
Std.SetKeywordMatchOptions(opts)
}
func GetKeywordMatchOptions() KeywordMatchOptions {
return Std.GetKeywordMatchOptions()
}
func SetKeywordIgnoreCase(enable bool) {
Std.SetKeywordIgnoreCase(enable)
}
func GetKeywordIgnoreCase() bool {
return Std.GetKeywordIgnoreCase()
}
func SetKeywordWholeWord(enable bool) {
Std.SetKeywordWholeWord(enable)
}
func GetKeywordWholeWord() bool {
return Std.GetKeywordWholeWord()
}
func ApplyKeywordPreset(preset KeywordPreset) {
Std.ApplyKeywordPreset(preset)
}
func MergeKeywordPreset(preset KeywordPreset) {
Std.MergeKeywordPreset(preset)
}
func SetShowFieldColor(show bool) {
Std.SetShowFieldColor(show)
}
func GetShowFieldColor() bool {
return Std.GetShowFieldColor()
}
func SetFieldKeyColor(color []Attr) {
Std.SetFieldKeyColor(color)
}
func GetFieldKeyColor() []Attr {
return Std.GetFieldKeyColor()
}
func SetFieldTypeColor(fieldType string, color []Attr) {
Std.SetFieldTypeColor(fieldType, color)
}
func GetFieldTypeColors() map[string][]Attr {
return Std.GetFieldTypeColors()
}
func SetFieldValueColor(field string, color []Attr) {
Std.SetFieldValueColor(field, color)
}
func RemoveFieldValueColor(field string) {
Std.RemoveFieldValueColor(field)
}
func ClearFieldValueColors() {
Std.ClearFieldValueColors()
}
func GetFieldValueColors() map[string][]Attr {
return Std.GetFieldValueColors()
}
func WithField(key string, value interface{}) *StarLogger {
return Std.WithField(key, value)
}
func WithFields(fields Fields) *StarLogger {
return Std.WithFields(fields)
}
func WithError(err error) *StarLogger {
return Std.WithError(err)
}
func WithContext(ctx context.Context) *StarLogger {
return Std.WithContext(ctx)
}
func GetConfig() Config {
return Std.GetConfig()
}
func ApplyConfig(cfg Config) {
Std.ApplyConfig(cfg)
}
func UpdateConfig(update func(*Config)) {
Std.UpdateConfig(update)
}
func ApplyProductionConfig() {
Std.ApplyProductionConfig()
}
func ApplyDevelopmentConfig() {
Std.ApplyDevelopmentConfig()
}
func Flush() error {
return Std.Flush()
}
func Sync() error {
return Std.Sync()
}
// Shutdown gracefully drains async handlers and closes Std resources.
func Shutdown(ctx context.Context) error {
return Std.Shutdown(ctx)
}
// Deprecated: use Shutdown(ctx) for graceful exit.
func CloseStd() error {
return Std.Close()
}
func ShutdownStd(ctx context.Context) error {
return Shutdown(ctx)
}
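Shutdown is the graceful exit path; a minimal sketch using nothing beyond the functions declared above:

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"b612.me/starlog"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Drain async handlers and close Std resources before exit; prefer this
	// over the deprecated CloseStd.
	if err := starlog.Shutdown(ctx); err != nil {
		fmt.Fprintln(os.Stderr, "shutdown incomplete:", err)
	}
}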
func SetRateLimitConfig(cfg RateLimitConfig) {
Std.SetRateLimitConfig(cfg)
}
func GetRateLimitConfig() RateLimitConfig {
return Std.GetRateLimitConfig()
}
func EnableRateLimit(enable bool) {
Std.EnableRateLimit(enable)
}
func SetRateLimitDropHandler(handler func(RateLimitDropData)) {
Std.SetRateLimitDropHandler(handler)
}
func GetRateLimitStats() RateLimitStats {
return Std.GetRateLimitStats()
}
func ResetRateLimitStats() {
Std.ResetRateLimitStats()
}
func SetSamplingConfig(cfg SamplingConfig) {
Std.SetSamplingConfig(cfg)
}
func GetSamplingConfig() SamplingConfig {
return Std.GetSamplingConfig()
}
func EnableSampling(enable bool) {
Std.EnableSampling(enable)
}
func SetSamplingDropHandler(handler func(SamplingDropData)) {
Std.SetSamplingDropHandler(handler)
}
func GetSamplingStats() SamplingStats {
return Std.GetSamplingStats()
}
func ResetSamplingStats() {
Std.ResetSamplingStats()
}
func SetDedupConfig(cfg DedupConfig) {
Std.SetDedupConfig(cfg)
}
func GetDedupConfig() DedupConfig {
return Std.GetDedupConfig()
}
func EnableDedup(enable bool) {
Std.EnableDedup(enable)
}
func SetDedupDropHandler(handler func(DedupDropData)) {
Std.SetDedupDropHandler(handler)
}
func GetDedupStats() DedupStats {
return Std.GetDedupStats()
}
func ResetDedupStats() {
Std.ResetDedupStats()
}
func GetMetricsSnapshot() MetricsSnapshot {
return Std.GetMetricsSnapshot()
}
func NewStdTestHook() *TestHook {
return NewTestHook(Std)
}
func SetContextFieldExtractor(extractor func(context.Context) Fields) {
Std.SetContextFieldExtractor(extractor)
}
func DebugContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.DebugContext(ctx, str...)
Std.isStd = false
}
func InfoContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.InfoContext(ctx, str...)
Std.isStd = false
}
func NoticeContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.NoticeContext(ctx, str...)
Std.isStd = false
}
func WarningContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.WarningContext(ctx, str...)
Std.isStd = false
}
func ErrorContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.ErrorContext(ctx, str...)
Std.isStd = false
}
func CriticalContext(ctx context.Context, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.CriticalContext(ctx, str...)
Std.isStd = false
}
func LogContext(ctx context.Context, isShow bool, level int, str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.LogContext(ctx, isShow, level, str...)
Std.isStd = false
}
func SetRedactor(redactor Redactor) {
Std.SetRedactor(redactor)
}
func GetRedactor() Redactor {
return Std.GetRedactor()
}
func AddRedactRule(rule RedactRule) {
Std.AddRedactRule(rule)
}
func SetRedactRules(rules []RedactRule) {
Std.SetRedactRules(rules)
}
func ClearRedactRules() {
Std.ClearRedactRules()
}
func GetRedactRuleCount() int {
return Std.GetRedactRuleCount()
}
func SetRedactFailMode(mode RedactFailMode) {
Std.SetRedactFailMode(mode)
}
func GetRedactFailMode() RedactFailMode {
return Std.GetRedactFailMode()
}
func SetRedactMaskToken(mask string) {
Std.SetRedactMaskToken(mask)
}
func GetRedactMaskToken() string {
return Std.GetRedactMaskToken()
}
func GetRedactErrorCount() uint64 {
return Std.GetRedactErrorCount()
}
func Debug(str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
@ -197,7 +552,7 @@ func Panicln(str ...interface{}) {
stdmu.Lock()
defer stdmu.Unlock()
Std.isStd = true
Std.Fatalln(str...)
Std.Panicln(str...)
Std.isStd = false
}
@ -259,12 +614,101 @@ func GetWriter() io.Writer {
return Std.GetWriter()
}
func AsWriter(level int) io.Writer {
return Std.AsWriter(level)
}
func AsWriterWithOptions(level int, opts ...StdlibBridgeOption) io.Writer {
return Std.AsWriterWithOptions(level, opts...)
}
func AsStdlibLogger(level int) *log.Logger {
return Std.AsStdlibLogger(level)
}
func AsStdlibLoggerWithOptions(level int, opts ...StdlibBridgeOption) *log.Logger {
return Std.AsStdlibLoggerWithOptions(level, opts...)
}
func SetSink(sink Sink) {
Std.SetSink(sink)
}
func SetSinks(sinks ...Sink) {
Std.SetSinks(sinks...)
}
func GetSink() Sink {
return Std.GetSink()
}
func SetFormatter(formatter Formatter) {
Std.SetFormatter(formatter)
}
func GetFormatter() Formatter {
return Std.GetFormatter()
}
func SetEntryHandler(handler Handler) {
Std.SetEntryHandler(handler)
}
func GetEntryHandler() Handler {
return Std.GetEntryHandler()
}
func AppendEntryHandler(handler Handler) {
Std.AppendEntryHandler(handler)
}
func SetHandler(f func(LogData)) {
Std.SetHandler(f)
}
func GetHandler() func(LogData) {
return Std.GetHandler()
}
func SetPendingWriteLimit(limit int) {
Std.SetPendingWriteLimit(limit)
}
func GetPendingWriteLimit() int {
return Std.GetPendingWriteLimit()
}
func SetPendingDropPolicy(policy PendingDropPolicy) {
Std.SetPendingDropPolicy(policy)
}
func GetPendingDropPolicy() PendingDropPolicy {
return Std.GetPendingDropPolicy()
}
func GetPendingDropCount() uint64 {
return Std.GetPendingDropCount()
}
func GetPendingBlockCount() uint64 {
return Std.GetPendingBlockCount()
}
func GetPendingPeakLength() int {
return Std.GetPendingPeakLength()
}
func GetPendingStats() PendingStats {
return Std.GetPendingStats()
}
func SetEntryHandlerTimeout(timeout time.Duration) {
Std.SetEntryHandlerTimeout(timeout)
}
func GetEntryHandlerTimeout() time.Duration {
return Std.GetEntryHandlerTimeout()
}
func SetSwitching(sw bool) {
Std.SetSwitching(sw)
}
@ -278,11 +722,11 @@ func GetShowOriginFile() bool {
}
func SetShowFuncName(val bool) {
Std.logcore.showFuncName = val
Std.SetShowFuncName(val)
}
func GetShowFuncName() bool {
return Std.logcore.showFuncName
return Std.GetShowFuncName()
}
func SetShowLevel(val bool) {
@ -309,14 +753,32 @@ func GetShowStd() bool {
return Std.GetShowStd()
}
func SetAutoAppendNewline(enable bool) {
Std.SetAutoAppendNewline(enable)
}
func GetAutoAppendNewline() bool {
return Std.GetAutoAppendNewline()
}
func StopWrite() {
Std.StopWrite()
}
func EnableWrite() {
Std.EnableWrite()
}
func IsWriteStopped() bool {
return Std.IsWriteStopped()
}
// Deprecated: use EnableWrite.
func EnbaleWrite() {
Std.EnbaleWrite()
}
// Deprecated: use IsWriteStopped.
func IsWriteStoed() bool {
return Std.IsWriteStoed()
}

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,35 @@
package starlog
import (
"bytes"
"strings"
"sync"
"testing"
"time"
)
func Test_LOG(t *testing.T) {
go Debugln("nb")
Debugln("nb2")
time.Sleep(time.Second)
var buf bytes.Buffer
logger := NewStarlog(&buf)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
var wait sync.WaitGroup
wait.Add(2)
go func() {
defer wait.Done()
logger.Debugln("nb")
}()
go func() {
defer wait.Done()
logger.Debugln("nb2")
}()
wait.Wait()
logStr := buf.String()
if !strings.Contains(logStr, "nb") || !strings.Contains(logStr, "nb2") {
t.Fatalf("expected both logs in output, got %q", logStr)
}
}

118
stdlib_bridge.go Normal file
View File

@ -0,0 +1,118 @@
package starlog
import (
"errors"
"io"
"log"
"b612.me/starlog/internal/stdlibx"
)
type StdlibLevelMapper = stdlibx.LevelMapper
type StdlibBridgeOptions = stdlibx.Options
type StdlibBridgeOption = stdlibx.Option
func DefaultStdlibBridgeOptions() StdlibBridgeOptions { return stdlibx.DefaultOptions() }
func WithStdlibPrefix(prefix string) StdlibBridgeOption {
return stdlibx.WithPrefix(prefix)
}
func WithStdlibFlags(flags int) StdlibBridgeOption {
return stdlibx.WithFlags(flags)
}
func WithStdlibShowStd(show bool) StdlibBridgeOption {
return stdlibx.WithShowStd(show)
}
func WithStdlibTrimNewline(trim bool) StdlibBridgeOption {
return stdlibx.WithTrimNewline(trim)
}
func WithStdlibLevelMapper(mapper StdlibLevelMapper) StdlibBridgeOption {
return stdlibx.WithLevelMapper(mapper)
}
func normalizeStdlibBridgeOptions(opts []StdlibBridgeOption) StdlibBridgeOptions {
return stdlibx.NormalizeOptions(opts)
}
type LevelWriter struct {
logger *StarLogger
core *stdlibx.Writer
}
func NewLevelWriter(logger *StarLogger, level int) *LevelWriter {
return NewLevelWriterWithOptions(logger, level)
}
func NewLevelWriterWithOptions(logger *StarLogger, level int, opts ...StdlibBridgeOption) *LevelWriter {
options := normalizeStdlibBridgeOptions(opts)
var emit stdlibx.EmitFunc
if logger != nil {
emit = func(mappedLevel int, showStd bool, text string) {
logger.emit(mappedLevel, showStd, text)
}
}
return &LevelWriter{
logger: logger,
core: stdlibx.NewWriter(level, options, emit),
}
}
func NewLevelWriterBridge(logger *StarLogger, level int) io.Writer {
return NewLevelWriter(logger, level)
}
func NewStdlibLogger(logger *StarLogger, level int) *log.Logger {
return NewStdlibLoggerWithOptions(logger, level)
}
func NewStdlibLoggerWithOptions(logger *StarLogger, level int, opts ...StdlibBridgeOption) *log.Logger {
options := normalizeStdlibBridgeOptions(opts)
if logger == nil {
return log.New(io.Discard, options.Prefix, options.Flags)
}
return logger.AsStdlibLoggerWithOptions(level, opts...)
}
func (writer *LevelWriter) SetShowStd(show bool) {
if writer == nil || writer.core == nil {
return
}
writer.core.SetShowStd(show)
}
func (writer *LevelWriter) SetTrimNewline(trim bool) {
if writer == nil || writer.core == nil {
return
}
writer.core.SetTrimNewline(trim)
}
func (writer *LevelWriter) Write(data []byte) (int, error) {
if writer == nil || writer.logger == nil || writer.core == nil {
return 0, errors.New("level writer logger is nil")
}
return writer.core.Write(data)
}
func (logger *StarLogger) AsWriter(level int) io.Writer {
return logger.AsWriterWithOptions(level)
}
func (logger *StarLogger) AsWriterWithOptions(level int, opts ...StdlibBridgeOption) io.Writer {
return NewLevelWriterWithOptions(logger, level, opts...)
}
func (logger *StarLogger) AsStdlibLogger(level int) *log.Logger {
return logger.AsStdlibLoggerWithOptions(level)
}
func (logger *StarLogger) AsStdlibLoggerWithOptions(level int, opts ...StdlibBridgeOption) *log.Logger {
options := normalizeStdlibBridgeOptions(opts)
return log.New(logger.AsWriterWithOptions(level, opts...), options.Prefix, options.Flags)
}
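
A typical use of the bridge is feeding third-party code that only accepts a *log.Logger; sketched below with net/http from the standard library (not part of this commit):

package main

import (
	"net/http"

	"b612.me/starlog"
)

func main() {
	logger := starlog.NewStarlog(nil)
	srv := &http.Server{
		Addr: ":8080",
		// net/http's internal error lines now flow through starlog at error
		// level, picking up its formatting and sinks.
		ErrorLog: logger.AsStdlibLogger(starlog.LvError),
	}
	_ = srv.ListenAndServe()
}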

101
stdlib_bridge_test.go Normal file
View File

@ -0,0 +1,101 @@
package starlog
import (
"bytes"
"log"
"strings"
"testing"
)
func TestAsWriterBridge(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
writer := logger.AsWriter(LvInfo)
n, err := writer.Write([]byte("bridge writer\n"))
if err != nil {
t.Fatalf("writer bridge returned error: %v", err)
}
if n != len("bridge writer\n") {
t.Fatalf("writer bridge returned wrong length: %d", n)
}
got := buf.String()
if !strings.Contains(got, "bridge writer") {
t.Fatalf("bridge writer output missing message, got %q", got)
}
}
func TestAsStdlibLoggerBridge(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
std := logger.AsStdlibLogger(LvError)
std.Print("stdlib logger line")
got := buf.String()
if !strings.Contains(got, "stdlib logger line") {
t.Fatalf("stdlib logger bridge output missing message, got %q", got)
}
}
func TestAsStdlibLoggerWithOptionsPrefixAndFlags(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetShowLevel(false)
logger.SetShowFlag(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
std := logger.AsStdlibLoggerWithOptions(LvInfo,
WithStdlibPrefix("SDK "),
WithStdlibFlags(log.Lmsgprefix),
)
std.Print("line")
got := buf.String()
if !strings.Contains(got, "SDK line") {
t.Fatalf("stdlib logger with prefix/flags should include configured prefix, got %q", got)
}
}
func TestAsWriterWithOptionsLevelMapper(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetShowLevel(true)
logger.SetShowFlag(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
writer := logger.AsWriterWithOptions(LvInfo,
WithStdlibLevelMapper(func(text string, fallbackLevel int) int {
if strings.Contains(text, "[ERR]") {
return LvError
}
return fallbackLevel
}),
)
_, err := writer.Write([]byte("[ERR] dynamic level"))
if err != nil {
t.Fatalf("AsWriterWithOptions returned error: %v", err)
}
got := buf.String()
if !strings.Contains(got, "[ERROR]") {
t.Fatalf("level mapper should map to error level, got %q", got)
}
if !strings.Contains(got, "[ERR] dynamic level") {
t.Fatalf("expected original message in log, got %q", got)
}
}
func TestNewStdlibLoggerWithOptionsNilLogger(t *testing.T) {
std := NewStdlibLoggerWithOptions(nil, LvInfo,
WithStdlibPrefix("NIL "),
WithStdlibFlags(log.LstdFlags),
)
if std == nil {
t.Fatalf("expected non-nil std logger for nil starlog")
}
std.Print("discard")
}

201
structured_test.go Normal file
View File

@ -0,0 +1,201 @@
package starlog
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"os"
"regexp"
"strings"
"testing"
)
var ansiRegex = regexp.MustCompile(`\x1b\[[0-9;]*m`)
func newStructuredTestLogger(output io.Writer) *StarLogger {
logger := NewStarlog(output)
logger.SetShowStd(false)
logger.SetShowColor(false)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
return logger
}
func TestWithFieldAndWithFields(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.WithField("user_id", 42).WithFields(Fields{
"module": "auth",
"ip": "127.0.0.1",
}).Info("login ok")
logStr := buf.String()
if !strings.Contains(logStr, "login ok") {
t.Fatalf("expected message in log, got %q", logStr)
}
if !strings.Contains(logStr, "user_id=42") || !strings.Contains(logStr, "module=auth") || !strings.Contains(logStr, "ip=127.0.0.1") {
t.Fatalf("expected structured fields in log, got %q", logStr)
}
}
func TestWithFieldIsolation(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.Info("base")
baseLog := buf.String()
if strings.Contains(baseLog, "req_id=") {
t.Fatalf("base logger should not include req_id field, got %q", baseLog)
}
buf.Reset()
logger.WithField("req_id", "r-1").Info("child")
childLog := buf.String()
if !strings.Contains(childLog, "req_id=r-1") {
t.Fatalf("child logger should include req_id field, got %q", childLog)
}
buf.Reset()
logger.Info("base-again")
baseAgain := buf.String()
if strings.Contains(baseAgain, "req_id=r-1") {
t.Fatalf("base logger should remain clean after WithField, got %q", baseAgain)
}
}
func TestWithError(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.WithError(errors.New("boom")).Error("request failed")
logStr := buf.String()
if !strings.Contains(logStr, "request failed") || !strings.Contains(logStr, "error=boom") {
t.Fatalf("expected error details in log, got %q", logStr)
}
}
func TestWithContextExtractor(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetContextFieldExtractor(func(ctx context.Context) Fields {
traceID, _ := ctx.Value("trace_id").(string)
if traceID == "" {
return nil
}
return Fields{"trace_id": traceID}
})
ctx := context.WithValue(context.Background(), "trace_id", "trace-001")
logger.WithContext(ctx).Info("context log")
logStr := buf.String()
if !strings.Contains(logStr, "context log") || !strings.Contains(logStr, "trace_id=trace-001") {
t.Fatalf("expected context extracted fields in log, got %q", logStr)
}
}
func TestJSONFormatterWithStructuredFields(t *testing.T) {
var buf bytes.Buffer
logger := newStructuredTestLogger(&buf)
logger.SetFormatter(NewJSONFormatter())
logger.SetShowColor(false)
logger.WithField("user_id", 7).WithError(errors.New("db down")).Error("save failed")
payload := make(map[string]interface{})
if err := json.Unmarshal(buf.Bytes(), &payload); err != nil {
t.Fatalf("json unmarshal failed: %v, raw=%q", err, buf.String())
}
if payload["msg"] != "save failed" {
t.Fatalf("unexpected msg: %v", payload["msg"])
}
if payload["error"] != "db down" {
t.Fatalf("unexpected error field: %v", payload["error"])
}
fieldsObj, ok := payload["fields"].(map[string]interface{})
if !ok {
t.Fatalf("fields should be object, got %T", payload["fields"])
}
if fieldsObj["user_id"] != float64(7) {
t.Fatalf("unexpected user_id value: %v", fieldsObj["user_id"])
}
}
func TestLevelOnlyFieldColorRender(t *testing.T) {
oldNoColor := NoColor
NoColor = false
defer func() {
NoColor = oldNoColor
}()
logger := NewStarlog(nil)
logger.SetShowStd(true)
logger.SetShowColor(true)
logger.SetColorMode(ColorModeLevelOnly)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.SetShowFieldColor(true)
var out bytes.Buffer
oldStd := stdScreen
oldErr := errScreen
stdScreen = &out
errScreen = io.Discard
defer func() {
stdScreen = oldStd
errScreen = oldErr
}()
logger.WithFields(Fields{
"user": "alice",
"ok": true,
"cnt": 3,
}).Info("login")
rendered := out.String()
if !strings.Contains(rendered, "\x1b[") {
t.Fatalf("expected ansi colors in rendered log, got %q", rendered)
}
clean := ansiRegex.ReplaceAllString(rendered, "")
if !strings.Contains(clean, "user=alice") || !strings.Contains(clean, "ok=true") || !strings.Contains(clean, "cnt=3") {
t.Fatalf("expected fields in rendered log, got %q", clean)
}
}
func TestDisableFieldColorRender(t *testing.T) {
oldNoColor := NoColor
NoColor = false
defer func() {
NoColor = oldNoColor
}()
logger := NewStarlog(nil)
logger.SetShowStd(true)
logger.SetShowColor(true)
logger.SetColorMode(ColorModeLevelOnly)
logger.SetShowOriginFile(false)
logger.SetShowFuncName(false)
logger.SetShowFlag(false)
logger.SetShowFieldColor(false)
var out bytes.Buffer
oldStd := stdScreen
oldErr := errScreen
stdScreen = &out
errScreen = os.Stderr
defer func() {
stdScreen = oldStd
errScreen = oldErr
}()
logger.WithField("user", "alice").Info("login")
rendered := out.String()
if strings.Count(rendered, "\x1b[") > 2 {
t.Fatalf("field color should be disabled, got %q", rendered)
}
}

32
test_paths_test.go Normal file
View File

@ -0,0 +1,32 @@
package starlog
import (
"os"
"path/filepath"
"strings"
"testing"
)
func sanitizeTestName(name string) string {
replacer := strings.NewReplacer(
"/", "_",
"\\", "_",
":", "_",
" ", "_",
"\t", "_",
".", "_",
)
return replacer.Replace(name)
}
func testBinDir(t *testing.T) string {
t.Helper()
dir := filepath.Join("bin", "tests", sanitizeTestName(t.Name()))
if err := os.RemoveAll(dir); err != nil {
t.Fatalf("cleanup test bin dir failed: %v", err)
}
if err := os.MkdirAll(dir, 0755); err != nil {
t.Fatalf("create test bin dir failed: %v", err)
}
return dir
}
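Intended use inside a test, sketched with a hypothetical test name:

package starlog

import "testing"

func TestWritesArtifacts(t *testing.T) {
	// Fresh directory under bin/tests/<sanitized test name>, recreated on
	// every run so tests never write into the repo root.
	dir := testBinDir(t)
	_ = dir
}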

429
typed.go
View File

@ -1,10 +1,15 @@
package starlog
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"b612.me/starlog/colorable"
@ -21,7 +26,47 @@ const (
LvFatal
)
type ColorMode int
const (
ColorModeOff ColorMode = iota
ColorModeFullLine
ColorModeLevelOnly
)
type PendingDropPolicy int
const (
PendingDropOldest PendingDropPolicy = iota
PendingDropNewest
PendingBlock
)
type RedactFailMode int
const (
RedactFailMaskAll RedactFailMode = iota
RedactFailOpen
RedactFailDrop
)
const (
FieldTypeString = "string"
FieldTypeNumber = "number"
FieldTypeBool = "bool"
FieldTypeError = "error"
FieldTypeNil = "nil"
FieldTypeOther = "other"
)
var (
ErrAsyncHandlerPanic = errors.New("async handler panic")
ErrAsyncHandlerTimeout = errors.New("async handler timeout")
ErrAsyncQueueFull = errors.New("async queue full")
ErrPendingWriteDropped = errors.New("pending write dropped")
ErrInvalidLevel = errors.New("invalid log level")
ErrRedactionFailed = errors.New("redaction failed")
levels = map[int]string{
LvDebug: "DEBUG",
LvInfo: "INFO",
@ -32,31 +77,86 @@ var (
LvPanic: "PANIC",
LvFatal: "FATAL",
}
levelAliases = map[string]int{
"debug": LvDebug,
"info": LvInfo,
"notice": LvNotice,
"warn": LvWarning,
"warning": LvWarning,
"err": LvError,
"error": LvError,
"critical": LvCritical,
"crit": LvCritical,
"panic": LvPanic,
"fatal": LvFatal,
}
stacks *starChanStack
stackStarted bool = false
stackStopChan chan int
stackStopChan chan struct{}
stackDoneChan chan struct{}
stackMu sync.Mutex
stdScreen io.Writer = colorable.NewColorableStdout()
errScreen io.Writer = colorable.NewColorableStderr()
stackDrop uint64
stackAlert func(error, LogData)
stackAlertMu sync.RWMutex
stackFallback uint32 = 1
stackTimeout int64
writeErrCount uint64
writeErrHandler func(error, LogData)
writeErrMu sync.RWMutex
stdScreen io.Writer = colorable.NewColorableStdout()
errScreen io.Writer = colorable.NewColorableStderr()
)
type starlog struct {
mu *sync.Mutex
output io.Writer
errOutputLevel int
showFuncName bool
showThread bool
showLevel bool
showDeatilFile bool
showColor bool
switching bool
showStd bool
onlyColorLevel bool
stopWriter bool
id string
name string
colorList map[int][]Attr
colorMe map[int]*Color
mu *sync.Mutex
output io.Writer
minLevel int
errOutputLevel int
showFuncName bool
showThread bool
showLevel bool
showDeatilFile bool
showColor bool
switching bool
showStd bool
onlyColorLevel bool
autoAppendNewline bool
stopWriter bool
id string
name string
colorList map[int][]Attr
colorMe map[int]*Color
keywordColors map[string][]Attr
keywordOrder []string
keywordColorizers map[string]*Color
keywordMatcher *keywordMatcher
keywordMatchOptions KeywordMatchOptions
showFieldColor bool
fieldKeyColor []Attr
fieldTypeColors map[string][]Attr
fieldValueColors map[string][]Attr
entryHandler Handler
redactor Redactor
redactRules []RedactRule
redactFailMode RedactFailMode
redactMaskToken string
redactErrorCount uint64
formatter Formatter
sink Sink
pendingCond *sync.Cond
pendingWrites []string
pendingWriteLimit int
pendingDropPolicy PendingDropPolicy
pendingDropCount uint64
pendingBlockCount uint64
pendingPeakLen uint64
rateLimiter *rateLimiter
sampler *sampler
deduper *deduper
contextFields func(context.Context) Fields
entryHandlerTimeout time.Duration
}
type StarLogger struct {
@ -64,6 +164,9 @@ type StarLogger struct {
handlerFunc func(LogData)
logcore *starlog
isStd bool
fields Fields
logErr error
logCtx context.Context
}
type logTransfer struct {
@ -77,10 +180,71 @@ type LogData struct {
Colors []Attr
}
type PendingStats struct {
Limit int
Length int
PeakLength int
DropCount uint64
BlockCount uint64
Policy PendingDropPolicy
Switching bool
}
type KeywordMatchOptions struct {
IgnoreCase bool
WholeWord bool
}
// Config is a logger core snapshot that can be read and applied atomically.
// Prefer GetConfig + UpdateConfig/ApplyConfig for multi-field configuration.
type Config struct {
Name string
Level int
StdErrLevel int
ShowFuncName bool
ShowFlag bool
ShowLevel bool
ShowOriginFile bool
ShowColor bool
OnlyColorLevel bool
ShowStd bool
StopWriter bool
AutoAppendNewline bool
Switching bool
LevelColors map[int][]Attr
KeywordColors map[string][]Attr
KeywordMatch KeywordMatchOptions
ShowFieldColor bool
FieldKeyColor []Attr
FieldTypeColors map[string][]Attr
FieldValueColors map[string][]Attr
EntryHandler Handler
EntryHandlerTimeout time.Duration
Formatter Formatter
Sink Sink
Writer io.Writer
PendingWriteLimit int
PendingDropPolicy PendingDropPolicy
Redactor Redactor
RedactRules []RedactRule
RedactFailMode RedactFailMode
RedactMaskToken string
RateLimit RateLimitConfig
Sampling SamplingConfig
Dedup DedupConfig
ContextFieldExtractor func(context.Context) Fields
}
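As the comment above says, multi-field changes should go through the snapshot API rather than a burst of individual setters; a minimal sketch:

package main

import "b612.me/starlog"

func main() {
	logger := starlog.NewStarlog(nil)
	// One atomic reconfiguration instead of several separate setter calls.
	logger.UpdateConfig(func(c *starlog.Config) {
		c.Level = starlog.LvInfo
		c.ShowColor = false
		c.Sampling.Enable = true
		c.Sampling.Rate = 0.1
	})
}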
func newLogCore(out io.Writer) *starlog {
return &starlog{
core := &starlog{
mu: &sync.Mutex{},
output: out,
minLevel: LvDebug,
errOutputLevel: LvError,
showFuncName: true,
showThread: true,
@ -111,7 +275,50 @@ func newLogCore(out io.Writer) *starlog {
LvPanic: NewColor([]Attr{FgRed, Bold}...),
LvFatal: NewColor([]Attr{FgRed}...),
},
keywordColors: make(map[string][]Attr),
keywordOrder: nil,
keywordColorizers: nil,
keywordMatcher: nil,
showFieldColor: true,
fieldKeyColor: []Attr{FgHiBlue},
fieldTypeColors: map[string][]Attr{
FieldTypeString: []Attr{FgGreen},
FieldTypeNumber: []Attr{FgYellow},
FieldTypeBool: []Attr{FgMagenta},
FieldTypeError: []Attr{FgRed},
FieldTypeNil: []Attr{FgHiBlack},
FieldTypeOther: []Attr{FgCyan},
},
fieldValueColors: make(map[string][]Attr),
redactRules: make([]RedactRule, 0, 4),
redactFailMode: RedactFailMaskAll,
redactMaskToken: "[REDACTED]",
pendingWrites: make([]string, 0, 16),
pendingWriteLimit: 1024,
pendingDropPolicy: PendingDropOldest,
rateLimiter: newRateLimiter(),
sampler: newSampler(),
deduper: newDeduper(),
entryHandlerTimeout: 0,
}
core.rebuildKeywordCachesLocked()
core.pendingCond = sync.NewCond(core.mu)
return core
}
func ParseLevel(level string) (int, error) {
val := strings.TrimSpace(strings.ToLower(level))
if val == "" {
return 0, fmt.Errorf("%w: empty", ErrInvalidLevel)
}
if parsed, ok := levelAliases[val]; ok {
return parsed, nil
}
num, err := strconv.Atoi(val)
if err == nil {
return num, nil
}
return 0, fmt.Errorf("%w: %s", ErrInvalidLevel, level)
}
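ParseLevel accepts the aliases above as well as raw numbers, which makes environment-driven configuration a one-liner; sketch:

package main

import (
	"os"

	"b612.me/starlog"
)

func main() {
	level, err := starlog.ParseLevel(os.Getenv("LOG_LEVEL")) // "warn", "error", "3", ...
	if err != nil {
		level = starlog.LvInfo // unset or invalid: fall back to info
	}
	starlog.SetLevel(level)
}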
func NewStarlog(out io.Writer) *StarLogger {
@ -120,6 +327,9 @@ func NewStarlog(out io.Writer) *StarLogger {
thread: "MAN",
logcore: newLogCore(out),
isStd: false,
fields: nil,
logErr: nil,
logCtx: nil,
}
}
@ -141,6 +351,9 @@ func (logger *StarLogger) NewFlag() *StarLogger {
handlerFunc: logger.handlerFunc,
logcore: logger.logcore,
isStd: false,
fields: cloneFields(logger.fields),
logErr: logger.logErr,
logCtx: logger.logCtx,
}
}
func (logger *StarLogger) SetNewRandomFlag() {
@ -154,6 +367,8 @@ func (logger *StarLogger) SetName(name string) {
}
func (logger *StarLogger) GetName() string {
logger.logcore.mu.Lock()
defer logger.logcore.mu.Unlock()
return logger.logcore.name
}
@ -180,42 +395,190 @@ func StartStacks() {
stackMu.Unlock()
return
}
unlock := make(chan struct{})
go func() {
stackStarted = true
stacks = newStarChanStack(1024)
stackMu.Unlock()
unlock <- struct{}{}
stackStarted = true
stackStopChan = make(chan struct{})
stackDoneChan = make(chan struct{})
stacks = newStarChanStack(1024)
stopChan := stackStopChan
doneChan := stackDoneChan
stackMu.Unlock()
go func(stop <-chan struct{}, done chan struct{}) {
defer close(done)
defer func() {
stackMu.Lock()
stackStarted = false
stackMu.Unlock()
}()
for {
select {
case <-stackStopChan:
case <-stop:
return
default:
}
poped, err := stacks.Pop()
if err != nil {
if errors.Is(err, io.EOF) {
return
}
return
}
val := poped.(logTransfer)
val, ok := poped.(logTransfer)
if !ok {
continue
}
if val.handlerFunc != nil {
val.handlerFunc(val.LogData)
invokeAsyncHandlerSafely(val.handlerFunc, val.LogData)
}
}
}()
<-unlock
}(stopChan, doneChan)
}
func StopStacks() {
stackMu.Lock()
if !stackStarted {
stackMu.Unlock()
return
}
stackStopChan <- 1
stopChan := stackStopChan
doneChan := stackDoneChan
current := stacks
stackStopChan = nil
stackDoneChan = nil
stackMu.Unlock()
if stopChan != nil {
func() {
defer func() {
recover()
}()
close(stopChan)
}()
}
if current != nil {
_ = current.Close()
}
if doneChan != nil {
<-doneChan
}
}
func Stop() {
// StopStacks already closes the running stack and waits for the worker,
// so delegating avoids a nil dereference when the stack was never started
// and a double close of the underlying channel.
StopStacks()
}
func SetAsyncErrorHandler(alert func(error, LogData)) {
stackAlertMu.Lock()
defer stackAlertMu.Unlock()
stackAlert = alert
}
func SetAsyncFallbackToSync(enable bool) {
if enable {
atomic.StoreUint32(&stackFallback, 1)
return
}
atomic.StoreUint32(&stackFallback, 0)
}
func GetAsyncFallbackToSync() bool {
return atomic.LoadUint32(&stackFallback) == 1
}
func SetAsyncHandlerTimeout(timeout time.Duration) {
if timeout < 0 {
timeout = 0
}
atomic.StoreInt64(&stackTimeout, int64(timeout))
}
func GetAsyncHandlerTimeout() time.Duration {
return time.Duration(atomic.LoadInt64(&stackTimeout))
}
func GetAsyncDropCount() uint64 {
return atomic.LoadUint64(&stackDrop)
}
func reportAsyncDrop(err error, data LogData) {
atomic.AddUint64(&stackDrop, 1)
stackAlertMu.RLock()
alert := stackAlert
stackAlertMu.RUnlock()
if alert != nil {
func() {
defer func() {
recover()
}()
alert(err, data)
}()
}
}
func invokeAsyncHandlerSafely(handler func(LogData), data LogData) bool {
if handler == nil {
return true
}
timeout := GetAsyncHandlerTimeout()
if timeout <= 0 {
return invokeAsyncHandlerDirect(handler, data)
}
done := make(chan bool, 1)
go func() {
done <- invokeAsyncHandlerDirect(handler, data)
}()
select {
case ok := <-done:
return ok
case <-time.After(timeout):
reportAsyncDrop(ErrAsyncHandlerTimeout, data)
return false
}
}
func invokeAsyncHandlerDirect(handler func(LogData), data LogData) (ok bool) {
defer func() {
if panicErr := recover(); panicErr != nil {
reportAsyncDrop(fmt.Errorf("%w: %v", ErrAsyncHandlerPanic, panicErr), data)
ok = false
}
}()
handler(data)
return true
}
func SetWriteErrorHandler(alert func(error, LogData)) {
writeErrMu.Lock()
defer writeErrMu.Unlock()
writeErrHandler = alert
}
func GetWriteErrorCount() uint64 {
return atomic.LoadUint64(&writeErrCount)
}
func reportWriteError(err error, data LogData) {
if err == nil {
return
}
atomic.AddUint64(&writeErrCount, 1)
writeErrMu.RLock()
alert := writeErrHandler
writeErrMu.RUnlock()
if alert != nil {
func() {
defer func() {
recover()
}()
alert(err, data)
}()
}
}
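Both hook families here follow the same contract: the handler runs behind a recover guard, and the counters advance even when no handler is installed. Wiring both, sketched:

package main

import (
	"fmt"
	"os"
	"time"

	"b612.me/starlog"
)

func main() {
	// Async handlers that panic or overrun the timeout are counted as drops.
	starlog.SetAsyncHandlerTimeout(100 * time.Millisecond)
	starlog.SetAsyncErrorHandler(func(err error, data starlog.LogData) {
		fmt.Fprintln(os.Stderr, "async handler problem:", err)
	})
	// Sink/writer failures are reported here instead of being silently lost.
	starlog.SetWriteErrorHandler(func(err error, data starlog.LogData) {
		fmt.Fprintln(os.Stderr, "log write failed:", err)
	})
	defer fmt.Println("async drops:", starlog.GetAsyncDropCount(),
		"write errors:", starlog.GetWriteErrorCount())
}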
func resetAsyncMetricsForTest() {
atomic.StoreUint64(&stackDrop, 0)
SetAsyncErrorHandler(nil)
SetAsyncFallbackToSync(true)
SetAsyncHandlerTimeout(0)
atomic.StoreUint64(&writeErrCount, 0)
SetWriteErrorHandler(nil)
}