mirror of https://github.com/loggie-io/loggie.git
Compare commits
31 Commits
Author | SHA1 | Date |
---|---|---|
|
ec6c44733d | |
|
31bec67423 | |
|
d9c57b9202 | |
|
04da98fff9 | |
|
f27f8d336a | |
|
b6f758bafc | |
|
fef96ad0d2 | |
|
e21f3d0df1 | |
|
b584302836 | |
|
ba7e228e20 | |
|
7d03ad88dd | |
|
76fe3f4801 | |
|
342e58a413 | |
|
8e89d72644 | |
|
ae4a7250f9 | |
|
fa19c9ef53 | |
|
c7cdbac92a | |
|
95a00ef1dd | |
|
2f3a2104d1 | |
|
083d9601a6 | |
|
71e3a7ea7f | |
|
d933083227 | |
|
350b21ec57 | |
|
d024a5052a | |
|
9d7b9a54b5 | |
|
2520f55bfd | |
|
6d18d0f847 | |
|
6a5b1e5ce6 | |
|
c452784e11 | |
|
96516da425 | |
|
1a7b7dc24c |
10
Makefile
10
Makefile
|
@ -82,13 +82,13 @@ benchmark: ## Run benchmark
|
|||
|
||||
##@ Build
|
||||
|
||||
build: ## go build
|
||||
CGO_ENABLED=1 GOOS=${GOOS} GOARCH=${GOARCH} go build -mod=vendor -a ${extra_flags} -o loggie cmd/loggie/main.go
|
||||
build: ## go build, EXT_BUILD_TAGS=include_core would only build core package
|
||||
CGO_ENABLED=1 GOOS=${GOOS} GOARCH=${GOARCH} go build -tags ${EXT_BUILD_TAGS} -mod=vendor -a ${extra_flags} -o loggie cmd/loggie/main.go
|
||||
|
||||
##@ Build(without sqlite)
|
||||
|
||||
build-in-badger: ## go build without sqlite
|
||||
GOOS=${GOOS} GOARCH=${GOARCH} go build -tags driver_badger -mod=vendor -a -ldflags '-X github.com/loggie-io/loggie/pkg/core/global._VERSION_=${TAG} -X github.com/loggie-io/loggie/pkg/util/persistence._DRIVER_=badger -s -w' -o loggie cmd/loggie/main.go
|
||||
build-in-badger: ## go build without sqlite, EXT_BUILD_TAGS=include_core would only build core package
|
||||
GOOS=${GOOS} GOARCH=${GOARCH} go build -tags driver_badger,${EXT_BUILD_TAGS} -mod=vendor -a -ldflags '-X github.com/loggie-io/loggie/pkg/core/global._VERSION_=${TAG} -X github.com/loggie-io/loggie/pkg/util/persistence._DRIVER_=badger -s -w' -o loggie cmd/loggie/main.go
|
||||
|
||||
##@ Images
|
||||
|
||||
|
@ -98,7 +98,7 @@ docker-build: ## Docker build -t ${REPO}:${TAG}, try: make docker-build REPO=<Yo
|
|||
docker-push: ## Docker push ${REPO}:${TAG}
|
||||
docker push ${REPO}:${TAG}
|
||||
|
||||
docker-multi-arch: ## Docker buildx, try: make docker-build REPO=<YourRepoHost>, ${TAG} generated by git
|
||||
docker-multi-arch: ## Docker buildx, try: make docker-multi-arch REPO=<YourRepoHost>, ${TAG} generated by git
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t ${REPO}:${TAG} . --push
|
||||
|
||||
LOG_DIR ?= /tmp/log ## log directory
|
||||
|
|
|
@ -19,9 +19,7 @@ package main
|
|||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/ops"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -39,9 +37,13 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/discovery/kubernetes"
|
||||
"github.com/loggie-io/loggie/pkg/eventbus"
|
||||
_ "github.com/loggie-io/loggie/pkg/include"
|
||||
"github.com/loggie-io/loggie/pkg/ops"
|
||||
"github.com/loggie-io/loggie/pkg/ops/helper"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/persistence"
|
||||
"github.com/loggie-io/loggie/pkg/util/yaml"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -86,7 +88,8 @@ func main() {
|
|||
// system config file
|
||||
syscfg := sysconfig.Config{}
|
||||
cfg.UnpackTypeDefaultsAndValidate(strings.ToLower(configType), globalConfigFile, &syscfg)
|
||||
|
||||
// register jsonEngine
|
||||
json.SetDefaultEngine(syscfg.Loggie.JSONEngine)
|
||||
// start eventBus listeners
|
||||
eventbus.StartAndRun(syscfg.Loggie.MonitorEventBus)
|
||||
// init log after error func
|
||||
|
@ -137,8 +140,18 @@ func main() {
|
|||
|
||||
if syscfg.Loggie.Http.Enabled {
|
||||
go func() {
|
||||
if err = http.ListenAndServe(fmt.Sprintf("%s:%d", syscfg.Loggie.Http.Host, syscfg.Loggie.Http.Port), nil); err != nil {
|
||||
log.Fatal("http listen and serve err: %v", err)
|
||||
if syscfg.Loggie.Http.RandPort {
|
||||
syscfg.Loggie.Http.Port = 0
|
||||
}
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", syscfg.Loggie.Http.Host, syscfg.Loggie.Http.Port))
|
||||
if err != nil {
|
||||
log.Fatal("http listen err: %v", err)
|
||||
}
|
||||
|
||||
log.Info("http listen addr %s", listener.Addr().String())
|
||||
if err = http.Serve(listener, nil); err != nil {
|
||||
log.Fatal("http serve err: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
|
@ -1,3 +1,94 @@
|
|||
# Release v1.5.0-rc.0
|
||||
|
||||
### :star2: Features
|
||||
- [breaking]: The `db` in `file source` is moved to the [`loggie.yml`](https://loggie-io.github.io/docs/main/reference/global/db/). If upgrading from an earlier version to v1.5, be sure to check whether `db` has been configured for `file source`. If it is not configured, you can just ignore it, and the default value will remain compatible.
|
||||
|
||||
- Added rocketmq sink (#530)
|
||||
- Added franzKafka source (#573)
|
||||
- Added kata runtime (#554)
|
||||
- `typePodFields`/`typeNodeFields` is supported in LogConfig/ClusterLogConfig (#450)
|
||||
- sink codec support printEvents (#448)
|
||||
- Added queue in LogConfig/ClusterLogConfig (#457)
|
||||
- Changed `olivere/elastic` to the official elasticsearch go client (#581)
|
||||
- Supported `copytruncate` in file source (#571)
|
||||
- Added `genfiles` sub command (#471)
|
||||
- Added queue in LogConfig/ClusterLogConfig (#457)
|
||||
- Added `sortBy` field in elasticsearch source (#473)
|
||||
- Added host VM mode with Kubernetes as the configuration center (#449) (#489)
|
||||
- New `addHostMeta` interceptor (#474)
|
||||
- Added persistence driver `badger` (#475) (#584)
|
||||
- Ignore LogConfig with sidecar injection annotation (#478)
|
||||
- Added `toStr` action in transformer interceptor (#482)
|
||||
- You can mount the root directory of a node to the Loggie container without mounting additional Loggie volumes (#460)
|
||||
- Get loggie version with api and sub command (#496) (#508)
|
||||
- Added the `worker` and the `clientId` in Kafka source (#506) (#507)
|
||||
- Upgrade `kafka-go` version (#506) (#567)
|
||||
- Added resultStatus in dev sink which can be used to simulate failure, drop (#531)
|
||||
- Pretty error when unmarshal yaml configuration failed (#539)
|
||||
- Added default topic if render kafka topic failed (#550)
|
||||
- Added `ignoreUnknownTopicOrPartition` in kafka sink (#560)
|
||||
- Supported multiple topics in kafka source (#548)
|
||||
- Added default index if render elasticsearch index failed (#551) (#553)
|
||||
- The default `maxOpenFds` is set to 4096 (#559)
|
||||
- Supported default `sinkRef` in kubernetes discovery (#555)
|
||||
- Added `${_k8s.clusterlogconfig}` in `typePodFields` (#569)
|
||||
- Supported omit empty fields in Kubernetes discovery (#570)
|
||||
- Optimized `maxbytes` interceptor (#575)
|
||||
- Moved `readFromTail`, `cleanFiles`, `fdHoldTimeoutWhenInactive`, `fdHoldTimeoutWhenRemove` from watcher to outer layer in `file source` (#579) (#585)
|
||||
- Added `cleanUnfinished` in cleanFiles (#580)
|
||||
- Added `target` in `maxbytes` interceptor (#588)
|
||||
- Added `partitionKey` in franzKafka (#562)
|
||||
- Added `highPrecision` in `rateLimit` interceptor (#525)
|
||||
|
||||
### :bug: Bug Fixes
|
||||
- Fixed panic when kubeEvent Series is nil (#459)
|
||||
- Upgraded `automaxprocs` version to v1.5.1 (#488)
|
||||
- Fixed set defaults failed in `fieldsUnderKey` (#513)
|
||||
- Fixed parse condition failed when contain ERROR in transformer interceptor (#514) (#515)
|
||||
- Fixed grpc batch out-of-order data streams (#517)
|
||||
- Fixed large line may cause oom (#529)
|
||||
- Fixed duplicated batchSize in queue (#533)
|
||||
- Fixed sqlite locked panic (#524)
|
||||
- Fixed command can't be used in multi-arch container (#541)
|
||||
- Fixed `logger listener` may cause block (#561) (#552)
|
||||
- Fixed `sink concurrency` deepCopy failed (#563)
|
||||
- Drop events when partial error in elasticsearch sink (#572)
|
||||
|
||||
# Release v1.4.0
|
||||
|
||||
### :star2: Features
|
||||
|
||||
- Added Loggie dashboard feature for easier troubleshooting (#416)
|
||||
- Enhanced log alerting function with more flexible log alert detection rules and added alertWebhook sink (#392)
|
||||
- Added sink concurrency support for automatic adaptation based on downstream delay (#376)
|
||||
- Added franzKafka sink for users who prefer the franz kafka library (#423)
|
||||
- Added elasticsearch source (#345)
|
||||
- Added zinc sink (#254)
|
||||
- Added pulsar sink (#417)
|
||||
- Added grok action to transformer interceptor (#418)
|
||||
- Added split action to transformer interceptor (#411)
|
||||
- Added jsonEncode action to transformer interceptor (#421)
|
||||
- Added fieldsFromPath configuration to source for obtaining fields from file content (#401)
|
||||
- Added fieldsRef parameter to filesource listener for obtaining key value from fields configuration and adding to metrics as label (#402)
|
||||
- In transformer interceptor, added dropIfError support to drop event if action execution fails (#409)
|
||||
- Added info listener which currently exposes loggie_info_stat metrics and displays version label (#410)
|
||||
- Added support for customized kafka sink partition key
|
||||
- Added sasl support to Kafka source (#415)
|
||||
- Added https insecureSkipVerify support to loki sink (#422)
|
||||
- Optimized file source for large files (#430)
|
||||
- Changed default value of file source maxOpenFds to 1024 (#437)
|
||||
- ContainerRuntime can now be set to none (#439)
|
||||
- Upgraded to go 1.18 (#440)
|
||||
- Optimized the configuration parameters to remove the redundancy generated by rendering
|
||||
|
||||
### :bug: Bug Fixes
|
||||
|
||||
- Added source fields to filesource listener (#402)
|
||||
- Fixed issue of transformer copy action not copying non-string body (#420)
|
||||
- Added fetching of logs file from UpperDir when rootfs collection is enabled (#414)
|
||||
- Fixed pipeline restart NPE (#454)
|
||||
- Fixed directory soft-link creation job (#453)
|
||||
|
||||
# Release v1.4.0-rc.0
|
||||
|
||||
### :star2: Features
|
||||
|
|
30
go.mod
30
go.mod
|
@ -32,23 +32,22 @@ require (
|
|||
github.com/segmentio/kafka-go v0.4.39
|
||||
github.com/shirou/gopsutil/v3 v3.22.2
|
||||
github.com/smartystreets-prototypes/go-disruptor v0.0.0-20200316140655-c96477fd7a6a
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/twmb/franz-go v1.10.4
|
||||
github.com/twmb/franz-go/pkg/sasl/kerberos v1.1.0
|
||||
go.uber.org/atomic v1.7.0
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
golang.org/x/net v0.8.0
|
||||
golang.org/x/text v0.8.0
|
||||
golang.org/x/net v0.17.0
|
||||
golang.org/x/text v0.13.0
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858
|
||||
google.golang.org/grpc v1.47.0
|
||||
google.golang.org/protobuf v1.28.0
|
||||
google.golang.org/grpc v1.54.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.25.4
|
||||
k8s.io/apimachinery v0.25.4
|
||||
k8s.io/client-go v0.25.4
|
||||
k8s.io/code-generator v0.25.4
|
||||
k8s.io/cri-api v0.24.0
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
|
@ -59,9 +58,9 @@ require (
|
|||
github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e // indirect
|
||||
github.com/ardielle/ardielle-go v1.5.2 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/danieljoos/wincred v1.0.2 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect
|
||||
github.com/emirpasic/gods v1.12.0 // indirect
|
||||
github.com/fatih/color v1.10.0 // indirect
|
||||
|
@ -78,6 +77,7 @@ require (
|
|||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d // indirect
|
||||
github.com/klauspost/compress v1.15.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
|
||||
github.com/linkedin/goavro/v2 v2.9.8 // indirect
|
||||
github.com/mattn/go-colorable v0.1.8 // indirect
|
||||
|
@ -92,13 +92,15 @@ require (
|
|||
github.com/tidwall/gjson v1.13.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/twmb/franz-go/pkg/kmsg v1.2.0 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
golang.org/x/crypto v0.7.0 // indirect
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/term v0.6.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/term v0.13.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
|
@ -114,7 +116,7 @@ require (
|
|||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
|
@ -130,7 +132,7 @@ require (
|
|||
github.com/go-playground/locales v0.13.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.17.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
|
@ -174,9 +176,13 @@ require (
|
|||
|
||||
require (
|
||||
github.com/apache/rocketmq-client-go/v2 v2.1.1
|
||||
github.com/bytedance/sonic v1.9.2
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10
|
||||
github.com/goccy/go-json v0.10.2
|
||||
github.com/goccy/go-yaml v1.11.0
|
||||
github.com/mattn/go-sqlite3 v1.11.0
|
||||
k8s.io/cri-api v0.28.3
|
||||
k8s.io/metrics v0.25.4
|
||||
sigs.k8s.io/controller-runtime v0.13.1
|
||||
)
|
||||
|
|
57
go.sum
57
go.sum
|
@ -145,6 +145,9 @@ github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTS
|
|||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
|
||||
github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
|
||||
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
|
@ -156,6 +159,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
|
|||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
|
@ -201,8 +207,9 @@ github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8
|
|||
github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
|
||||
github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA=
|
||||
github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
|
||||
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
|
@ -380,6 +387,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
|
|||
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
|
||||
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
|
||||
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54=
|
||||
github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng=
|
||||
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
|
||||
|
@ -425,8 +434,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
|||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
|
@ -615,6 +625,8 @@ github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
|
|||
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -625,8 +637,8 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
|||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
|
@ -860,6 +872,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
|||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
|
||||
|
@ -930,8 +943,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
|
@ -940,8 +954,10 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
|||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tidwall/gjson v1.13.0 h1:3TFY9yxOQShrvmjdM76K+jc66zJeT6D3/VFFYCGQf7M=
|
||||
github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
|
@ -957,6 +973,8 @@ github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2bi
|
|||
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/twmb/franz-go v1.7.0/go.mod h1:PMze0jNfNghhih2XHbkmTFykbMF5sJqmNJB31DOOzro=
|
||||
github.com/twmb/franz-go v1.10.4 h1:1PGpRG0uGTSSZCBV6lAMYcuVsyReMqdNBQRd8QCzw9U=
|
||||
github.com/twmb/franz-go v1.10.4/go.mod h1:PMze0jNfNghhih2XHbkmTFykbMF5sJqmNJB31DOOzro=
|
||||
|
@ -1027,6 +1045,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
|||
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
|
@ -1049,8 +1069,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -1143,12 +1163,11 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1258,22 +1277,20 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1283,8 +1300,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1517,8 +1534,8 @@ k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8=
|
|||
k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw=
|
||||
k8s.io/code-generator v0.25.4 h1:tjQ7/+9eN7UOiU2DP+0v4ntTI4JZLi2c1N0WllpFhTc=
|
||||
k8s.io/code-generator v0.25.4/go.mod h1:9F5fuVZOMWRme7MYj2YT3L9ropPWPokd9VRhVyD3+0w=
|
||||
k8s.io/cri-api v0.24.0 h1:PZ/MqhgYq4rxCarYe2rGNmd8G9ZuyS1NU9igolbkqlI=
|
||||
k8s.io/cri-api v0.24.0/go.mod h1:t3tImFtGeStN+ES69bQUX9sFg67ek38BM9YIJhMmuig=
|
||||
k8s.io/cri-api v0.28.3 h1:84ifk56rAy7yYI1zYqTjLLishpFgs3q7BkCKhoLhmFA=
|
||||
k8s.io/cri-api v0.28.3/go.mod h1:MTdJO2fikImnX+YzE2Ccnosj3Hw2Cinw2fXYV3ppUIE=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
|
|
18
loggie.yml
18
loggie.yml
|
@ -14,7 +14,7 @@ loggie:
|
|||
sink: ~
|
||||
queue: ~
|
||||
pipeline: ~
|
||||
normalize: ~
|
||||
sys: ~
|
||||
|
||||
discovery:
|
||||
enabled: false
|
||||
|
@ -31,15 +31,17 @@ loggie:
|
|||
defaults:
|
||||
sink:
|
||||
type: dev
|
||||
interceptors:
|
||||
- type: schema
|
||||
name: global
|
||||
order: 700
|
||||
addMeta:
|
||||
timestamp:
|
||||
key: "@timestamp"
|
||||
sources:
|
||||
- type: file
|
||||
timestampKey: "@timestamp"
|
||||
bodyKey: "message"
|
||||
fieldsUnderRoot: true
|
||||
addonMeta: true
|
||||
addonMetaSchema:
|
||||
underRoot: true
|
||||
fields:
|
||||
filename: "${_meta.filename}"
|
||||
line: "${_meta.line}"
|
||||
watcher:
|
||||
maxOpenFds: 6000
|
||||
http:
|
||||
|
|
|
@ -130,11 +130,21 @@ func ReadPipelineConfigFromFile(path string, ignore FileIgnore) (*PipelineConfig
|
|||
for _, fn := range all {
|
||||
pipes := &PipelineConfig{}
|
||||
unpack := cfg.UnPackFromFile(fn, pipes)
|
||||
if err = unpack.Defaults().Validate().Do(); err != nil {
|
||||
log.Error("invalid pipeline configs: %v, \n%s", err, unpack.Contents())
|
||||
if err = unpack.Do(); err != nil {
|
||||
log.Error("read pipeline configs from path %s failed: %v", path, err)
|
||||
continue
|
||||
}
|
||||
pipecfgs.AddPipelines(pipes.Pipelines)
|
||||
|
||||
for _, p := range pipes.Pipelines {
|
||||
pip := p
|
||||
if err := cfg.NewUnpack(nil, &pip, nil).Defaults().Validate().Do(); err != nil {
|
||||
// ignore invalid pipeline, but continue to read other pipelines
|
||||
// invalid pipeline will check by reloader later
|
||||
log.Error("pipeline: %s configs invalid: %v", p.Name, err)
|
||||
continue
|
||||
}
|
||||
pipecfgs.AddPipelines([]pipeline.Config{pip})
|
||||
}
|
||||
}
|
||||
return pipecfgs, nil
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ package event
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -128,7 +128,7 @@ func (de *DefaultEvent) Release() {
|
|||
func (de *DefaultEvent) String() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString(`header:`)
|
||||
header, _ := jsoniter.Marshal(de.Header())
|
||||
header, _ := json.Marshal(de.Header())
|
||||
sb.Write(header)
|
||||
sb.WriteString(`, body:"`)
|
||||
sb.WriteString(string(de.Body()))
|
||||
|
|
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package reloader
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -49,7 +48,7 @@ func (r *reloader) readPipelineConfigHandler(writer http.ResponseWriter, request
|
|||
continue
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(m)
|
||||
content, err := os.ReadFile(m)
|
||||
if err != nil {
|
||||
log.Warn("read config error. err: %v", err)
|
||||
return
|
||||
|
|
|
@ -38,6 +38,11 @@ type Config struct {
|
|||
FieldsFromEnv map[string]string `yaml:"fieldsFromEnv,omitempty"`
|
||||
FieldsFromPath map[string]string `yaml:"fieldsFromPath,omitempty"`
|
||||
Codec *codec.Config `yaml:"codec,omitempty"`
|
||||
|
||||
TimestampKey string `yaml:"timestampKey,omitempty"`
|
||||
TimestampLocation string `yaml:"timestampLocation,omitempty"`
|
||||
TimestampLayout string `yaml:"timestampLayout,omitempty"`
|
||||
BodyKey string `yaml:"bodyKey,omitempty"`
|
||||
}
|
||||
|
||||
func (c *Config) DeepCopy() *Config {
|
||||
|
@ -82,6 +87,11 @@ func (c *Config) DeepCopy() *Config {
|
|||
FieldsFromEnv: newFieldsFromEnv,
|
||||
FieldsFromPath: newFieldsFromPath,
|
||||
Codec: c.Codec.DeepCopy(),
|
||||
|
||||
TimestampKey: c.TimestampKey,
|
||||
TimestampLocation: c.TimestampLocation,
|
||||
TimestampLayout: c.TimestampLayout,
|
||||
BodyKey: c.BodyKey,
|
||||
}
|
||||
|
||||
return out
|
||||
|
@ -155,6 +165,19 @@ func (c *Config) Merge(from *Config) {
|
|||
} else {
|
||||
c.Codec.Merge(from.Codec)
|
||||
}
|
||||
|
||||
if c.TimestampKey == "" {
|
||||
c.TimestampKey = from.TimestampKey
|
||||
}
|
||||
if c.TimestampLocation == "" {
|
||||
c.TimestampLocation = from.TimestampLocation
|
||||
}
|
||||
if c.TimestampLayout == "" {
|
||||
c.TimestampLayout = from.TimestampLayout
|
||||
}
|
||||
if c.BodyKey == "" {
|
||||
c.BodyKey = from.BodyKey
|
||||
}
|
||||
}
|
||||
|
||||
func MergeSourceList(base []*Config, from []*Config) []*Config {
|
||||
|
|
|
@ -45,6 +45,7 @@ type Loggie struct {
|
|||
Defaults Defaults `yaml:"defaults"`
|
||||
Db persistence.DbConfig `yaml:"db"`
|
||||
ErrorAlertConfig log.AfterErrorConfiguration `yaml:"errorAlert"`
|
||||
JSONEngine string `yaml:"jsonEngine,omitempty" default:"jsoniter" validate:"oneof=jsoniter sonic std go-json"`
|
||||
}
|
||||
|
||||
type Defaults struct {
|
||||
|
@ -87,7 +88,8 @@ func (d *Defaults) SetDefaults() {
|
|||
}
|
||||
|
||||
type Http struct {
|
||||
Enabled bool `yaml:"enabled" default:"false"`
|
||||
Host string `yaml:"host" default:"0.0.0.0"`
|
||||
Port int `yaml:"port" default:"9196"`
|
||||
Enabled bool `yaml:"enabled" default:"false"`
|
||||
Host string `yaml:"host" default:"0.0.0.0"`
|
||||
Port int `yaml:"port" default:"9196"`
|
||||
RandPort bool `yaml:"randPort" default:"false"`
|
||||
}
|
||||
|
|
|
@ -23,11 +23,12 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
SelectorTypePod = "pod"
|
||||
SelectorTypeNode = "node"
|
||||
SelectorTypeCluster = "cluster"
|
||||
SelectorTypeVm = "vm"
|
||||
SelectorTypeAll = "all"
|
||||
SelectorTypePod = "pod"
|
||||
SelectorTypeNode = "node"
|
||||
SelectorTypeCluster = "cluster"
|
||||
SelectorTypeVm = "vm"
|
||||
SelectorTypeWorkload = "workload"
|
||||
SelectorTypeAll = "all"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
|
@ -48,10 +49,12 @@ type Spec struct {
|
|||
}
|
||||
|
||||
type Selector struct {
|
||||
Cluster string `json:"cluster,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
PodSelector `json:",inline"`
|
||||
NodeSelector `json:",inline"`
|
||||
Cluster string `json:"cluster,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
PodSelector `json:",inline"`
|
||||
NodeSelector `json:",inline"`
|
||||
NamespaceSelector `json:",inline"`
|
||||
WorkloadSelector []WorkloadSelector `json:"workloadSelector,omitempty"`
|
||||
}
|
||||
|
||||
type PodSelector struct {
|
||||
|
@ -62,6 +65,18 @@ type NodeSelector struct {
|
|||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
}
|
||||
|
||||
type NamespaceSelector struct {
|
||||
NamespaceSelector []string `json:"namespaceSelector,omitempty"`
|
||||
ExcludeNamespaceSelector []string `json:"excludeNamespaceSelector,omitempty"`
|
||||
}
|
||||
|
||||
type WorkloadSelector struct {
|
||||
Type []string `json:"type,omitempty"`
|
||||
NameSelector []string `json:"nameSelector,omitempty"`
|
||||
NamespaceSelector []string `json:"namespaceSelector,omitempty"`
|
||||
ExcludeNamespaceSelector []string `json:"excludeNamespaceSelector,omitempty"`
|
||||
}
|
||||
|
||||
type Pipeline struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Sources string `json:"sources,omitempty"`
|
||||
|
@ -91,7 +106,7 @@ func (in *ClusterLogConfig) Validate() error {
|
|||
}
|
||||
|
||||
tp := in.Spec.Selector.Type
|
||||
if tp != SelectorTypePod && tp != SelectorTypeNode && tp != SelectorTypeCluster && tp != SelectorTypeVm {
|
||||
if tp != SelectorTypePod && tp != SelectorTypeNode && tp != SelectorTypeCluster && tp != SelectorTypeVm && tp != SelectorTypeWorkload {
|
||||
return errors.New("spec.selector.type is invalidate")
|
||||
}
|
||||
|
||||
|
|
|
@ -238,6 +238,32 @@ func (in *Message) DeepCopy() *Message {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) {
|
||||
*out = *in
|
||||
if in.NamespaceSelector != nil {
|
||||
in, out := &in.NamespaceSelector, &out.NamespaceSelector
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ExcludeNamespaceSelector != nil {
|
||||
in, out := &in.ExcludeNamespaceSelector, &out.ExcludeNamespaceSelector
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector.
|
||||
func (in *NamespaceSelector) DeepCopy() *NamespaceSelector {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamespaceSelector)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
|
||||
*out = *in
|
||||
|
@ -321,6 +347,14 @@ func (in *Selector) DeepCopyInto(out *Selector) {
|
|||
*out = *in
|
||||
in.PodSelector.DeepCopyInto(&out.PodSelector)
|
||||
in.NodeSelector.DeepCopyInto(&out.NodeSelector)
|
||||
in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector)
|
||||
if in.WorkloadSelector != nil {
|
||||
in, out := &in.WorkloadSelector, &out.WorkloadSelector
|
||||
*out = make([]WorkloadSelector, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -550,3 +584,39 @@ func (in *VmStatus) DeepCopy() *VmStatus {
|
|||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkloadSelector) DeepCopyInto(out *WorkloadSelector) {
|
||||
*out = *in
|
||||
if in.Type != nil {
|
||||
in, out := &in.Type, &out.Type
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NameSelector != nil {
|
||||
in, out := &in.NameSelector, &out.NameSelector
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NamespaceSelector != nil {
|
||||
in, out := &in.NamespaceSelector, &out.NamespaceSelector
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ExcludeNamespaceSelector != nil {
|
||||
in, out := &in.ExcludeNamespaceSelector, &out.ExcludeNamespaceSelector
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSelector.
|
||||
func (in *WorkloadSelector) DeepCopy() *WorkloadSelector {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkloadSelector)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
|
@ -36,14 +36,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
|
||||
// of clientsets, like in:
|
||||
//
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
//
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
//
|
||||
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
|
||||
// correctly.
|
||||
|
|
|
@ -36,14 +36,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
|
|||
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
|
||||
// of clientsets, like in:
|
||||
//
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
//
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
//
|
||||
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
|
||||
// correctly.
|
||||
|
|
|
@ -397,7 +397,7 @@ func (c *Controller) handleLogConfigSelectorHasChange(new *logconfigv1beta1.LogC
|
|||
|
||||
lgcKey := helper.MetaNamespaceKey(old.Namespace, old.Name)
|
||||
switch new.Spec.Selector.Type {
|
||||
case logconfigv1beta1.SelectorTypePod:
|
||||
case logconfigv1beta1.SelectorTypePod, logconfigv1beta1.SelectorTypeWorkload:
|
||||
if !helper.MatchStringMap(new.Spec.Selector.LabelSelector,
|
||||
old.Spec.Selector.LabelSelector) {
|
||||
err = c.handleAllTypesDelete(lgcKey, logconfigv1beta1.SelectorTypePod)
|
||||
|
|
|
@ -268,7 +268,7 @@ func (c *Controller) handleAllTypesAddOrUpdate(lgc *logconfigv1beta1.LogConfig)
|
|||
|
||||
lgc = lgc.DeepCopy()
|
||||
switch lgc.Spec.Selector.Type {
|
||||
case logconfigv1beta1.SelectorTypePod:
|
||||
case logconfigv1beta1.SelectorTypePod, logconfigv1beta1.SelectorTypeWorkload:
|
||||
return c.handleLogConfigTypePodAddOrUpdate(lgc)
|
||||
|
||||
case logconfigv1beta1.SelectorTypeNode:
|
||||
|
@ -311,7 +311,7 @@ func (c *Controller) reconcileLogConfigDelete(key string, selectorType string) e
|
|||
|
||||
func (c *Controller) handleAllTypesDelete(key string, selectorType string) error {
|
||||
switch selectorType {
|
||||
case logconfigv1beta1.SelectorTypePod:
|
||||
case logconfigv1beta1.SelectorTypePod, logconfigv1beta1.SelectorTypeWorkload:
|
||||
if ok := c.typePodIndex.DeletePipeConfigsByLogConfigKey(key); !ok {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -132,7 +132,7 @@ type fmtKey struct {
|
|||
|
||||
func (c *Controller) handleLogConfigTypePodAddOrUpdate(lgc *logconfigv1beta1.LogConfig) (err error, podsName []string) {
|
||||
// find pods related in the node
|
||||
podList, err := helper.GetLogConfigRelatedPod(lgc, c.podsLister)
|
||||
podList, err := helper.GetLogConfigRelatedPod(lgc, c.podsLister, c.kubeClientset)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
|
@ -182,7 +182,7 @@ func (c *Controller) handlePodAddOrUpdate(pod *corev1.Pod) error {
|
|||
|
||||
func (c *Controller) handlePodAddOrUpdateOfLogConfig(pod *corev1.Pod) {
|
||||
// label selected logConfigs
|
||||
lgcList, err := helper.GetPodRelatedLogConfigs(pod, c.logConfigLister)
|
||||
lgcList, err := helper.GetPodRelatedLogConfigs(pod, c.logConfigLister, c.kubeClientset)
|
||||
if err != nil || len(lgcList) == 0 {
|
||||
return
|
||||
}
|
||||
|
@ -207,7 +207,7 @@ func (c *Controller) handlePodAddOrUpdateOfLogConfig(pod *corev1.Pod) {
|
|||
|
||||
func (c *Controller) handlePodAddOrUpdateOfClusterLogConfig(pod *corev1.Pod) {
|
||||
// label selected clusterLogConfigs
|
||||
clgcList, err := helper.GetPodRelatedClusterLogConfigs(pod, c.clusterLogConfigLister)
|
||||
clgcList, err := helper.GetPodRelatedClusterLogConfigs(pod, c.clusterLogConfigLister, c.kubeClientset)
|
||||
if err != nil || len(clgcList) == 0 {
|
||||
return
|
||||
}
|
||||
|
@ -364,7 +364,7 @@ func (c *Controller) makeConfigPerSource(s *source.Config, pod *corev1.Pod, lgc
|
|||
}
|
||||
|
||||
// change the source name, add pod.Name-containerName as prefix, since there maybe multiple containers in pod
|
||||
filesrc.Name = helper.GenTypePodSourceName(pod.Name, status.Name, filesrc.Name)
|
||||
filesrc.Name = helper.GenTypePodSourceName(lgc.Namespace, pod.Namespace, pod.Name, status.Name, filesrc.Name)
|
||||
|
||||
// inject default pod metadata
|
||||
if err := c.injectTypePodFields(c.config.DynamicContainerLog, filesrc, extra, pod, lgc, status.Name); err != nil {
|
||||
|
|
|
@ -122,7 +122,12 @@ func ToPipelineInterceptor(interceptorsRaw string, interceptorRef string, interc
|
|||
return interConfList, nil
|
||||
}
|
||||
|
||||
func GenTypePodSourceName(podName string, containerName string, sourceName string) string {
|
||||
func GenTypePodSourceName(lgcNamespace string, podNamespace string, podName string, containerName string, sourceName string) string {
|
||||
// if lgcNamespace is empty, we use podNamespace as the first part of the source name,
|
||||
// because this is the pod matched by clusterLogConfig, if the pod namespace is not added, it may cause the source to be duplicated
|
||||
if lgcNamespace == "" {
|
||||
return fmt.Sprintf("%s/%s/%s/%s", podNamespace, podName, containerName, sourceName)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s/%s", podName, containerName, sourceName)
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,210 @@
|
|||
package helper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
logconfigv1beta1 "github.com/loggie-io/loggie/pkg/discovery/kubernetes/apis/loggie/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeclientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
type workloadFilterInfo struct {
|
||||
// namespace list
|
||||
namespaces map[string]struct{}
|
||||
// exclude namespace list
|
||||
excludeNamespaces map[string]struct{}
|
||||
// workload name
|
||||
names map[string]struct{}
|
||||
}
|
||||
|
||||
type filterCacheChecker struct {
|
||||
namespaces map[string]string
|
||||
excludeNamespaces map[string]string
|
||||
// workload(type) => workloadFilterInfo
|
||||
workloadSelector map[string]workloadFilterInfo
|
||||
clientSet kubeclientset.Interface
|
||||
lgc *logconfigv1beta1.LogConfig
|
||||
}
|
||||
|
||||
func newFilterCacheChecker(lgc *logconfigv1beta1.LogConfig, clientSet kubeclientset.Interface) *filterCacheChecker {
|
||||
f := &filterCacheChecker{
|
||||
clientSet: clientSet,
|
||||
lgc: lgc,
|
||||
}
|
||||
if lgc.Spec.Selector == nil {
|
||||
return f
|
||||
}
|
||||
|
||||
if len(lgc.Spec.Selector.NamespaceSelector.NamespaceSelector) != 0 {
|
||||
f.namespaces = make(map[string]string)
|
||||
for _, v := range lgc.Spec.Selector.NamespaceSelector.NamespaceSelector {
|
||||
f.namespaces[v] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(lgc.Spec.Selector.NamespaceSelector.ExcludeNamespaceSelector) != 0 {
|
||||
f.excludeNamespaces = make(map[string]string)
|
||||
for _, v := range lgc.Spec.Selector.NamespaceSelector.ExcludeNamespaceSelector {
|
||||
f.excludeNamespaces[v] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(lgc.Spec.Selector.NamespaceSelector.ExcludeNamespaceSelector) != 0 {
|
||||
f.excludeNamespaces = make(map[string]string)
|
||||
for _, v := range lgc.Spec.Selector.NamespaceSelector.ExcludeNamespaceSelector {
|
||||
f.excludeNamespaces[v] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(lgc.Spec.Selector.WorkloadSelector) != 0 {
|
||||
f.workloadSelector = make(map[string]workloadFilterInfo)
|
||||
for _, v := range lgc.Spec.Selector.WorkloadSelector {
|
||||
for _, workloadType := range v.Type {
|
||||
_, ok := f.workloadSelector[workloadType]
|
||||
if !ok {
|
||||
f.workloadSelector[workloadType] = workloadFilterInfo{
|
||||
namespaces: make(map[string]struct{}),
|
||||
excludeNamespaces: make(map[string]struct{}),
|
||||
names: make(map[string]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.NamespaceSelector) != 0 {
|
||||
for _, namespace := range v.NamespaceSelector {
|
||||
f.workloadSelector[workloadType].namespaces[namespace] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.ExcludeNamespaceSelector) != 0 {
|
||||
for _, namespace := range v.ExcludeNamespaceSelector {
|
||||
f.workloadSelector[workloadType].excludeNamespaces[namespace] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.NameSelector) != 0 {
|
||||
for _, name := range v.NameSelector {
|
||||
f.workloadSelector[workloadType].names[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// checkNamespace Check whether the namespace is legal
|
||||
func (p *filterCacheChecker) checkNamespace(pod *corev1.Pod) bool {
|
||||
if len(p.namespaces) == 0 && len(p.excludeNamespaces) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(p.excludeNamespaces) != 0 {
|
||||
_, ok := p.excludeNamespaces[pod.GetNamespace()]
|
||||
if ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if len(p.namespaces) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
_, ok := p.namespaces[pod.GetNamespace()]
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *filterCacheChecker) checkOwner(owner metav1.OwnerReference, namespace string) (bool, error) {
|
||||
|
||||
// If workloadSelector is not selected, then all are consistent by default.
|
||||
if len(p.workloadSelector) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
kind := owner.Kind
|
||||
name := owner.Name
|
||||
if owner.Kind == "ReplicaSet" {
|
||||
rs, err := p.clientSet.AppsV1().ReplicaSets(namespace).Get(context.TODO(), owner.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(rs.GetOwnerReferences()) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
deploymentOwner := rs.GetOwnerReferences()[0]
|
||||
if deploymentOwner.Kind != "Deployment" {
|
||||
return false, nil
|
||||
}
|
||||
kind = "Deployment"
|
||||
name = deploymentOwner.Name
|
||||
}
|
||||
|
||||
workloadInfo, ok := p.workloadSelector[kind]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if len(workloadInfo.namespaces) != 0 {
|
||||
_, ok = workloadInfo.namespaces[namespace]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(workloadInfo.excludeNamespaces) != 0 {
|
||||
_, ok = workloadInfo.excludeNamespaces[namespace]
|
||||
if ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(workloadInfo.names) != 0 {
|
||||
_, ok = workloadInfo.names[name]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (p *filterCacheChecker) checkWorkload(pod *corev1.Pod) bool {
|
||||
owners := pod.GetOwnerReferences()
|
||||
if len(owners) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, owner := range owners {
|
||||
ret, err := p.checkOwner(owner, pod.GetNamespace())
|
||||
if err != nil {
|
||||
log.Error("check owner error:%s", err)
|
||||
return false
|
||||
}
|
||||
if !ret {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *filterCacheChecker) checkLabels(pod *corev1.Pod) bool {
|
||||
if p.lgc.Spec.Selector == nil {
|
||||
return true
|
||||
}
|
||||
if len(p.lgc.Spec.Selector.LabelSelector) != 0 {
|
||||
if LabelsSubset(p.lgc.Spec.Selector.LabelSelector, pod.Labels) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
@ -19,14 +19,19 @@ package helper
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/loggie-io/loggie/pkg/discovery/kubernetes/runtime"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/loggie-io/loggie/pkg/discovery/kubernetes/runtime"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
criapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
@ -63,18 +68,13 @@ func MetaNamespaceKey(namespace string, name string) string {
|
|||
|
||||
type FuncGetRelatedPod func() ([]*corev1.Pod, error)
|
||||
|
||||
func GetLogConfigRelatedPod(lgc *logconfigv1beta1.LogConfig, podsLister corev1listers.PodLister) ([]*corev1.Pod, error) {
|
||||
|
||||
sel, err := Selector(lgc.Spec.Selector.LabelSelector)
|
||||
func GetLogConfigRelatedPod(lgc *logconfigv1beta1.LogConfig, podsLister corev1listers.PodLister, clientSet kubernetes.Interface) ([]*corev1.Pod, error) {
|
||||
filter := NewPodFilter(lgc, podsLister, clientSet)
|
||||
pods, err := filter.Filter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret, err := podsLister.Pods(lgc.Namespace).List(sel)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "%s/%s cannot find pod by labelSelector %#v", lgc.Namespace, lgc.Name, lgc.Spec.Selector.PodSelector.LabelSelector)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
func Selector(labelSelector map[string]string) (labels.Selector, error) {
|
||||
|
@ -107,7 +107,7 @@ func Selector(labelSelector map[string]string) (labels.Selector, error) {
|
|||
return selector, nil
|
||||
}
|
||||
|
||||
func GetPodRelatedLogConfigs(pod *corev1.Pod, lgcLister logconfigLister.LogConfigLister) ([]*logconfigv1beta1.LogConfig, error) {
|
||||
func GetPodRelatedLogConfigs(pod *corev1.Pod, lgcLister logconfigLister.LogConfigLister, clientSet kubernetes.Interface) ([]*logconfigv1beta1.LogConfig, error) {
|
||||
lgcList, err := lgcLister.LogConfigs(pod.Namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -119,14 +119,19 @@ func GetPodRelatedLogConfigs(pod *corev1.Pod, lgcLister logconfigLister.LogConfi
|
|||
continue
|
||||
}
|
||||
|
||||
if LabelsSubset(lgc.Spec.Selector.LabelSelector, pod.Labels) {
|
||||
confirm := NewPodsConfirm(lgc, clientSet)
|
||||
result, err := confirm.Confirm(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result {
|
||||
ret = append(ret, lgc)
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func GetPodRelatedClusterLogConfigs(pod *corev1.Pod, clgcLister logconfigLister.ClusterLogConfigLister) ([]*logconfigv1beta1.ClusterLogConfig, error) {
|
||||
func GetPodRelatedClusterLogConfigs(pod *corev1.Pod, clgcLister logconfigLister.ClusterLogConfigLister, clientSet kubernetes.Interface) ([]*logconfigv1beta1.ClusterLogConfig, error) {
|
||||
clgcList, err := clgcLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -134,11 +139,18 @@ func GetPodRelatedClusterLogConfigs(pod *corev1.Pod, clgcLister logconfigLister.
|
|||
|
||||
ret := make([]*logconfigv1beta1.ClusterLogConfig, 0)
|
||||
for _, lgc := range clgcList {
|
||||
if lgc.Spec.Selector == nil || lgc.Spec.Selector.Type != logconfigv1beta1.SelectorTypePod {
|
||||
if lgc.Spec.Selector.Type != logconfigv1beta1.SelectorTypeWorkload && (lgc.Spec.Selector == nil || lgc.Spec.Selector.Type != logconfigv1beta1.SelectorTypePod) {
|
||||
continue
|
||||
}
|
||||
|
||||
if LabelsSubset(lgc.Spec.Selector.LabelSelector, pod.Labels) {
|
||||
logConfig := lgc.ToLogConfig()
|
||||
confirm := NewPodsConfirm(logConfig, clientSet)
|
||||
result, err := confirm.Confirm(pod)
|
||||
if err != nil {
|
||||
log.Error("filter pod error:%s", err)
|
||||
continue
|
||||
}
|
||||
if result {
|
||||
ret = append(ret, lgc)
|
||||
}
|
||||
}
|
||||
|
@ -437,8 +449,12 @@ func nodePathByContainerPath(pathPattern string, pod *corev1.Pod, volumeName str
|
|||
return getEmptyDirNodePath(pathPattern, pod, volumeName, volumeMountPath, kubeletRootDir, subPathRes), nil
|
||||
}
|
||||
|
||||
// If pod mount pvc as log path,we need set rootFsCollectionEnabled to true, and container runtime should be docker.
|
||||
if vol.PersistentVolumeClaim != nil && rootFsCollectionEnabled && containerRuntime.Name() == runtime.RuntimeDocker {
|
||||
if vol.NFS != nil && containerRuntime.Name() == runtime.RuntimeDocker {
|
||||
return getNfsPath(pathPattern, pod, volumeName, volumeMountPath, kubeletRootDir, subPathRes), nil
|
||||
}
|
||||
|
||||
// If pod mount pvc as log path,we need set rootFsCollectionEnabled to true.
|
||||
if vol.PersistentVolumeClaim != nil && rootFsCollectionEnabled {
|
||||
return getPVNodePath(pathPattern, volumeMountPath, containerId, containerRuntime)
|
||||
}
|
||||
|
||||
|
@ -460,28 +476,98 @@ func getEmptyDirNodePath(pathPattern string, pod *corev1.Pod, volumeName string,
|
|||
return filepath.Join(emptyDirPath, subPath, pathSuffix)
|
||||
}
|
||||
|
||||
// refers to https://github.com/kubernetes/kubernetes/blob/6aac45ff1e99068e834ba3b93b673530cf62c007/pkg/volume/nfs/nfs.go#L202
|
||||
func getNfsPath(pathPattern string, pod *corev1.Pod, volumeName string, volumeMountPath string, kubeletRootDir string, subPath string) string {
|
||||
emptyDirPath := filepath.Join(kubeletRootDir, "pods", string(pod.UID), "volumes/kubernetes.io~nfs", volumeName)
|
||||
pathSuffix := strings.TrimPrefix(pathPattern, volumeMountPath)
|
||||
return filepath.Join(emptyDirPath, subPath, pathSuffix)
|
||||
}
|
||||
|
||||
// Find the actual path on the node based on pvc.
|
||||
func getPVNodePath(pathPattern string, volumeMountPath string, containerId string, containerRuntime runtime.Runtime) (string, error) {
|
||||
ctx := context.Background()
|
||||
if containerRuntime == nil {
|
||||
return "", errors.New("docker runtime is not initial")
|
||||
}
|
||||
|
||||
cli := containerRuntime.Client().(*dockerclient.Client)
|
||||
containerJson, err := cli.ContainerInspect(ctx, containerId)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("containerId: %s, docker inspect error: %s", containerId, err)
|
||||
}
|
||||
|
||||
for _, mnt := range containerJson.Mounts {
|
||||
if !PathEqual(mnt.Destination, volumeMountPath) {
|
||||
continue
|
||||
if containerRuntime.Name() == runtime.RuntimeDocker {
|
||||
cli := containerRuntime.Client().(*dockerclient.Client)
|
||||
containerJson, err := cli.ContainerInspect(ctx, containerId)
|
||||
if err != nil {
|
||||
return "", errors.Errorf("containerId: %s, docker inspect error: %s", containerId, err)
|
||||
}
|
||||
|
||||
pathSuffix := strings.TrimPrefix(pathPattern, volumeMountPath)
|
||||
return filepath.Join(mnt.Source, pathSuffix), nil
|
||||
for _, mnt := range containerJson.Mounts {
|
||||
if !PathEqual(mnt.Destination, volumeMountPath) {
|
||||
continue
|
||||
}
|
||||
|
||||
pathSuffix := strings.TrimPrefix(pathPattern, volumeMountPath)
|
||||
return filepath.Join(mnt.Source, pathSuffix), nil
|
||||
}
|
||||
return "", errors.New("cannot find pv volume path in node")
|
||||
} else if containerRuntime.Name() == runtime.RuntimeContainerd {
|
||||
cli := containerRuntime.Client().(criapi.RuntimeServiceClient)
|
||||
|
||||
request := &criapi.ContainerStatusRequest{
|
||||
ContainerId: containerId,
|
||||
Verbose: true,
|
||||
}
|
||||
|
||||
response, err := cli.ContainerStatus(ctx, request)
|
||||
if err != nil {
|
||||
return "", errors.WithMessagef(err, "get container(id: %s) status failed", containerId)
|
||||
}
|
||||
|
||||
infoStr, ok := response.GetInfo()["info"]
|
||||
if !ok {
|
||||
if log.IsDebugLevel() {
|
||||
info, _ := json.Marshal(response.GetInfo())
|
||||
log.Debug("get info: %s from container(id: %s)", string(info), containerId)
|
||||
}
|
||||
return "", errors.Errorf("cannot get info from container(id: %s) status", containerId)
|
||||
}
|
||||
|
||||
infoMap := make(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(infoStr), &infoMap); err != nil {
|
||||
return "", errors.WithMessagef(err, "get info from container(id: %s)", containerId)
|
||||
}
|
||||
configIf, ok := infoMap["config"]
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get config from container(id: %s) status", containerId)
|
||||
}
|
||||
configMap, ok := configIf.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get config map from container(id: %s) status", containerId)
|
||||
}
|
||||
mountsIf, ok := configMap["mounts"]
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get mounts from container(id: %s) status", containerId)
|
||||
}
|
||||
mountsSlice, ok := mountsIf.([]interface{})
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get mounts slice from container(id: %s) status", containerId)
|
||||
}
|
||||
for _, mntIf := range mountsSlice {
|
||||
mnt, ok := mntIf.(map[string]interface{})
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get mount from container(id: %s) status", containerId)
|
||||
}
|
||||
container_path, ok := mnt["container_path"].(string)
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get container_path from container(id: %s) status", containerId)
|
||||
}
|
||||
host_path, ok := mnt["host_path"].(string)
|
||||
if !ok {
|
||||
return "", errors.Errorf("cannot get host_path from container(id: %s) status", containerId)
|
||||
}
|
||||
if !PathEqual(container_path, volumeMountPath) {
|
||||
continue
|
||||
}
|
||||
|
||||
pathSuffix := strings.TrimPrefix(pathPattern, volumeMountPath)
|
||||
return filepath.Join(host_path, pathSuffix), nil
|
||||
}
|
||||
return "", errors.New("cannot find pv volume path in node")
|
||||
} else {
|
||||
return "", errors.New("docker or containerd runtime is not initial")
|
||||
}
|
||||
return "", errors.New("cannot find pv volume path in node")
|
||||
}
|
||||
|
||||
func GetMatchedPodLabel(labelKeys []string, pod *corev1.Pod) map[string]string {
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
package helper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
logconfigv1beta1 "github.com/loggie-io/loggie/pkg/discovery/kubernetes/apis/loggie/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kubeclientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// PodsConfirm 检查pod是否符合logConfig的规则
|
||||
type PodsConfirm struct {
|
||||
lgc *logconfigv1beta1.LogConfig
|
||||
clientSet kubeclientset.Interface
|
||||
cache *filterCacheChecker
|
||||
}
|
||||
|
||||
func NewPodsConfirm(lgc *logconfigv1beta1.LogConfig, clientSet kubeclientset.Interface) *PodsConfirm {
|
||||
return &PodsConfirm{
|
||||
lgc: lgc,
|
||||
clientSet: clientSet,
|
||||
cache: newFilterCacheChecker(lgc, clientSet),
|
||||
}
|
||||
}
|
||||
|
||||
// Confirm Confirm whether the pod meets the lgc rules
|
||||
func (p *PodsConfirm) Confirm(pod *corev1.Pod) (bool, error) {
|
||||
if pod == nil {
|
||||
return false, errors.New("confirm pod error;pod is nil")
|
||||
}
|
||||
|
||||
if !IsPodReady(pod) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// check label
|
||||
if !p.cache.checkLabels(pod) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !p.cache.checkNamespace(pod) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !p.cache.checkWorkload(pod) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
package helper
|
||||
|
||||
import (
|
||||
logconfigv1beta1 "github.com/loggie-io/loggie/pkg/discovery/kubernetes/apis/loggie/v1beta1"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
kubeclientset "k8s.io/client-go/kubernetes"
|
||||
corev1listers "k8s.io/client-go/listers/core/v1"
|
||||
)
|
||||
|
||||
type PodsFilter struct {
|
||||
podsLister corev1listers.PodLister
|
||||
lgc *logconfigv1beta1.LogConfig
|
||||
clientSet kubeclientset.Interface
|
||||
cache *filterCacheChecker
|
||||
}
|
||||
|
||||
func NewPodFilter(lgc *logconfigv1beta1.LogConfig, podsLister corev1listers.PodLister, clientSet kubeclientset.Interface) *PodsFilter {
|
||||
p := &PodsFilter{
|
||||
lgc: lgc,
|
||||
clientSet: clientSet,
|
||||
podsLister: podsLister,
|
||||
cache: newFilterCacheChecker(lgc, clientSet),
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PodsFilter) getLabelSelector(lgc *logconfigv1beta1.LogConfig) (labels.Selector, error) {
|
||||
var matchExpressions []metav1.LabelSelectorRequirement
|
||||
for key, val := range lgc.Spec.Selector.LabelSelector {
|
||||
if val != MatchAllToken {
|
||||
continue
|
||||
}
|
||||
sel := metav1.LabelSelectorRequirement{
|
||||
Key: key,
|
||||
Operator: metav1.LabelSelectorOpExists,
|
||||
}
|
||||
matchExpressions = append(matchExpressions, sel)
|
||||
}
|
||||
|
||||
for k, v := range lgc.Spec.Selector.LabelSelector {
|
||||
if v == MatchAllToken {
|
||||
delete(lgc.Spec.Selector.LabelSelector, k)
|
||||
}
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||
MatchLabels: lgc.Spec.Selector.LabelSelector,
|
||||
MatchExpressions: matchExpressions,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "make LabelSelector error")
|
||||
}
|
||||
return selector, nil
|
||||
}
|
||||
|
||||
// GetPodsByLabelSelector select pod by label
|
||||
func (p *PodsFilter) getPodsByLabelSelector() ([]*corev1.Pod, error) {
|
||||
// By default read all
|
||||
if p.lgc.Spec.Selector == nil || (len(p.lgc.Spec.Selector.PodSelector.LabelSelector) == 0) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{})
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "make LabelSelector error")
|
||||
}
|
||||
ret, err := p.podsLister.List(selector)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "%s/%s cannot find pod by labelSelector %#v", p.lgc.Namespace, p.lgc.Name, p.lgc.Spec.Selector.PodSelector.LabelSelector)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Prefer labelSelector
|
||||
labelSelectors, err := p.getLabelSelector(p.lgc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret, err := p.podsLister.List(labelSelectors)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessagef(err, "%s/%s cannot find pod by labelSelector %#v", p.lgc.Namespace, p.lgc.Name, p.lgc.Spec.Selector.PodSelector.LabelSelector)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Filter Filter pods
|
||||
func (p *PodsFilter) Filter() ([]*corev1.Pod, error) {
|
||||
pods, err := p.getPodsByLabelSelector()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(p.cache.namespaces) == 0 && len(p.cache.excludeNamespaces) == 0 && len(p.cache.workloadSelector) == 0 {
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
result := make([]*corev1.Pod, 0)
|
||||
|
||||
for _, pod := range pods {
|
||||
|
||||
if !IsPodReady(pod) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !p.cache.checkNamespace(pod) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !p.cache.checkWorkload(pod) {
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, pod)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
@ -18,12 +18,12 @@ package runtime
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
logconfigv1beta1 "github.com/loggie-io/loggie/pkg/discovery/kubernetes/apis/loggie/v1beta1"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
criapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
|
||||
criapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
"path"
|
||||
)
|
||||
|
||||
|
@ -115,13 +115,11 @@ func (c *ContainerD) GetRootfsPath(ctx context.Context, containerId string, cont
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if runtime == string(RuncRuntimeType) {
|
||||
} else {
|
||||
prefix, err = c.getRuncRuntimeRootfsPath(infoMap, containerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, errors.Errorf("Unknown runtime type from container(id: %s) status", containerId)
|
||||
}
|
||||
|
||||
var rootfsPaths []string
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
criapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
|
||||
criapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
"net"
|
||||
"net/url"
|
||||
"time"
|
||||
|
|
|
@ -18,7 +18,7 @@ package alertmanager
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package logger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"time"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package filesource
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package filewatcher
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package normalize
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/eventbus/export/logger"
|
||||
promeExporter "github.com/loggie-io/loggie/pkg/eventbus/export/prometheus"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"time"
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package queue
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package reload
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"time"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
|
|
|
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package sink
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
|
|
@ -17,8 +17,9 @@ limitations under the License.
|
|||
package sys
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
@ -48,8 +49,9 @@ func makeListener() eventbus.Listener {
|
|||
}
|
||||
|
||||
type sysData struct {
|
||||
MemoryRss uint64 `json:"memRss"`
|
||||
CPUPercent float64 `json:"cpuPercent"`
|
||||
MemoryRss uint64 `json:"-"`
|
||||
MemoryRssHumanize string `json:"memRss"`
|
||||
CPUPercent float64 `json:"cpuPercent"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
|
@ -122,6 +124,7 @@ func (l *Listener) getSysStat() error {
|
|||
return err
|
||||
}
|
||||
l.data.MemoryRss = mem.RSS
|
||||
l.data.MemoryRssHumanize = humanize.Bytes(mem.RSS)
|
||||
|
||||
cpuPer, err := l.proc.Percent(1 * time.Second)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
//go:build !include_core
|
||||
|
||||
/*
|
||||
Copyright 2021 Loggie Authors
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
|
@ -0,0 +1,62 @@
|
|||
//go:build include_core
|
||||
|
||||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package include
|
||||
|
||||
import (
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/export/prometheus"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/filesource"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/filewatcher"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/info"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/logalerting"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/pipeline"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/queue"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/reload"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/sink"
|
||||
_ "github.com/loggie-io/loggie/pkg/eventbus/listener/sys"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/addhostmeta"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/addk8smeta"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/limit"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/logalert"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/logalert/condition"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/maxbytes"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/metric"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/retry"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/schema"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/transformer"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/transformer/action"
|
||||
_ "github.com/loggie-io/loggie/pkg/interceptor/transformer/condition"
|
||||
_ "github.com/loggie-io/loggie/pkg/queue/channel"
|
||||
_ "github.com/loggie-io/loggie/pkg/queue/memory"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/alertwebhook"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/codec/json"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/codec/raw"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/dev"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/elasticsearch"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/file"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/franz"
|
||||
_ "github.com/loggie-io/loggie/pkg/sink/kafka"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/codec/json"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/codec/regex"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/dev"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/elasticsearch"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/file"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/file/process"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/franz"
|
||||
_ "github.com/loggie-io/loggie/pkg/source/kafka"
|
||||
)
|
|
@ -18,9 +18,9 @@ package json_decode
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"strings"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
|
@ -35,10 +35,6 @@ func init() {
|
|||
pipeline.Register(api.INTERCEPTOR, Type, makeInterceptor)
|
||||
}
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
func makeInterceptor(info pipeline.Info) api.Component {
|
||||
return &Interceptor{
|
||||
config: &Config{},
|
||||
|
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package normalize
|
||||
|
||||
import (
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
|
@ -17,18 +17,14 @@ limitations under the License.
|
|||
package normalize
|
||||
|
||||
import (
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/runtime"
|
||||
)
|
||||
|
||||
const ProcessorJsonDecode = "jsonDecode"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
type JsonDecodeProcessor struct {
|
||||
config *JsonDecodeConfig
|
||||
interceptor *Interceptor
|
||||
|
|
|
@ -144,6 +144,7 @@ func (i *Interceptor) Intercept(invoker sink.Invoker, invocation sink.Invocation
|
|||
rm := i.retryMeta(batch)
|
||||
retryMaxCount := i.config.RetryMaxCount
|
||||
if rm != nil && retryMaxCount > 0 && retryMaxCount < rm.count {
|
||||
i.signChan <- Reset
|
||||
return result.DropWith(errors.New(fmt.Sprintf("retry reaches the limit: retryMaxCount(%d)", retryMaxCount)))
|
||||
}
|
||||
i.in <- batch
|
||||
|
|
|
@ -17,10 +17,10 @@ limitations under the License.
|
|||
package action
|
||||
|
||||
import (
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
"github.com/loggie-io/loggie/pkg/util/eventops"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -29,10 +29,6 @@ const (
|
|||
JsonDecodeUsageMsg = "usage: jsonDecode(key) or jsonDecode(key, to)"
|
||||
)
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterAction(JsonDecodeName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewJsonDecode(args)
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/util/eventops"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/runtime"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
eventer "github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
|
|
@ -0,0 +1,138 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package action
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
"github.com/loggie-io/loggie/pkg/util/eventops"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
ReplaceName = "replace"
|
||||
ReplaceUsageMsg = "usage: replace(key)"
|
||||
|
||||
ReplaceRegexName = "replaceRegex"
|
||||
ReplaceRegexUsageMsg = "usage: replaceRegex(key)"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterAction(ReplaceName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewReplace(args, extra)
|
||||
})
|
||||
|
||||
RegisterAction(ReplaceRegexName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewReplaceRegex(args, extra)
|
||||
})
|
||||
}
|
||||
|
||||
type Replace struct {
|
||||
key string
|
||||
extra *ReplaceExtra
|
||||
}
|
||||
|
||||
type ReplaceExtra struct {
|
||||
Old string `yaml:"old,omitempty" validate:"required"`
|
||||
New string `yaml:"new,omitempty" validate:"required"`
|
||||
Max int `yaml:"max,omitempty" default:"-1"`
|
||||
}
|
||||
|
||||
func NewReplace(args []string, extra cfg.CommonCfg) (*Replace, error) {
|
||||
aCount := len(args)
|
||||
if aCount != 1 {
|
||||
return nil, errors.Errorf("invalid args, %s", ReplaceRegexUsageMsg)
|
||||
}
|
||||
|
||||
extraCfg := &ReplaceExtra{}
|
||||
if err := cfg.UnpackFromCommonCfg(extra, extraCfg).Validate().Defaults().Do(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extraCfg.Max == 0 {
|
||||
extraCfg.Max = -1
|
||||
}
|
||||
|
||||
return &Replace{
|
||||
key: args[0],
|
||||
extra: extraCfg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *Replace) act(e api.Event) error {
|
||||
val := eventops.GetString(e, r.key)
|
||||
replaceResult := strings.Replace(val, r.extra.Old, r.extra.New, r.extra.Max)
|
||||
eventops.Set(e, r.key, replaceResult)
|
||||
return nil
|
||||
}
|
||||
|
||||
type ReplaceRegex struct {
|
||||
key string
|
||||
reg *regexp.Regexp
|
||||
extra *ReplaceRegexExtra
|
||||
}
|
||||
|
||||
type ReplaceRegexExtra struct {
|
||||
Expression string `yaml:"expression,omitempty" validate:"required"`
|
||||
Replace string `yaml:"replace,omitempty" validate:"required"`
|
||||
}
|
||||
|
||||
func NewReplaceRegex(args []string, extra cfg.CommonCfg) (*ReplaceRegex, error) {
|
||||
aCount := len(args)
|
||||
if aCount != 1 {
|
||||
return nil, errors.Errorf("invalid args, %s", ReplaceUsageMsg)
|
||||
}
|
||||
|
||||
extraCfg := &ReplaceRegexExtra{}
|
||||
if err := cfg.UnpackFromCommonCfg(extra, extraCfg).Validate().Defaults().Do(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
expr, err := extraCfg.compile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ReplaceRegex{
|
||||
key: args[0],
|
||||
reg: expr,
|
||||
extra: extraCfg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *ReplaceRegex) act(e api.Event) error {
|
||||
val := eventops.GetString(e, r.key)
|
||||
match := r.reg.ReplaceAllString(val, r.extra.Replace)
|
||||
eventops.Set(e, r.key, match)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReplaceRegexExtra) compile() (*regexp.Regexp, error) {
|
||||
if r.Expression == "" {
|
||||
return nil, errors.New("regex expression is required")
|
||||
}
|
||||
|
||||
expr, err := regexp.Compile(r.Expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return expr, nil
|
||||
}
|
|
@ -0,0 +1,163 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package action
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestReplace_act(t *testing.T) {
|
||||
log.InitDefaultLogger()
|
||||
type fields struct {
|
||||
key string
|
||||
extra cfg.CommonCfg
|
||||
}
|
||||
type args struct {
|
||||
e api.Event
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want api.Event
|
||||
}{
|
||||
{
|
||||
name: "replace body",
|
||||
fields: fields{
|
||||
key: "body",
|
||||
extra: cfg.CommonCfg{
|
||||
"old": "c",
|
||||
"new": "C",
|
||||
"max": 3,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"body": "2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54Ce5d87-b94C-C40a-74a7-9cd375289334",
|
||||
}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
{
|
||||
name: "replace all",
|
||||
fields: fields{
|
||||
key: "body",
|
||||
extra: cfg.CommonCfg{
|
||||
"old": "3",
|
||||
"new": "W",
|
||||
"max": -1,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"body": "202W/09/05 12:W2:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cdW75289WW4",
|
||||
}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
{
|
||||
name: "replace all with Max default -1",
|
||||
fields: fields{
|
||||
key: "body",
|
||||
extra: cfg.CommonCfg{
|
||||
"old": "3",
|
||||
"new": "W",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"body": "202W/09/05 12:W2:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cdW75289WW4",
|
||||
}, []byte(`2023/09/05 12:32:01 error zap.go:66: 192.168.0.1 54ce5d87-b94c-c40a-74a7-9cd375289334`)),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r, _ := NewReplace([]string{tt.fields.key}, tt.fields.extra)
|
||||
err := r.act(tt.args.e)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.want, tt.args.e)
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceRegexAct(t *testing.T) {
|
||||
log.InitDefaultLogger()
|
||||
type fields struct {
|
||||
key string
|
||||
extra cfg.CommonCfg
|
||||
}
|
||||
type args struct {
|
||||
e api.Event
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want api.Event
|
||||
}{
|
||||
{
|
||||
name: "replaceRegex body",
|
||||
fields: fields{
|
||||
key: "body",
|
||||
extra: cfg.CommonCfg{
|
||||
"replace": "ERROR",
|
||||
"expression": "E(\\S+)",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{}, []byte(`E0906 02:09:23.872499 1 controller.go:114] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable`)),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"body": "ERROR 02:09:23.872499 1 controller.go:114] loading OpenAPI spec for \"v1beta1.metrics.k8s.io\" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable",
|
||||
}, []byte(`E0906 02:09:23.872499 1 controller.go:114] loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503, Body: service unavailable`)),
|
||||
},
|
||||
{
|
||||
name: "replaceRegex body Password",
|
||||
fields: fields{
|
||||
key: "body",
|
||||
extra: cfg.CommonCfg{
|
||||
"replace": "******",
|
||||
"expression": "password (\\S+)",
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{}, []byte(`2023-09-06T11:12:00.000000001Z stderr P i'm a log message who has sensitive information with password xyz!`)),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"body": "2023-09-06T11:12:00.000000001Z stderr P i'm a log message who has sensitive information with ******",
|
||||
}, []byte(`2023-09-06T11:12:00.000000001Z stderr P i'm a log message who has sensitive information with password xyz!`)),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r, _ := NewReplaceRegex([]string{tt.fields.key}, tt.fields.extra)
|
||||
err := r.act(tt.args.e)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.want, tt.args.e)
|
||||
|
||||
})
|
||||
}
|
||||
}
|
|
@ -26,12 +26,30 @@ import (
|
|||
const (
|
||||
SetName = "set"
|
||||
SetUsageMsg = "usage: set(key, value)"
|
||||
|
||||
SetFloatName = "setFloat"
|
||||
SetFloatMsg = "usage: setFloat(key, value)"
|
||||
|
||||
SetIntName = "setInt"
|
||||
SetIntMsg = "usage: setInt(key, value)"
|
||||
|
||||
SetBoolName = "setBool"
|
||||
SetBoolMsg = "usage: setBool(key, value)"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterAction(SetName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewSet(args)
|
||||
})
|
||||
RegisterAction(SetFloatName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewStrConvSet(args, "float", SetFloatMsg)
|
||||
})
|
||||
RegisterAction(SetIntName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewStrConvSet(args, "int", SetIntMsg)
|
||||
})
|
||||
RegisterAction(SetBoolName, func(args []string, extra cfg.CommonCfg) (Action, error) {
|
||||
return NewStrConvSet(args, "bool", SetBoolMsg)
|
||||
})
|
||||
}
|
||||
|
||||
// Set is same with Add
|
||||
|
@ -55,3 +73,31 @@ func (a *Set) act(e api.Event) error {
|
|||
eventops.Set(e, a.key, a.value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type StrConvSet struct {
|
||||
key string
|
||||
value string
|
||||
dstType string
|
||||
}
|
||||
|
||||
func NewStrConvSet(args []string, destType, msg string) (*StrConvSet, error) {
|
||||
if len(args) != 2 {
|
||||
return nil, errors.Errorf("invalid args, %s", msg)
|
||||
}
|
||||
|
||||
return &StrConvSet{
|
||||
key: args[0],
|
||||
value: args[1],
|
||||
dstType: destType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *StrConvSet) act(e api.Event) error {
|
||||
dstVal, err := convert(a.value, a.dstType)
|
||||
if err != nil {
|
||||
return errors.Errorf("convert field %s value %v to type %s error: %v", a.key, a.value, a.dstType, err)
|
||||
}
|
||||
|
||||
eventops.Set(e, a.key, dstVal)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package action
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStrConvertSet_Act(t *testing.T) {
|
||||
assertions := assert.New(t)
|
||||
|
||||
type fields struct {
|
||||
key string
|
||||
value string
|
||||
dstType string
|
||||
}
|
||||
type args struct {
|
||||
e api.Event
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want api.Event
|
||||
}{
|
||||
{
|
||||
name: "set bool",
|
||||
fields: fields{
|
||||
key: "a.b",
|
||||
value: "true",
|
||||
dstType: typeBoolean,
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{
|
||||
"b": true,
|
||||
},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
{
|
||||
name: "set int",
|
||||
fields: fields{
|
||||
key: "a.b",
|
||||
value: "200",
|
||||
dstType: typeInteger,
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{
|
||||
"b": int64(200),
|
||||
},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
{
|
||||
name: "set float",
|
||||
fields: fields{
|
||||
key: "a.b",
|
||||
value: "200",
|
||||
dstType: typeFloat,
|
||||
},
|
||||
args: args{
|
||||
e: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
want: event.NewEvent(map[string]interface{}{
|
||||
"a": map[string]interface{}{
|
||||
"b": float64(200),
|
||||
},
|
||||
}, []byte("this is body")),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &StrConvSet{
|
||||
key: tt.fields.key,
|
||||
value: tt.fields.value,
|
||||
dstType: tt.fields.dstType,
|
||||
}
|
||||
err := s.act(tt.args.e)
|
||||
assertions.NoError(err)
|
||||
assertions.Equal(tt.want, tt.args.e)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -51,5 +51,10 @@ func NewEqual(args []string) (*Equal, error) {
|
|||
}
|
||||
|
||||
func (eq *Equal) Check(e api.Event) bool {
|
||||
return eq.value == eventops.Get(e, eq.field)
|
||||
value := eventops.Get(e, eq.field)
|
||||
if byteValue, ok := value.([]byte); ok {
|
||||
value = string(byteValue)
|
||||
}
|
||||
|
||||
return eq.value == value
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/util/eventops"
|
||||
"github.com/pkg/errors"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -41,10 +42,26 @@ func init() {
|
|||
}
|
||||
|
||||
func NewMatch(args []string) (*Match, error) {
|
||||
if len(args) != 2 {
|
||||
if len(args) < 2 {
|
||||
return nil, errors.Errorf("invalid args, %s", MatchUsageMsg)
|
||||
}
|
||||
|
||||
if len(args) > 2 {
|
||||
key := args[0]
|
||||
|
||||
var matchValue strings.Builder
|
||||
for i := 1; i < len(args); i++ {
|
||||
matchValue.WriteString(args[i])
|
||||
if i != len(args)-1 {
|
||||
matchValue.WriteString(",")
|
||||
}
|
||||
}
|
||||
args = []string{
|
||||
key,
|
||||
matchValue.String(),
|
||||
}
|
||||
}
|
||||
|
||||
regex, err := regexp.Compile(args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -19,13 +19,13 @@ package loggie
|
|||
import (
|
||||
"fmt"
|
||||
"github.com/gdamore/tcell/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/ops/dashboard/content"
|
||||
"github.com/loggie-io/loggie/pkg/ops/dashboard/gui"
|
||||
"github.com/loggie-io/loggie/pkg/ops/helper"
|
||||
"github.com/loggie-io/loggie/pkg/util"
|
||||
jsoniter "github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/rivo/tview"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
@ -91,7 +91,7 @@ func (p *PipelineDetailPanel) SetData() {
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
p.SetText(err.Error()).SetTextColor(tcell.ColorRed)
|
||||
return
|
||||
|
@ -234,7 +234,7 @@ func (p *LogStatusPanel) SetData() {
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -174,6 +174,9 @@ func diffPipes(request *http.Request) string {
|
|||
|
||||
func queryPipelineConfig(cfgInPath *control.PipelineConfig, pipelineQuery string, sourceQuery string) map[string]pipeline.Config {
|
||||
result := make(map[string]pipeline.Config)
|
||||
if cfgInPath == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
setResult := func(pipData pipeline.Config, srcData ...*source.Config) {
|
||||
pip, ok := result[pipData.Name]
|
||||
|
@ -354,7 +357,7 @@ func fileInfoMetrics(request *http.Request) (active int, inActive int, metric ma
|
|||
}
|
||||
|
||||
func CRLF() string {
|
||||
return fmt.Sprintf("\n")
|
||||
return "\n"
|
||||
}
|
||||
|
||||
func SprintfWithLF(format string, a ...interface{}) string {
|
||||
|
|
|
@ -18,7 +18,7 @@ package helper
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
|
@ -158,7 +158,7 @@ func (h *Helper) helperLogCollectionHandler(writer http.ResponseWriter, request
|
|||
}
|
||||
|
||||
func resultReturn(writer http.ResponseWriter, result interface{}) {
|
||||
out, err := jsoniter.Marshal(result)
|
||||
out, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
writer.Write([]byte(err.Error()))
|
||||
|
|
|
@ -18,7 +18,7 @@ package pipeline
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
timeutil "github.com/loggie-io/loggie/pkg/util/time"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -46,10 +46,9 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
FieldsUnderRoot = event.PrivateKeyPrefix + "FieldsUnderRoot"
|
||||
FieldsUnderKey = event.PrivateKeyPrefix + "FieldsUnderKey"
|
||||
|
||||
fieldsFromPathMaxBytes = 1024
|
||||
|
||||
defaultTsLayout = "2006-01-02T15:04:05.000Z"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -1032,7 +1031,7 @@ func (p *Pipeline) initFieldsFromPath(fieldsFromPath map[string]string) {
|
|||
}
|
||||
|
||||
for k, pathKey := range fieldsFromPath {
|
||||
out, err := ioutil.ReadFile(pathKey)
|
||||
out, err := os.ReadFile(pathKey)
|
||||
if err != nil {
|
||||
log.Error("init fieldsFromPath %s failed, read file %s err: %v", k, pathKey, err)
|
||||
continue
|
||||
|
@ -1054,11 +1053,10 @@ func (p *Pipeline) initFieldsFromPath(fieldsFromPath map[string]string) {
|
|||
|
||||
func (p *Pipeline) fillEventMetaAndHeader(e api.Event, config source.Config) {
|
||||
// add meta fields
|
||||
e.Meta().Set(event.SystemProductTimeKey, time.Now())
|
||||
now := time.Now()
|
||||
e.Meta().Set(event.SystemProductTimeKey, now)
|
||||
e.Meta().Set(event.SystemPipelineKey, p.name)
|
||||
e.Meta().Set(event.SystemSourceKey, config.Name)
|
||||
e.Meta().Set(FieldsUnderRoot, config.FieldsUnderRoot)
|
||||
e.Meta().Set(FieldsUnderKey, config.FieldsUnderKey)
|
||||
|
||||
header := e.Header()
|
||||
if header == nil {
|
||||
|
@ -1073,6 +1071,28 @@ func (p *Pipeline) fillEventMetaAndHeader(e api.Event, config source.Config) {
|
|||
|
||||
// add header source fields from file
|
||||
AddSourceFields(header, p.pathMap, config.FieldsUnderRoot, config.FieldsUnderKey)
|
||||
|
||||
// remap timestamp
|
||||
if config.TimestampKey != "" {
|
||||
layout := config.TimestampLayout
|
||||
if layout == "" {
|
||||
layout = defaultTsLayout
|
||||
}
|
||||
|
||||
// conf.Location could be "" or "UTC" or "Local"
|
||||
// default "" indicate "UTC"
|
||||
ts, err := timeutil.Format(now, config.TimestampLocation, layout)
|
||||
if err != nil {
|
||||
log.Warn("time format system product timestamp err: %+v", err)
|
||||
return
|
||||
}
|
||||
header[config.TimestampKey] = ts
|
||||
}
|
||||
|
||||
if config.BodyKey != "" {
|
||||
header[config.BodyKey] = util.ByteToStringUnsafe(e.Body())
|
||||
e.Fill(e.Meta(), header, []byte{})
|
||||
}
|
||||
}
|
||||
|
||||
func AddSourceFields(header map[string]interface{}, fields map[string]interface{}, underRoot bool, fieldsKey string) {
|
||||
|
|
|
@ -18,8 +18,8 @@ package alertwebhook
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
|
|
@ -18,16 +18,13 @@ package json
|
|||
|
||||
import (
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/util"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
eventer "github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/sink/codec"
|
||||
)
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
)
|
||||
|
||||
type Json struct {
|
||||
|
@ -73,7 +70,7 @@ func (j *Json) Encode(e api.Event) ([]byte, error) {
|
|||
beatsFormat(e)
|
||||
} else if len(e.Body()) != 0 {
|
||||
// put body in header
|
||||
header[eventer.Body] = string(e.Body())
|
||||
header[eventer.Body] = util.ByteToStringUnsafe(e.Body())
|
||||
}
|
||||
|
||||
var result []byte
|
||||
|
|
|
@ -17,7 +17,6 @@ limitations under the License.
|
|||
package elasticsearch
|
||||
|
||||
// BulkIndexerResponse represents the Elasticsearch response.
|
||||
//
|
||||
type BulkIndexerResponse struct {
|
||||
Took int `json:"took"`
|
||||
HasErrors bool `json:"errors"`
|
||||
|
@ -25,7 +24,6 @@ type BulkIndexerResponse struct {
|
|||
}
|
||||
|
||||
// BulkIndexerResponseItem represents the Elasticsearch response item.
|
||||
//
|
||||
type BulkIndexerResponseItem struct {
|
||||
Index string `json:"_index"`
|
||||
DocumentID string `json:"_id"`
|
||||
|
|
|
@ -21,7 +21,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
es "github.com/elastic/go-elasticsearch/v7"
|
||||
"github.com/elastic/go-elasticsearch/v7/esapi"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
eventer "github.com/loggie-io/loggie/pkg/core/event"
|
||||
|
@ -30,7 +29,7 @@ import (
|
|||
"github.com/loggie-io/loggie/pkg/util/pattern"
|
||||
"github.com/loggie-io/loggie/pkg/util/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
@ -44,8 +43,6 @@ type ClientSet struct {
|
|||
cli *es.Client
|
||||
opType string
|
||||
|
||||
buf *bytes.Buffer
|
||||
aux []byte
|
||||
reqCount int
|
||||
|
||||
codec codec.Codec
|
||||
|
@ -54,6 +51,70 @@ type ClientSet struct {
|
|||
documentIdPattern *pattern.Pattern
|
||||
}
|
||||
|
||||
type bulkRequest struct {
|
||||
lines []line
|
||||
}
|
||||
|
||||
type line struct {
|
||||
meta []byte
|
||||
body []byte
|
||||
}
|
||||
|
||||
func (b *bulkRequest) body() []byte {
|
||||
var buf bytes.Buffer
|
||||
size := 0
|
||||
for _, l := range b.lines {
|
||||
size += len(l.meta) + len(l.body) + 1
|
||||
}
|
||||
buf.Grow(size)
|
||||
|
||||
for _, l := range b.lines {
|
||||
buf.Write(l.meta)
|
||||
buf.Write(l.body)
|
||||
buf.WriteRune('\n')
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (b *bulkRequest) add(body []byte, action string, documentID string, index string) {
|
||||
if len(body) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var aux []byte
|
||||
|
||||
// { "index" : { "_index" : "test", "_id" : "1" } }
|
||||
buf.WriteRune('{')
|
||||
aux = strconv.AppendQuote(aux, action)
|
||||
buf.Write(aux)
|
||||
aux = aux[:0]
|
||||
buf.WriteRune(':')
|
||||
buf.WriteRune('{')
|
||||
if documentID != "" {
|
||||
buf.WriteString(`"_id":`)
|
||||
aux = strconv.AppendQuote(aux, documentID)
|
||||
buf.Write(aux)
|
||||
aux = aux[:0]
|
||||
}
|
||||
|
||||
if index != "" {
|
||||
buf.WriteString(`"_index":`)
|
||||
aux = strconv.AppendQuote(aux, index)
|
||||
buf.Write(aux)
|
||||
}
|
||||
buf.WriteRune('}')
|
||||
buf.WriteRune('}')
|
||||
buf.WriteRune('\n')
|
||||
|
||||
l := line{
|
||||
meta: buf.Bytes(),
|
||||
body: body,
|
||||
}
|
||||
|
||||
b.lines = append(b.lines, l)
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
Bulk(ctx context.Context, batch api.Batch) error
|
||||
Stop()
|
||||
|
@ -68,7 +129,7 @@ func NewClient(config *Config, cod codec.Codec, indexPattern *pattern.Pattern, d
|
|||
}
|
||||
var ca []byte
|
||||
if config.CACertPath != "" {
|
||||
caData, err := ioutil.ReadFile(config.CACertPath)
|
||||
caData, err := os.ReadFile(config.CACertPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -76,14 +137,16 @@ func NewClient(config *Config, cod codec.Codec, indexPattern *pattern.Pattern, d
|
|||
}
|
||||
|
||||
cfg := es.Config{
|
||||
Addresses: config.Hosts,
|
||||
DisableRetry: true,
|
||||
Username: config.UserName,
|
||||
Password: config.Password,
|
||||
APIKey: config.APIKey,
|
||||
ServiceToken: config.ServiceToken,
|
||||
CompressRequestBody: config.Compress,
|
||||
CACert: ca,
|
||||
Addresses: config.Hosts,
|
||||
DisableRetry: true,
|
||||
Username: config.UserName,
|
||||
Password: config.Password,
|
||||
APIKey: config.APIKey,
|
||||
ServiceToken: config.ServiceToken,
|
||||
CompressRequestBody: config.Compress,
|
||||
DiscoverNodesOnStart: config.DiscoverNodesOnStart,
|
||||
DiscoverNodesInterval: config.DiscoverNodesInterval,
|
||||
CACert: ca,
|
||||
}
|
||||
cli, err := es.NewClient(cfg)
|
||||
if err != nil {
|
||||
|
@ -94,8 +157,6 @@ func NewClient(config *Config, cod codec.Codec, indexPattern *pattern.Pattern, d
|
|||
config: config,
|
||||
cli: cli,
|
||||
opType: config.OpType,
|
||||
buf: bytes.NewBuffer(make([]byte, 0, config.SendBuffer)),
|
||||
aux: make([]byte, 0, 512),
|
||||
reqCount: 0,
|
||||
codec: cod,
|
||||
indexPattern: indexPattern,
|
||||
|
@ -109,16 +170,11 @@ func (c *ClientSet) Bulk(ctx context.Context, batch api.Batch) error {
|
|||
return errors.WithMessagef(eventer.ErrorDropEvent, "request to elasticsearch bulk is null")
|
||||
}
|
||||
|
||||
bulkReq := esapi.BulkRequest{}
|
||||
|
||||
if c.config.Etype != "" {
|
||||
bulkReq.DocumentType = c.config.Etype
|
||||
}
|
||||
defer func() {
|
||||
c.buf.Reset()
|
||||
c.reqCount = 0
|
||||
}()
|
||||
|
||||
req := bulkRequest{}
|
||||
for _, event := range batch.Events() {
|
||||
headerObj := runtime.NewObject(event.Header())
|
||||
|
||||
|
@ -160,19 +216,14 @@ func (c *ClientSet) Bulk(ctx context.Context, batch api.Batch) error {
|
|||
}
|
||||
|
||||
c.reqCount++
|
||||
if err := c.writeMeta(c.opType, docId, idx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.writeBody(data); err != nil {
|
||||
return err
|
||||
}
|
||||
req.add(data, c.opType, docId, idx)
|
||||
}
|
||||
|
||||
if c.reqCount == 0 {
|
||||
return errors.WithMessagef(eventer.ErrorDropEvent, "request to elasticsearch bulk is null")
|
||||
}
|
||||
|
||||
resp, err := c.cli.Bulk(bytes.NewReader(c.buf.Bytes()),
|
||||
resp, err := c.cli.Bulk(bytes.NewReader(req.body()),
|
||||
c.cli.Bulk.WithDocumentType(c.config.Etype),
|
||||
c.cli.Bulk.WithParameters(c.config.Params),
|
||||
c.cli.Bulk.WithHeader(c.config.Headers))
|
||||
|
@ -213,39 +264,3 @@ func (c *ClientSet) Bulk(ctx context.Context, batch api.Batch) error {
|
|||
func (c *ClientSet) Stop() {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
// { "index" : { "_index" : "test", "_id" : "1" } }
|
||||
func (c *ClientSet) writeMeta(action string, documentID string, index string) error {
|
||||
c.buf.WriteRune('{')
|
||||
c.aux = strconv.AppendQuote(c.aux, action)
|
||||
c.buf.Write(c.aux)
|
||||
c.aux = c.aux[:0]
|
||||
c.buf.WriteRune(':')
|
||||
c.buf.WriteRune('{')
|
||||
if documentID != "" {
|
||||
c.buf.WriteString(`"_id":`)
|
||||
c.aux = strconv.AppendQuote(c.aux, documentID)
|
||||
c.buf.Write(c.aux)
|
||||
c.aux = c.aux[:0]
|
||||
}
|
||||
|
||||
if index != "" {
|
||||
c.buf.WriteString(`"_index":`)
|
||||
c.aux = strconv.AppendQuote(c.aux, index)
|
||||
c.buf.Write(c.aux)
|
||||
c.aux = c.aux[:0]
|
||||
}
|
||||
c.buf.WriteRune('}')
|
||||
c.buf.WriteRune('}')
|
||||
c.buf.WriteRune('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClientSet) writeBody(body []byte) error {
|
||||
if len(body) == 0 {
|
||||
return nil
|
||||
}
|
||||
c.buf.Write(body)
|
||||
c.buf.WriteRune('\n')
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -16,33 +16,29 @@ limitations under the License.
|
|||
|
||||
package elasticsearch
|
||||
|
||||
import "github.com/loggie-io/loggie/pkg/util/pattern"
|
||||
import (
|
||||
"github.com/loggie-io/loggie/pkg/util/pattern"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Hosts []string `yaml:"hosts,omitempty" validate:"required"`
|
||||
UserName string `yaml:"username,omitempty"`
|
||||
Password string `yaml:"password,omitempty"`
|
||||
Index string `yaml:"index,omitempty"`
|
||||
Headers map[string]string `yaml:"headers,omitempty"`
|
||||
Params map[string]string `yaml:"parameters,omitempty"`
|
||||
IfRenderIndexFailed RenderIndexFail `yaml:"ifRenderIndexFailed,omitempty"`
|
||||
Etype string `yaml:"etype,omitempty"` // elasticsearch type, for v5.* backward compatibility
|
||||
DocumentId string `yaml:"documentId,omitempty"`
|
||||
Sniff *bool `yaml:"sniff,omitempty"` // deprecated
|
||||
APIKey string `yaml:"apiKey,omitempty"`
|
||||
ServiceToken string `yaml:"serviceToken,omitempty"`
|
||||
CACertPath string `yaml:"caCertPath,omitempty"`
|
||||
Compress bool `yaml:"compress,omitempty"`
|
||||
Gzip *bool `yaml:"gzip,omitempty"` // deprecated, use compress above
|
||||
OpType string `yaml:"opType,omitempty" default:"index"`
|
||||
|
||||
SendBuffer int `yaml:"sendBufferBytes,omitempty" default:"131072" validate:"gte=0"`
|
||||
}
|
||||
|
||||
type TLS struct {
|
||||
CAFile string `yaml:"caFile,omitempty"`
|
||||
CertFile string `yaml:"certFile,omitempty"`
|
||||
KeyFile string `yaml:"keyFile,omitempty"`
|
||||
Hosts []string `yaml:"hosts,omitempty" validate:"required"`
|
||||
UserName string `yaml:"username,omitempty"`
|
||||
Password string `yaml:"password,omitempty"`
|
||||
Index string `yaml:"index,omitempty"`
|
||||
Headers map[string]string `yaml:"headers,omitempty"`
|
||||
Params map[string]string `yaml:"parameters,omitempty"`
|
||||
IfRenderIndexFailed RenderIndexFail `yaml:"ifRenderIndexFailed,omitempty"`
|
||||
Etype string `yaml:"etype,omitempty"` // elasticsearch type, for v5.* backward compatibility
|
||||
DocumentId string `yaml:"documentId,omitempty"`
|
||||
APIKey string `yaml:"apiKey,omitempty"`
|
||||
ServiceToken string `yaml:"serviceToken,omitempty"`
|
||||
CACertPath string `yaml:"caCertPath,omitempty"`
|
||||
Compress bool `yaml:"compress,omitempty"`
|
||||
Gzip *bool `yaml:"gzip,omitempty"` // deprecated, use compress above
|
||||
OpType string `yaml:"opType,omitempty" default:"index"`
|
||||
DiscoverNodesOnStart bool `yaml:"discoverNodesOnStart,omitempty"`
|
||||
DiscoverNodesInterval time.Duration `yaml:"discoverNodesInterval,omitempty"`
|
||||
}
|
||||
|
||||
type RenderIndexFail struct {
|
||||
|
|
|
@ -17,8 +17,8 @@ limitations under the License.
|
|||
package franz
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/twmb/franz-go/pkg/kgo"
|
||||
"strings"
|
||||
|
|
|
@ -99,6 +99,7 @@ func (s *Sink) Start() error {
|
|||
kgo.SeedBrokers(c.Brokers...),
|
||||
kgo.ProducerBatchCompression(getCompression(c.Compression)),
|
||||
kgo.WithLogger(&logger),
|
||||
kgo.AllowAutoTopicCreation(),
|
||||
}
|
||||
|
||||
if c.BatchSize > 0 {
|
||||
|
@ -222,7 +223,7 @@ func (s *Sink) Consume(batch api.Batch) api.Result {
|
|||
}
|
||||
|
||||
func (s *Sink) selectTopic(e api.Event) (string, error) {
|
||||
return s.topicPattern.WithObject(runtime.NewObject(e.Header())).Render()
|
||||
return s.topicPattern.WithObject(runtime.NewObject(e.Header())).RenderWithStrict()
|
||||
}
|
||||
|
||||
func (s *Sink) getPartitionKey(e api.Event) (string, error) {
|
||||
|
|
|
@ -23,12 +23,12 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/core/result"
|
||||
"github.com/loggie-io/loggie/pkg/pipeline"
|
||||
pb "github.com/loggie-io/loggie/pkg/sink/grpc/pb"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/resolver"
|
||||
|
@ -36,10 +36,6 @@ import (
|
|||
|
||||
const Type = "grpc"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
func init() {
|
||||
pipeline.Register(api.SINK, Type, makeSink)
|
||||
}
|
||||
|
|
|
@ -19,8 +19,8 @@ package loki
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
|
|
@ -17,10 +17,13 @@ limitations under the License.
|
|||
package sls
|
||||
|
||||
type Config struct {
|
||||
Endpoint string `yaml:"endpoint,omitempty" validate:"required"`
|
||||
AccessKeyId string `yaml:"accessKeyId,omitempty" validate:"required"`
|
||||
AccessKeySecret string `yaml:"accessKeySecret,omitempty" validate:"required"`
|
||||
Project string `yaml:"project,omitempty" validate:"required"`
|
||||
LogStore string `yaml:"logstore,omitempty" validate:"required"`
|
||||
Topic string `yaml:"topic,omitempty"` // empty topic is supported in sls storage
|
||||
Endpoint string `yaml:"endpoint,omitempty" validate:"required"`
|
||||
AccessKeyId string `yaml:"accessKeyId,omitempty"`
|
||||
AccessKeySecret string `yaml:"accessKeySecret,omitempty"`
|
||||
CredentialProviderCommand string `yaml:"cridentialProviderCommand,omitempty"`
|
||||
CredentialProviderArgs []string `yaml:"credentialProviderArgs,omitempty"`
|
||||
CredentialProviderTimeout int `yaml:"credentialProviderTimeout,omitempty" default:"5"`
|
||||
Project string `yaml:"project,omitempty" validate:"required"`
|
||||
LogStore string `yaml:"logstore,omitempty" validate:"required"`
|
||||
Topic string `yaml:"topic,omitempty"` // empty topic is supported in sls storage
|
||||
}
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
package sls
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type credentialProvider struct {
|
||||
command string
|
||||
arguments []string
|
||||
timeout int
|
||||
}
|
||||
|
||||
type stsCredential struct {
|
||||
AccessKeyId string `json:"AccessKeyId"`
|
||||
AccessKeySecret string `json:"AccessKeySecret"`
|
||||
SecurityToken string `json:"SecurityToken"`
|
||||
Expiration string `json:"Expiration"`
|
||||
}
|
||||
|
||||
func newCredentialProvider(command string, arguments []string, timeout int) *credentialProvider {
|
||||
return &credentialProvider{
|
||||
command: command,
|
||||
arguments: arguments,
|
||||
timeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *credentialProvider) GetCredentials() (accessKeyId string, accessKeySecret string, securityToken string, expiration time.Time, err error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.timeout)*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var stdout strings.Builder
|
||||
cmd := exec.CommandContext(ctx, c.command, c.arguments...)
|
||||
cmd.Stdout = &stdout
|
||||
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
return "", "", "", time.Time{}, fmt.Errorf("run credential provider command failed: %w", err)
|
||||
}
|
||||
|
||||
var sts stsCredential
|
||||
err = json.Unmarshal([]byte(stdout.String()), &sts)
|
||||
if err != nil {
|
||||
fmt.Printf("stdout: %s\n", stdout.String())
|
||||
return "", "", "", time.Time{}, fmt.Errorf("unmarshal sts credential failed: %w", err)
|
||||
}
|
||||
|
||||
expiration, err = time.Parse(time.RFC3339, sts.Expiration)
|
||||
if err != nil {
|
||||
return "", "", "", time.Time{}, fmt.Errorf("parse sts credential expiration failed: %w", err)
|
||||
}
|
||||
|
||||
return sts.AccessKeyId, sts.AccessKeySecret, sts.SecurityToken, expiration, nil
|
||||
}
|
|
@ -46,6 +46,7 @@ type Sink struct {
|
|||
config *Config
|
||||
|
||||
client sls.ClientInterface
|
||||
shutdownChan chan struct{}
|
||||
}
|
||||
|
||||
func NewSink() *Sink {
|
||||
|
@ -77,7 +78,22 @@ func (s *Sink) Init(context api.Context) error {
|
|||
func (s *Sink) Start() error {
|
||||
log.Info("starting %s", s.String())
|
||||
conf := s.config
|
||||
s.client = sls.CreateNormalInterface(conf.Endpoint, conf.AccessKeyId, conf.AccessKeySecret, "")
|
||||
if (conf.AccessKeyId == "" || conf.AccessKeySecret == "") && conf.CredentialProviderCommand == "" {
|
||||
return errors.New("Neither access key pair nor credential provider command is provided")
|
||||
}
|
||||
|
||||
if conf.AccessKeyId != "" {
|
||||
s.client = sls.CreateNormalInterface(conf.Endpoint, conf.AccessKeyId, conf.AccessKeySecret, "")
|
||||
} else {
|
||||
var err error
|
||||
s.shutdownChan = make(chan struct{})
|
||||
|
||||
provider := newCredentialProvider(conf.CredentialProviderCommand, conf.CredentialProviderArgs, conf.CredentialProviderTimeout)
|
||||
s.client, err = sls.CreateTokenAutoUpdateClient(conf.Endpoint, provider.GetCredentials, s.shutdownChan)
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "Create sls client failed")
|
||||
}
|
||||
}
|
||||
|
||||
// Check if project exist
|
||||
exist, err := s.client.CheckProjectExist(conf.Project)
|
||||
|
@ -106,6 +122,11 @@ func (s *Sink) Stop() {
|
|||
if s.client != nil {
|
||||
s.client.Close()
|
||||
}
|
||||
|
||||
if s.shutdownChan != nil {
|
||||
close(s.shutdownChan)
|
||||
s.shutdownChan = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Sink) Consume(batch api.Batch) api.Result {
|
||||
|
|
|
@ -17,17 +17,13 @@ limitations under the License.
|
|||
package json
|
||||
|
||||
import (
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/source/codec"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
const (
|
||||
Type = "json"
|
||||
)
|
||||
|
|
|
@ -18,7 +18,7 @@ package elasticsearch
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/olivere/elastic/v7"
|
||||
"github.com/pkg/errors"
|
||||
"time"
|
||||
|
|
|
@ -45,6 +45,7 @@ type CollectConfig struct {
|
|||
RereadTruncated bool `yaml:"rereadTruncated,omitempty" default:"true"` // Read from the beginning when the file is truncated
|
||||
FirstNBytesForIdentifier int `yaml:"firstNBytesForIdentifier,omitempty" default:"128" validate:"gte=10"` // If the file size is smaller than `firstNBytesForIdentifier`, it will not be collected
|
||||
AddonMeta bool `yaml:"addonMeta,omitempty"`
|
||||
AddonMetaSchema AddonMetaSchema `yaml:"addonMetaSchema,omitempty"`
|
||||
excludeFilePatterns []*regexp.Regexp
|
||||
Charset string `yaml:"charset,omitempty" default:"utf-8"`
|
||||
|
||||
|
@ -54,6 +55,12 @@ type CollectConfig struct {
|
|||
FdHoldTimeoutWhenRemove time.Duration `yaml:"fdHoldTimeoutWhenRemove,omitempty" default:"5m"`
|
||||
}
|
||||
|
||||
type AddonMetaSchema struct {
|
||||
Fields map[string]string `yaml:"fields,omitempty"`
|
||||
FieldsUnderRoot bool `yaml:"underRoot,omitempty"`
|
||||
FieldsUnderKey string `yaml:"key,omitempty" default:"state"`
|
||||
}
|
||||
|
||||
type LineDelimiterValue struct {
|
||||
Charset string `yaml:"charset,omitempty" default:"utf-8"`
|
||||
LineType string `yaml:"type,omitempty" default:"auto"`
|
||||
|
|
|
@ -18,8 +18,8 @@ package file
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
|
|
@ -71,6 +71,19 @@ type Source struct {
|
|||
multilineProcessor *MultiProcessor
|
||||
mTask *MultiTask
|
||||
codec codec.Codec
|
||||
|
||||
addonMetaField *AddonMetaFields
|
||||
}
|
||||
|
||||
type AddonMetaFields struct {
|
||||
Pipeline string `yaml:"pipeline,omitempty"`
|
||||
Source string `yaml:"source,omitempty"`
|
||||
Filename string `yaml:"filename,omitempty"`
|
||||
Timestamp string `yaml:"timestamp,omitempty"`
|
||||
Offset string `yaml:"offset,omitempty"`
|
||||
Bytes string `yaml:"bytes,omitempty"`
|
||||
Line string `yaml:"line,omitempty"`
|
||||
Hostname string `yaml:"hostname,omitempty"`
|
||||
}
|
||||
|
||||
func (s *Source) Config() interface{} {
|
||||
|
@ -109,6 +122,10 @@ func (s *Source) Init(context api.Context) error {
|
|||
s.config.ReaderConfig.MultiConfig.Timeout = 2 * inactiveTimeout
|
||||
}
|
||||
|
||||
if s.config.CollectConfig.AddonMeta {
|
||||
s.addonMetaField = addonMetaFieldsConvert(s.config.CollectConfig.AddonMetaSchema.Fields)
|
||||
}
|
||||
|
||||
// init reader chan size
|
||||
s.config.ReaderConfig.readChanSize = s.config.WatchConfig.MaxOpenFds
|
||||
|
||||
|
@ -186,7 +203,7 @@ func (s *Source) ProductLoop(productFunc api.ProductFunc) {
|
|||
s.productFunc = productFunc
|
||||
s.productFunc = jobFieldsProductFunc(s.productFunc, s.rawSourceConfig)
|
||||
if s.config.CollectConfig.AddonMeta {
|
||||
s.productFunc = addonMetaProductFunc(s.productFunc)
|
||||
s.productFunc = addonMetaProductFunc(s.productFunc, s.addonMetaField, s.config.CollectConfig.AddonMetaSchema)
|
||||
}
|
||||
if s.config.ReaderConfig.MultiConfig.Active {
|
||||
s.mTask = NewMultiTask(s.epoch, s.name, s.config.ReaderConfig.MultiConfig, s.eventPool, s.productFunc)
|
||||
|
@ -238,21 +255,95 @@ func jobFieldsProductFunc(productFunc api.ProductFunc, srcCfg *source.Config) ap
|
|||
}
|
||||
}
|
||||
|
||||
func addonMetaProductFunc(productFunc api.ProductFunc) api.ProductFunc {
|
||||
func addonMetaProductFunc(productFunc api.ProductFunc, fields *AddonMetaFields, schema AddonMetaSchema) api.ProductFunc {
|
||||
return func(event api.Event) api.Result {
|
||||
s, _ := event.Meta().Get(SystemStateKey)
|
||||
state := s.(*persistence.State)
|
||||
addonMeta := make(map[string]interface{})
|
||||
addonMeta["pipeline"] = state.PipelineName
|
||||
addonMeta["source"] = state.SourceName
|
||||
addonMeta["filename"] = state.Filename
|
||||
addonMeta["timestamp"] = state.CollectTime.Local().Format(tsLayout)
|
||||
addonMeta["offset"] = state.Offset
|
||||
addonMeta["bytes"] = state.ContentBytes
|
||||
addonMeta["hostname"] = global.NodeName
|
||||
|
||||
event.Header()["state"] = addonMeta
|
||||
// if fields is nil, use default config
|
||||
if fields == nil {
|
||||
addonMeta["pipeline"] = state.PipelineName
|
||||
addonMeta["source"] = state.SourceName
|
||||
addonMeta["filename"] = state.Filename
|
||||
addonMeta["timestamp"] = state.CollectTime.Local().Format(tsLayout)
|
||||
addonMeta["offset"] = state.Offset
|
||||
addonMeta["bytes"] = state.ContentBytes
|
||||
addonMeta["hostname"] = global.NodeName
|
||||
} else {
|
||||
|
||||
if fields.Pipeline != "" {
|
||||
addonMeta[fields.Pipeline] = state.PipelineName
|
||||
}
|
||||
if fields.Source != "" {
|
||||
addonMeta[fields.Source] = state.SourceName
|
||||
}
|
||||
if fields.Filename != "" {
|
||||
addonMeta[fields.Filename] = state.Filename
|
||||
}
|
||||
if fields.Timestamp != "" {
|
||||
addonMeta[fields.Timestamp] = state.CollectTime.Local().Format(tsLayout)
|
||||
}
|
||||
if fields.Offset != "" {
|
||||
addonMeta[fields.Offset] = state.Offset
|
||||
}
|
||||
if fields.Bytes != "" {
|
||||
addonMeta[fields.Bytes] = state.ContentBytes
|
||||
}
|
||||
if fields.Line != "" {
|
||||
addonMeta[fields.Line] = state.LineNumber
|
||||
}
|
||||
if fields.Hostname != "" {
|
||||
addonMeta[fields.Hostname] = global.NodeName
|
||||
}
|
||||
}
|
||||
|
||||
if schema.FieldsUnderRoot {
|
||||
for k, v := range addonMeta {
|
||||
event.Header()[k] = v
|
||||
}
|
||||
} else {
|
||||
event.Header()[schema.FieldsUnderKey] = addonMeta
|
||||
}
|
||||
|
||||
productFunc(event)
|
||||
return result.Success()
|
||||
}
|
||||
}
|
||||
|
||||
func addonMetaFieldsConvert(fields map[string]string) *AddonMetaFields {
|
||||
if len(fields) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
amf := &AddonMetaFields{}
|
||||
for k, v := range fields {
|
||||
switch v {
|
||||
case "${_meta.pipeline}":
|
||||
amf.Pipeline = k
|
||||
|
||||
case "${_meta.source}":
|
||||
amf.Source = k
|
||||
|
||||
case "${_meta.filename}":
|
||||
amf.Filename = k
|
||||
|
||||
case "${_meta.timestamp}":
|
||||
amf.Timestamp = k
|
||||
|
||||
case "${_meta.offset}":
|
||||
amf.Offset = k
|
||||
|
||||
case "${_meta.bytes}":
|
||||
amf.Bytes = k
|
||||
|
||||
case "${_meta.line}":
|
||||
amf.Line = k
|
||||
|
||||
case "${_meta.hostname}":
|
||||
amf.Hostname = k
|
||||
}
|
||||
}
|
||||
|
||||
return amf
|
||||
}
|
||||
|
|
|
@ -18,12 +18,13 @@ package file
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/persistence/reg"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/loggie-io/loggie/pkg/util/persistence/reg"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/discovery/kubernetes/external"
|
||||
|
@ -318,7 +319,7 @@ func (w *Watcher) eventBus(e jobEvent) {
|
|||
}
|
||||
}
|
||||
// Pre-allocation offset
|
||||
if existAckOffset == 0 {
|
||||
if existAckOffset == 0 || e.job.task.config.ReadFromTail {
|
||||
if e.job.task.config.ReadFromTail {
|
||||
existAckOffset = fileSize
|
||||
}
|
||||
|
@ -728,6 +729,9 @@ func (w *Watcher) run() {
|
|||
case <-w.done:
|
||||
return
|
||||
case watchTaskEvent := <-w.watchTaskEventChan:
|
||||
if watchTaskEvent.watchTaskType == START {
|
||||
w.scanNewFiles()
|
||||
}
|
||||
w.handleWatchTaskEvent(watchTaskEvent)
|
||||
case job := <-w.zombieJobChan:
|
||||
w.decideZombieJob(job)
|
||||
|
@ -996,6 +1000,9 @@ func ExportWatchMetric() map[string]eventbus.WatchMetricData {
|
|||
|
||||
watchLock.Lock()
|
||||
defer watchLock.Unlock()
|
||||
if globalWatcher == nil {
|
||||
return watcherMetrics
|
||||
}
|
||||
for _, watchTask := range globalWatcher.sourceWatchTasks {
|
||||
paths := getPathsIfDynamicContainerLogs(watchTask.config.Paths, watchTask.pipelineName, watchTask.sourceName)
|
||||
m := globalWatcher.reportWatchMetric(watchTask, paths, watchTask.pipelineName, watchTask.sourceName)
|
||||
|
|
|
@ -21,22 +21,18 @@ import (
|
|||
"io"
|
||||
"net"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/loggie-io/loggie/pkg/core/api"
|
||||
"github.com/loggie-io/loggie/pkg/core/event"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/pipeline"
|
||||
pb "github.com/loggie-io/loggie/pkg/sink/grpc/pb"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const Type = "grpc"
|
||||
|
||||
var (
|
||||
json = jsoniter.ConfigFastest
|
||||
)
|
||||
|
||||
func init() {
|
||||
pipeline.Register(api.SOURCE, Type, makeSource)
|
||||
}
|
||||
|
|
|
@ -18,8 +18,8 @@ package kubernetes_event
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
|
@ -2,12 +2,11 @@ package prometheus_exporter
|
|||
|
||||
import (
|
||||
ctx "context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/pattern"
|
||||
timeutil "github.com/loggie-io/loggie/pkg/util/time"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
@ -161,7 +160,7 @@ func (e *PromExporter) scrape(c ctx.Context, req *http.Request) ([]byte, error)
|
|||
return out, nil
|
||||
}
|
||||
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "read response body failed")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gojson
|
||||
|
||||
import (
|
||||
json "github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type Gojson struct{}
|
||||
|
||||
func (g *Gojson) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
func (g *Gojson) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func (g *Gojson) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return json.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
func (g *Gojson) MarshalToString(v interface{}) (string, error) {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"github.com/loggie-io/loggie/pkg/util/json/gojson"
|
||||
"github.com/loggie-io/loggie/pkg/util/json/jsoniter"
|
||||
"github.com/loggie-io/loggie/pkg/util/json/sonic"
|
||||
"github.com/loggie-io/loggie/pkg/util/json/std"
|
||||
)
|
||||
|
||||
const (
|
||||
Decoderjsoniter = "jsoniter"
|
||||
Decodersonic = "sonic"
|
||||
Decoderstd = "std"
|
||||
Decodergojson = "go-json"
|
||||
defaultCoderName = "default"
|
||||
)
|
||||
|
||||
var decoderSet = map[string]JSON{
|
||||
Decoderjsoniter: &jsoniter.Jsoniter{},
|
||||
Decodersonic: &sonic.Sonic{},
|
||||
Decoderstd: &std.Std{},
|
||||
Decodergojson: &gojson.Gojson{},
|
||||
}
|
||||
|
||||
func init() {
|
||||
for name, decoder := range decoderSet {
|
||||
Register(name, decoder)
|
||||
}
|
||||
Register(defaultCoderName, &jsoniter.Jsoniter{})
|
||||
}
|
||||
|
||||
type JSON interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
|
||||
MarshalToString(v interface{}) (string, error)
|
||||
}
|
||||
|
||||
var JSONFactory = make(map[string]JSON)
|
||||
|
||||
func Register(name string, factory JSON) {
|
||||
JSONFactory[name] = factory
|
||||
}
|
||||
|
||||
func SetDefaultEngine(name string) {
|
||||
JSONFactory[defaultCoderName] = JSONFactory[name]
|
||||
}
|
||||
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
return JSONFactory[defaultCoderName].Marshal(v)
|
||||
}
|
||||
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
return JSONFactory[defaultCoderName].Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return JSONFactory[defaultCoderName].MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
func MarshalToString(v interface{}) (string, error) {
|
||||
return JSONFactory[defaultCoderName].MarshalToString(v)
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jsoniter
|
||||
|
||||
import (
|
||||
json "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
type Jsoniter struct {
|
||||
}
|
||||
|
||||
func (j *Jsoniter) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.ConfigFastest.Marshal(v)
|
||||
}
|
||||
|
||||
func (j *Jsoniter) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.ConfigFastest.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func (j *Jsoniter) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return json.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
func (j *Jsoniter) MarshalToString(v interface{}) (string, error) {
|
||||
return json.MarshalToString(v)
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sonic
|
||||
|
||||
import (
|
||||
json "github.com/bytedance/sonic"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
type Sonic struct {
|
||||
}
|
||||
|
||||
func (s *Sonic) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.ConfigFastest.Marshal(v)
|
||||
}
|
||||
|
||||
func (s *Sonic) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.ConfigFastest.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func (s *Sonic) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
// SIMD json marshal not support this feature
|
||||
return jsoniter.ConfigFastest.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
func (s *Sonic) MarshalToString(v interface{}) (string, error) {
|
||||
return json.ConfigFastest.MarshalToString(v)
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sonic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotSupportArch = errors.New("not support arch")
|
||||
)
|
||||
|
||||
type Sonic struct {
|
||||
}
|
||||
|
||||
func (s *Sonic) Marshal(v interface{}) ([]byte, error) {
|
||||
return nil, ErrNotSupportArch
|
||||
}
|
||||
|
||||
func (s *Sonic) Unmarshal(data []byte, v interface{}) error {
|
||||
return ErrNotSupportArch
|
||||
}
|
||||
|
||||
func (s *Sonic) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return nil, ErrNotSupportArch
|
||||
}
|
||||
|
||||
func (s *Sonic) MarshalToString(v interface{}) (string, error) {
|
||||
return "", ErrNotSupportArch
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
Copyright 2023 Loggie Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package std
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
type Std struct {
|
||||
}
|
||||
|
||||
func (s *Std) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
func (s *Std) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func (s *Std) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return json.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
func (s *Std) MarshalToString(v interface{}) (string, error) {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
|
@ -34,7 +34,6 @@ type DbConfig struct {
|
|||
File string `yaml:"file,omitempty"`
|
||||
FlushTimeout time.Duration `yaml:"flushTimeout,omitempty" default:"2s"`
|
||||
BufferSize int `yaml:"bufferSize,omitempty" default:"2048"`
|
||||
TableName string `yaml:"tableName,omitempty" default:"registry"`
|
||||
CleanInactiveTimeout time.Duration `yaml:"cleanInactiveTimeout,omitempty" default:"504h"` // default records not updated in 21 days will be deleted
|
||||
CleanScanInterval time.Duration `yaml:"cleanScanInterval,omitempty" default:"1h"`
|
||||
}
|
||||
|
|
|
@ -19,9 +19,10 @@ limitations under the License.
|
|||
package driver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/dgraph-io/badger/v3"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/persistence/reg"
|
||||
"github.com/pkg/errors"
|
||||
"math"
|
||||
|
|
|
@ -18,7 +18,7 @@ package reg
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/loggie-io/loggie/pkg/util/json"
|
||||
)
|
||||
|
||||
type Registry struct {
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/loggie-io/loggie/pkg/control"
|
||||
"github.com/loggie-io/loggie/pkg/core/cfg"
|
||||
"github.com/loggie-io/loggie/pkg/core/interceptor"
|
||||
"github.com/loggie-io/loggie/pkg/core/log"
|
||||
"github.com/loggie-io/loggie/pkg/core/queue"
|
||||
"github.com/loggie-io/loggie/pkg/eventbus"
|
||||
"github.com/loggie-io/loggie/pkg/eventbus/export/logger"
|
||||
"github.com/loggie-io/loggie/pkg/interceptor/maxbytes"
|
||||
"github.com/loggie-io/loggie/pkg/interceptor/metric"
|
||||
"github.com/loggie-io/loggie/pkg/interceptor/retry"
|
||||
"github.com/loggie-io/loggie/pkg/pipeline"
|
||||
"github.com/loggie-io/loggie/pkg/queue/channel"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
_ "github.com/loggie-io/loggie/pkg/include"
|
||||
)
|
||||
|
||||
const pipe1 = `
|
||||
pipelines:
|
||||
- name: test
|
||||
sources:
|
||||
- type: dev
|
||||
name: test
|
||||
qps: 100
|
||||
byteSize: 10240
|
||||
eventsTotal: 10000
|
||||
sink:
|
||||
type: elasticsearch
|
||||
parallelism: 3
|
||||
hosts: ["localhost:9200"]
|
||||
index: "loggie-benchmark-${+YYYY.MM.DD}"
|
||||
`
|
||||
|
||||
func main() {
|
||||
log.InitDefaultLogger()
|
||||
pipeline.SetDefaultConfigRaw(pipeline.Config{
|
||||
Queue: &queue.Config{
|
||||
Type: channel.Type,
|
||||
},
|
||||
Interceptors: []*interceptor.Config{
|
||||
{
|
||||
Type: metric.Type,
|
||||
},
|
||||
{
|
||||
Type: maxbytes.Type,
|
||||
},
|
||||
{
|
||||
Type: retry.Type,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
eventbus.StartAndRun(eventbus.Config{
|
||||
LoggerConfig: logger.Config{
|
||||
Enabled: true,
|
||||
Period: 5 * time.Second,
|
||||
Pretty: false,
|
||||
},
|
||||
ListenerConfigs: map[string]cfg.CommonCfg{
|
||||
"sink": map[string]interface{}{
|
||||
"period": 5 * time.Second,
|
||||
},
|
||||
"sys": map[string]interface{}{
|
||||
"period": 5 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
pipecfgs := &control.PipelineConfig{}
|
||||
if err := cfg.UnPackFromRaw([]byte(pipe1), pipecfgs).Defaults().Validate().Do(); err != nil {
|
||||
log.Panic("pipeline configs invalid: %v", err)
|
||||
}
|
||||
|
||||
controller := control.NewController()
|
||||
controller.Start(pipecfgs)
|
||||
|
||||
if err := http.ListenAndServe(":9196", nil); err != nil {
|
||||
log.Fatal("http listen and serve err: %v", err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
*.o
|
||||
*.swp
|
||||
*.swm
|
||||
*.swn
|
||||
*.a
|
||||
*.so
|
||||
_obj
|
||||
_test
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
_testmain.go
|
||||
*.exe
|
||||
*.exe~
|
||||
*.test
|
||||
*.prof
|
||||
*.rar
|
||||
*.zip
|
||||
*.gz
|
||||
*.psd
|
||||
*.bmd
|
||||
*.cfg
|
||||
*.pptx
|
||||
*.log
|
||||
*nohup.out
|
||||
*settings.pyc
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
.DS_Store
|
||||
/.idea/
|
||||
/.vscode/
|
||||
/output/
|
||||
/vendor/
|
||||
/Gopkg.lock
|
||||
/Gopkg.toml
|
||||
coverage.html
|
||||
coverage.out
|
||||
coverage.xml
|
||||
junit.xml
|
||||
*.profile
|
||||
*.svg
|
||||
*.out
|
||||
ast/test.out
|
||||
ast/bench.sh
|
||||
|
||||
!testdata/*.json.gz
|
||||
fuzz/testdata
|
||||
*__debug_bin
|
|
@ -0,0 +1,3 @@
|
|||
[submodule "tools/asm2asm"]
|
||||
path = tools/asm2asm
|
||||
url = https://github.com/chenzhuoyu/asm2asm
|
|
@ -0,0 +1,24 @@
|
|||
header:
|
||||
license:
|
||||
spdx-id: Apache-2.0
|
||||
copyright-owner: ByteDance Inc.
|
||||
|
||||
paths:
|
||||
- '**/*.go'
|
||||
- '**/*.s'
|
||||
|
||||
paths-ignore:
|
||||
- 'ast/asm.s' # empty file
|
||||
- 'decoder/asm.s' # empty file
|
||||
- 'encoder/asm.s' # empty file
|
||||
- 'internal/caching/asm.s' # empty file
|
||||
- 'internal/jit/asm.s' # empty file
|
||||
- 'internal/native/avx/native_amd64.s' # auto-generated by asm2asm
|
||||
- 'internal/native/avx/native_subr_amd64.go' # auto-generated by asm2asm
|
||||
- 'internal/native/avx2/native_amd64.s' # auto-generated by asm2asm
|
||||
- 'internal/native/avx2/native_subr_amd64.go' # auto-generated by asm2asm
|
||||
- 'internal/resolver/asm.s' # empty file
|
||||
- 'internal/rt/asm.s' # empty file
|
||||
- 'internal/loader/asm.s' # empty file
|
||||
|
||||
comment: on-failure
|
|
@ -0,0 +1,128 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
wudi.daniel@bytedance.com.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
|
@ -0,0 +1,63 @@
|
|||
# How to Contribute
|
||||
|
||||
## Your First Pull Request
|
||||
We use GitHub for our codebase. You can start by reading [How To Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests).
|
||||
|
||||
## Without Semantic Versioning
|
||||
We keep the stable code in branch `main` like `golang.org/x`. Development base on branch `develop`. We promise the **Forward Compatibility** by adding new package directory with suffix `v2/v3` when code has break changes.
|
||||
|
||||
## Branch Organization
|
||||
We use [git-flow](https://nvie.com/posts/a-successful-git-branching-model/) as our branch organization, as known as [FDD](https://en.wikipedia.org/wiki/Feature-driven_development)
|
||||
|
||||
|
||||
## Bugs
|
||||
### 1. How to Find Known Issues
|
||||
We are using [Github Issues](https://github.com/bytedance/sonic/issues) for our public bugs. We keep a close eye on this and try to make it clear when we have an internal fix in progress. Before filing a new task, try to make sure your problem doesn’t already exist.
|
||||
|
||||
### 2. Reporting New Issues
|
||||
Providing a reduced test code is a recommended way for reporting issues. Then can be placed in:
|
||||
- Just in issues
|
||||
- [Golang Playground](https://play.golang.org/)
|
||||
|
||||
### 3. Security Bugs
|
||||
Please do not report the safe disclosure of bugs to public issues. Contact us by [Support Email](mailto:sonic@bytedance.com)
|
||||
|
||||
## How to Get in Touch
|
||||
- [Email](mailto:wudi.daniel@bytedance.com)
|
||||
|
||||
## Submit a Pull Request
|
||||
Before you submit your Pull Request (PR) consider the following guidelines:
|
||||
1. Search [GitHub](https://github.com/bytedance/sonic/pulls) for an open or closed PR that relates to your submission. You don't want to duplicate existing efforts.
|
||||
2. Be sure that an issue describes the problem you're fixing, or documents the design for the feature you'd like to add. Discussing the design upfront helps to ensure that we're ready to accept your work.
|
||||
3. [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) the bytedance/sonic repo.
|
||||
4. In your forked repository, make your changes in a new git branch:
|
||||
```
|
||||
git checkout -b bugfix/security_bug develop
|
||||
```
|
||||
5. Create your patch, including appropriate test cases.
|
||||
6. Follow our [Style Guides](#code-style-guides).
|
||||
7. Commit your changes using a descriptive commit message that follows [AngularJS Git Commit Message Conventions](https://docs.google.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit).
|
||||
Adherence to these conventions is necessary because release notes will be automatically generated from these messages.
|
||||
8. Push your branch to GitHub:
|
||||
```
|
||||
git push origin bugfix/security_bug
|
||||
```
|
||||
9. In GitHub, send a pull request to `sonic:main`
|
||||
|
||||
Note: you must use one of `optimize/feature/bugfix/doc/ci/test/refactor` following a slash(`/`) as the branch prefix.
|
||||
|
||||
Your pr title and commit message should follow https://www.conventionalcommits.org/.
|
||||
|
||||
## Contribution Prerequisites
|
||||
- Our development environment keeps up with [Go Official](https://golang.org/project/).
|
||||
- You need fully checking with lint tools before submit your pull request. [gofmt](https://golang.org/pkg/cmd/gofmt/) & [golangci-lint](https://github.com/golangci/golangci-lint)
|
||||
- You are familiar with [Github](https://github.com)
|
||||
- Maybe you need familiar with [Actions](https://github.com/features/actions)(our default workflow tool).
|
||||
|
||||
## Code Style Guides
|
||||
See [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments).
|
||||
|
||||
Good resources:
|
||||
- [Effective Go](https://golang.org/doc/effective_go)
|
||||
- [Pingcap General advice](https://pingcap.github.io/style-guide/general.html)
|
||||
- [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md)
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,116 @@
|
|||
#
|
||||
# Copyright 2021 ByteDance Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
ARCH := avx avx2 sse
|
||||
TMP_DIR := output
|
||||
OUT_DIR := internal/native
|
||||
SRC_FILE := native/native.c
|
||||
|
||||
CPU_avx := amd64
|
||||
CPU_avx2 := amd64
|
||||
CPU_sse := amd64
|
||||
|
||||
TMPL_avx := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64
|
||||
TMPL_avx2 := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64
|
||||
TMPL_sse := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64
|
||||
|
||||
CFLAGS_avx := -msse -mssse3 -mno-sse4 -mavx -mpclmul -mno-avx2 -DUSE_AVX=1 -DUSE_AVX2=0
|
||||
CFLAGS_avx2 := -msse -mssse3 -mno-sse4 -mavx -mpclmul -mavx2 -DUSE_AVX=1 -DUSE_AVX2=1
|
||||
CFLAGS_sse := -msse -mssse3 -mno-sse4 -mno-avx -mno-avx2 -mpclmul
|
||||
TARGETFLAGS := -target x86_64-apple-macos11 -nostdlib -fno-builtin -fno-asynchronous-unwind-tables
|
||||
|
||||
|
||||
CC_amd64 := clang
|
||||
ASM2ASM_amd64 := tools/asm2asm/asm2asm.py
|
||||
|
||||
CFLAGS := -mno-red-zone
|
||||
CFLAGS += -fno-exceptions
|
||||
CFLAGS += -fno-rtti
|
||||
CFLAGS += -fno-stack-protector
|
||||
CFLAGS += -O3
|
||||
CFLAGS += -Wall -Werror
|
||||
|
||||
NATIVE_SRC := $(wildcard native/*.h)
|
||||
NATIVE_SRC += $(wildcard native/*.c)
|
||||
|
||||
.PHONY: all clean ${ARCH}
|
||||
|
||||
define build_tmpl
|
||||
$(eval @arch := $(1))
|
||||
$(eval @tmpl := $(2))
|
||||
$(eval @dest := $(3))
|
||||
|
||||
${@dest}: ${@tmpl}
|
||||
mkdir -p $(dir ${@dest})
|
||||
echo '// Code generated by Makefile, DO NOT EDIT.' > ${@dest}
|
||||
echo >> ${@dest}
|
||||
sed -e 's/{{PACKAGE}}/${@arch}/g' ${@tmpl} >> ${@dest}
|
||||
endef
|
||||
|
||||
define build_arch
|
||||
$(eval @cpu := $(value CPU_$(1)))
|
||||
$(eval @deps := $(foreach tmpl,$(value TMPL_$(1)),${OUT_DIR}/$(1)/${tmpl}.go))
|
||||
$(eval @asmin := ${TMP_DIR}/$(1)/native.s)
|
||||
$(eval @asmout := ${OUT_DIR}/$(1)/native_${@cpu}.s)
|
||||
$(eval @stubin := ${OUT_DIR}/native_${@cpu}.tmpl)
|
||||
$(eval @stubout := ${OUT_DIR}/$(1)/native_${@cpu}.go)
|
||||
|
||||
$(1): ${@asmout} ${@deps}
|
||||
|
||||
${@asmout}: ${@stubout} ${NATIVE_SRC}
|
||||
mkdir -p ${TMP_DIR}/$(1)
|
||||
$${CC_${@cpu}} $${CFLAGS} $${CFLAGS_$(1)} ${TARGETFLAGS} -S -o ${TMP_DIR}/$(1)/native.s ${SRC_FILE}
|
||||
$(foreach file,
|
||||
$(wildcard native/unittest/*),
|
||||
$${CC_${@cpu}} $${CFLAGS} $${CFLAGS_$(1)} -I./native -o ${TMP_DIR}/$(1)/test $(file)
|
||||
./${TMP_DIR}/$(1)/test
|
||||
)
|
||||
python3 $${ASM2ASM_${@cpu}} ${@asmout} ${TMP_DIR}/$(1)/native.s
|
||||
asmfmt -w ${@asmout}
|
||||
|
||||
$(eval $(call \
|
||||
build_tmpl, \
|
||||
$(1), \
|
||||
${@stubin}, \
|
||||
${@stubout} \
|
||||
))
|
||||
|
||||
$(foreach \
|
||||
tmpl, \
|
||||
$(value TMPL_$(1)), \
|
||||
$(eval $(call \
|
||||
build_tmpl, \
|
||||
$(1), \
|
||||
${OUT_DIR}/${tmpl}.tmpl, \
|
||||
${OUT_DIR}/$(1)/${tmpl}.go \
|
||||
)) \
|
||||
)
|
||||
endef
|
||||
|
||||
all: ${ARCH}
|
||||
|
||||
clean:
|
||||
for arch in ${ARCH}; do \
|
||||
rm -vfr ${TMP_DIR}/$${arch}; \
|
||||
rm -vfr ${OUT_DIR}/$${arch}; \
|
||||
done
|
||||
|
||||
$(foreach \
|
||||
arch, \
|
||||
${ARCH}, \
|
||||
$(eval $(call build_arch,${arch})) \
|
||||
)
|
||||
|
|
@ -0,0 +1,362 @@
|
|||
# Sonic
|
||||
|
||||
English | [中文](README_ZH_CN.md)
|
||||
|
||||
A blazingly fast JSON serializing & deserializing library, accelerated by JIT (just-in-time compiling) and SIMD (single-instruction-multiple-data).
|
||||
|
||||
## Requirement
|
||||
- Go 1.15~1.20
|
||||
- Linux/MacOS/Windows
|
||||
- Amd64 ARCH
|
||||
|
||||
## Features
|
||||
- Runtime object binding without code generation
|
||||
- Complete APIs for JSON value manipulation
|
||||
- Fast, fast, fast!
|
||||
|
||||
## Benchmarks
|
||||
For **all sizes** of json and **all scenarios** of usage, **Sonic performs best**.
|
||||
- [Medium](https://github.com/bytedance/sonic/blob/main/decoder/testdata_test.go#L19) (13KB, 300+ key, 6 layers)
|
||||
```powershell
|
||||
goversion: 1.17.1
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz
|
||||
BenchmarkEncoder_Generic_Sonic-16 32393 ns/op 402.40 MB/s 11965 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Generic_Sonic_Fast-16 21668 ns/op 601.57 MB/s 10940 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Generic_JsonIter-16 42168 ns/op 309.12 MB/s 14345 B/op 115 allocs/op
|
||||
BenchmarkEncoder_Generic_GoJson-16 65189 ns/op 199.96 MB/s 23261 B/op 16 allocs/op
|
||||
BenchmarkEncoder_Generic_StdLib-16 106322 ns/op 122.60 MB/s 49136 B/op 789 allocs/op
|
||||
BenchmarkEncoder_Binding_Sonic-16 6269 ns/op 2079.26 MB/s 14173 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Binding_Sonic_Fast-16 5281 ns/op 2468.16 MB/s 12322 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Binding_JsonIter-16 20056 ns/op 649.93 MB/s 9488 B/op 2 allocs/op
|
||||
BenchmarkEncoder_Binding_GoJson-16 8311 ns/op 1568.32 MB/s 9481 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Binding_StdLib-16 16448 ns/op 792.52 MB/s 9479 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_Sonic-16 6681 ns/op 1950.93 MB/s 12738 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_Sonic_Fast-16 4179 ns/op 3118.99 MB/s 10757 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_JsonIter-16 9861 ns/op 1321.84 MB/s 14362 B/op 115 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_GoJson-16 18850 ns/op 691.52 MB/s 23278 B/op 16 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_StdLib-16 45902 ns/op 283.97 MB/s 49174 B/op 789 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_Sonic-16 1480 ns/op 8810.09 MB/s 13049 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_Sonic_Fast-16 1209 ns/op 10785.23 MB/s 11546 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_JsonIter-16 6170 ns/op 2112.58 MB/s 9504 B/op 2 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_GoJson-16 3321 ns/op 3925.52 MB/s 9496 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_StdLib-16 3739 ns/op 3486.49 MB/s 9480 B/op 1 allocs/op
|
||||
|
||||
BenchmarkDecoder_Generic_Sonic-16 66812 ns/op 195.10 MB/s 57602 B/op 723 allocs/op
|
||||
BenchmarkDecoder_Generic_Sonic_Fast-16 54523 ns/op 239.07 MB/s 49786 B/op 313 allocs/op
|
||||
BenchmarkDecoder_Generic_StdLib-16 124260 ns/op 104.90 MB/s 50869 B/op 772 allocs/op
|
||||
BenchmarkDecoder_Generic_JsonIter-16 91274 ns/op 142.81 MB/s 55782 B/op 1068 allocs/op
|
||||
BenchmarkDecoder_Generic_GoJson-16 88569 ns/op 147.17 MB/s 66367 B/op 973 allocs/op
|
||||
BenchmarkDecoder_Binding_Sonic-16 32557 ns/op 400.38 MB/s 28302 B/op 137 allocs/op
|
||||
BenchmarkDecoder_Binding_Sonic_Fast-16 28649 ns/op 455.00 MB/s 24999 B/op 34 allocs/op
|
||||
BenchmarkDecoder_Binding_StdLib-16 111437 ns/op 116.97 MB/s 10576 B/op 208 allocs/op
|
||||
BenchmarkDecoder_Binding_JsonIter-16 35090 ns/op 371.48 MB/s 14673 B/op 385 allocs/op
|
||||
BenchmarkDecoder_Binding_GoJson-16 28738 ns/op 453.59 MB/s 22039 B/op 49 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_Sonic-16 12321 ns/op 1057.91 MB/s 57233 B/op 723 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_Sonic_Fast-16 10644 ns/op 1224.64 MB/s 49362 B/op 313 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_StdLib-16 57587 ns/op 226.35 MB/s 50874 B/op 772 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_JsonIter-16 38666 ns/op 337.12 MB/s 55789 B/op 1068 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_GoJson-16 30259 ns/op 430.79 MB/s 66370 B/op 974 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_Sonic-16 5965 ns/op 2185.28 MB/s 27747 B/op 137 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_Sonic_Fast-16 5170 ns/op 2521.31 MB/s 24715 B/op 34 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_StdLib-16 27582 ns/op 472.58 MB/s 10576 B/op 208 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_JsonIter-16 13571 ns/op 960.51 MB/s 14685 B/op 385 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_GoJson-16 10031 ns/op 1299.51 MB/s 22111 B/op 49 allocs/op
|
||||
|
||||
BenchmarkGetOne_Sonic-16 3276 ns/op 3975.78 MB/s 24 B/op 1 allocs/op
|
||||
BenchmarkGetOne_Gjson-16 9431 ns/op 1380.81 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkGetOne_Jsoniter-16 51178 ns/op 254.46 MB/s 27936 B/op 647 allocs/op
|
||||
BenchmarkGetOne_Parallel_Sonic-16 216.7 ns/op 60098.95 MB/s 24 B/op 1 allocs/op
|
||||
BenchmarkGetOne_Parallel_Gjson-16 1076 ns/op 12098.62 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkGetOne_Parallel_Jsoniter-16 17741 ns/op 734.06 MB/s 27945 B/op 647 allocs/op
|
||||
BenchmarkSetOne_Sonic-16 9571 ns/op 1360.61 MB/s 1584 B/op 17 allocs/op
|
||||
BenchmarkSetOne_Sjson-16 36456 ns/op 357.22 MB/s 52180 B/op 9 allocs/op
|
||||
BenchmarkSetOne_Jsoniter-16 79475 ns/op 163.86 MB/s 45862 B/op 964 allocs/op
|
||||
BenchmarkSetOne_Parallel_Sonic-16 850.9 ns/op 15305.31 MB/s 1584 B/op 17 allocs/op
|
||||
BenchmarkSetOne_Parallel_Sjson-16 18194 ns/op 715.77 MB/s 52247 B/op 9 allocs/op
|
||||
BenchmarkSetOne_Parallel_Jsoniter-16 33560 ns/op 388.05 MB/s 45892 B/op 964 allocs/op
|
||||
```
|
||||
- [Small](https://github.com/bytedance/sonic/blob/main/testdata/small.go) (400B, 11 keys, 3 layers)
|
||||

|
||||
- [Large](https://github.com/bytedance/sonic/blob/main/testdata/twitter.json) (635KB, 10000+ key, 6 layers)
|
||||

|
||||
|
||||
See [bench.sh](https://github.com/bytedance/sonic/blob/main/bench.sh) for benchmark codes.
|
||||
|
||||
## How it works
|
||||
See [INTRODUCTION.md](./docs/INTRODUCTION.md).
|
||||
|
||||
## Usage
|
||||
|
||||
### Marshal/Unmarshal
|
||||
|
||||
Default behaviors are mostly consistent with `encoding/json`, except HTML escaping form (see [Escape HTML](https://github.com/bytedance/sonic/blob/main/README.md#escape-html)) and `SortKeys` feature (optional support see [Sort Keys](https://github.com/bytedance/sonic/blob/main/README.md#sort-keys)) that is **NOT** in conformity to [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
var data YourSchema
|
||||
// Marshal
|
||||
output, err := sonic.Marshal(&data)
|
||||
// Unmarshal
|
||||
err := sonic.Unmarshal(output, &data)
|
||||
```
|
||||
|
||||
### Streaming IO
|
||||
Sonic supports decoding JSON from an `io.Reader` or encoding objects into an `io.Writer`, aiming to handle multiple values as well as reduce memory consumption.
|
||||
- encoder
|
||||
```go
|
||||
var o1 = map[string]interface{}{
|
||||
"a": "b",
|
||||
}
|
||||
var o2 = 1
|
||||
var w = bytes.NewBuffer(nil)
|
||||
var enc = sonic.ConfigDefault.NewEncoder(w)
|
||||
enc.Encode(o1)
|
||||
enc.Encode(o2)
|
||||
fmt.Println(w.String())
|
||||
// Output:
|
||||
// {"a":"b"}
|
||||
// 1
|
||||
```
|
||||
- decoder
|
||||
```go
|
||||
var o = map[string]interface{}{}
|
||||
var r = strings.NewReader(`{"a":"b"}{"1":"2"}`)
|
||||
var dec = sonic.ConfigDefault.NewDecoder(r)
|
||||
dec.Decode(&o)
|
||||
dec.Decode(&o)
|
||||
fmt.Printf("%+v", o)
|
||||
// Output:
|
||||
// map[1:2 a:b]
|
||||
```
|
||||
|
||||
### Use Number/Use Int64
|
||||
```go
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var input = `1`
|
||||
var data interface{}
|
||||
|
||||
// default float64
|
||||
dc := decoder.NewDecoder(input)
|
||||
dc.Decode(&data) // data == float64(1)
|
||||
// use json.Number
|
||||
dc = decoder.NewDecoder(input)
|
||||
dc.UseNumber()
|
||||
dc.Decode(&data) // data == json.Number("1")
|
||||
// use int64
|
||||
dc = decoder.NewDecoder(input)
|
||||
dc.UseInt64()
|
||||
dc.Decode(&data) // data == int64(1)
|
||||
|
||||
root, err := sonic.GetFromString(input)
|
||||
// Get json.Number
|
||||
jn := root.Number()
|
||||
jm := root.InterfaceUseNumber().(json.Number) // jn == jm
|
||||
// Get float64
|
||||
fn := root.Float64()
|
||||
fm := root.Interface().(float64) // fn == fm
|
||||
```
|
||||
|
||||
### Sort Keys
|
||||
On account of the performance loss from sorting (roughly 10%), sonic doesn't enable this feature by default. If your component depends on it to work (like [zstd](https://github.com/facebook/zstd)), Use it like this:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/encoder"
|
||||
|
||||
// Binding map only
|
||||
m := map[string]interface{}{}
|
||||
v, err := encoder.Encode(m, encoder.SortMapKeys)
|
||||
|
||||
// Or ast.Node.SortKeys() before marshal
|
||||
root := sonic.Get(JSON)
|
||||
err := root.SortKeys()
|
||||
```
|
||||
### Escape HTML
|
||||
On account of the performance loss (roughly 15%), sonic doesn't enable this feature by default. You can use `encoder.EscapeHTML` option to open this feature (align with `encoding/json.HTMLEscape`).
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
v := map[string]string{"&&":"<>"}
|
||||
ret, err := encoder.Encode(v, encoder.EscapeHTML) // ret == `{"\u0026\u0026":"\u003c\u003e"}`
|
||||
```
|
||||
### Compact Format
|
||||
Sonic encodes primitive objects (struct/map...) as compact-format JSON by default, except when marshaling `json.RawMessage` or `json.Marshaler`: sonic ensures validating their output JSON but **DOES NOT** compact them, for performance concerns. We provide the option `encoder.CompactMarshaler` to add a compacting process.
|
||||
|
||||
### Print Error
|
||||
If there is invalid syntax in the input JSON, sonic will return `decoder.SyntaxError`, which supports pretty-printing of the error position
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var data interface{}
|
||||
err := sonic.UnmarshalString("[[[}]]", &data)
|
||||
if err != nil {
|
||||
/* One line by default */
|
||||
println(err.Error()) // "Syntax error at index 3: invalid char\n\n\t[[[}]]\n\t...^..\n"
|
||||
/* Pretty print */
|
||||
if e, ok := err.(decoder.SyntaxError); ok {
|
||||
/*Syntax error at index 3: invalid char
|
||||
|
||||
[[[}]]
|
||||
...^..
|
||||
*/
|
||||
print(e.Description())
|
||||
} else if me, ok := err.(*decoder.MismatchTypeError); ok {
|
||||
// decoder.MismatchTypeError is new to Sonic v1.6.0
|
||||
print(me.Description())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Mismatched Types [Sonic v1.6.0]
|
||||
If there is a **mismatch-typed** value for a given key, sonic will report `decoder.MismatchTypeError` (if there are many, it reports the last one), but will still skip the wrong value and keep decoding the next JSON.
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var data = struct{
|
||||
A int
|
||||
B int
|
||||
}{}
|
||||
err := UnmarshalString(`{"A":"1","B":1}`, &data)
|
||||
println(err.Error()) // Mismatch type int with value string "at index 5: mismatched type with value\n\n\t{\"A\":\"1\",\"B\":1}\n\t.....^.........\n"
|
||||
fmt.Printf("%+v", data) // {A:0 B:1}
|
||||
```
|
||||
### Ast.Node
|
||||
Sonic/ast.Node is a completely self-contained AST for JSON. It implements serialization and deserialization both and provides robust APIs for obtaining and modification of generic data.
|
||||
#### Get/Index
|
||||
Search partial JSON by the given paths, each of which must be a non-negative integer, a string, or nil
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
input := []byte(`{"key1":[{},{"key2":{"key3":[1,2,3]}}]}`)
|
||||
|
||||
// no path, returns entire json
|
||||
root, err := sonic.Get(input)
|
||||
raw := root.Raw() // == string(input)
|
||||
|
||||
// multiple paths
|
||||
root, err := sonic.Get(input, "key1", 1, "key2")
|
||||
sub := root.Get("key3").Index(2).Int64() // == 3
|
||||
```
|
||||
**Tip**: since `Index()` uses an offset to locate data, which is much faster than scanning like `Get()`, we suggest you use it as much as possible. Sonic also provides another API, `IndexOrGet()`, which uses the offset underneath while still ensuring the key is matched.
|
||||
|
||||
#### Set/Unset
|
||||
Modify the json content by Set()/Unset()
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
// Set
|
||||
exist, err := root.Set("key4", NewBool(true)) // exist == false
|
||||
alias1 := root.Get("key4")
|
||||
println(alias1.Valid()) // true
|
||||
alias2 := root.Index(1)
|
||||
println(alias1 == alias2) // true
|
||||
|
||||
// Unset
|
||||
exist, err := root.UnsetByIndex(1) // exist == true
|
||||
println(root.Get("key4").Check()) // "value not exist"
|
||||
```
|
||||
|
||||
#### Serialize
|
||||
To encode `ast.Node` as json, use `MarshalJson()` or `json.Marshal()` (MUST pass the node's pointer)
|
||||
```go
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/bytedance/sonic"
|
||||
)
|
||||
|
||||
buf, err := root.MarshalJson()
|
||||
println(string(buf)) // {"key1":[{},{"key2":{"key3":[1,2,3]}}]}
|
||||
exp, err := json.Marshal(&root) // WARN: use pointer
|
||||
println(string(buf) == string(exp)) // true
|
||||
```
|
||||
|
||||
#### APIs
|
||||
- validation: `Check()`, `Error()`, `Valid()`, `Exist()`
|
||||
- searching: `Index()`, `Get()`, `IndexPair()`, `IndexOrGet()`, `GetByPath()`
|
||||
- go-type casting: `Int64()`, `Float64()`, `String()`, `Number()`, `Bool()`, `Map[UseNumber|UseNode]()`, `Array[UseNumber|UseNode]()`, `Interface[UseNumber|UseNode]()`
|
||||
- go-type packing: `NewRaw()`, `NewNumber()`, `NewNull()`, `NewBool()`, `NewString()`, `NewObject()`, `NewArray()`
|
||||
- iteration: `Values()`, `Properties()`, `ForEach()`, `SortKeys()`
|
||||
- modification: `Set()`, `SetByIndex()`, `Add()`
|
||||
|
||||
## Compatibility
|
||||
Sonic **DOES NOT** ensure support for all environments, due to the difficulty of developing high-performance code. For developers who use sonic to build their applications in different environments, we have the following suggestions:
|
||||
|
||||
- Developing on **Mac M1**: Make sure you have Rosetta 2 installed on your machine, and set `GOARCH=amd64` when building your application. Rosetta 2 can automatically translate x86 binaries to arm64 binaries and run x86 applications on Mac M1.
|
||||
- Developing on **Linux arm64**: You can install qemu and use the `qemu-x86_64 -cpu max` command to run x86 binaries built with sonic on arm64. The qemu can achieve a similar transfer effect to Rosetta 2 on Mac M1.
|
||||
|
||||
For developers who want to use sonic on Linux arm64 without qemu, or those who want to handle JSON strictly consistent with `encoding/json`, we provide some compatible APIs as `sonic.API`
|
||||
- `ConfigDefault`: the sonic's default config (`EscapeHTML=false`,`SortKeys=false`...) to run on sonic-supporting environment. It will fall back to `encoding/json` with the corresponding config, and some options like `SortKeys=false` will be invalid.
|
||||
- `ConfigStd`: the std-compatible config (`EscapeHTML=true`,`SortKeys=true`...) to run on sonic-supporting environment. It will fall back to `encoding/json`.
|
||||
- `ConfigFastest`: the fastest config (`NoQuoteTextMarshaler=true`) to run on sonic-supporting environment. It will fall back to `encoding/json` with the corresponding config, and some options will be invalid.
|
||||
|
||||
## Tips
|
||||
|
||||
### Pretouch
|
||||
Since Sonic uses [golang-asm](https://github.com/twitchyliquid64/golang-asm) as a JIT assembler, which is NOT very suitable for runtime compiling, first-hit running of a huge schema may cause request-timeout or even process-OOM. For better stability, we advise **using `Pretouch()` for huge-schema or compact-memory applications** before `Marshal()/Unmarshal()`.
|
||||
```go
|
||||
import (
|
||||
"reflect"
|
||||
"github.com/bytedance/sonic"
|
||||
"github.com/bytedance/sonic/option"
|
||||
)
|
||||
|
||||
func init() {
|
||||
var v HugeStruct
|
||||
|
||||
// For most large types (nesting depth <= option.DefaultMaxInlineDepth)
|
||||
err := sonic.Pretouch(reflect.TypeOf(v))
|
||||
|
||||
// with more CompileOption...
|
||||
err := sonic.Pretouch(reflect.TypeOf(v),
|
||||
// If the type is too deep nesting (nesting depth > option.DefaultMaxInlineDepth),
|
||||
// you can set compile recursive loops in Pretouch for better stability in JIT.
|
||||
option.WithCompileRecursiveDepth(loop),
|
||||
// For a large nested struct, try to set a smaller depth to reduce compiling time.
|
||||
option.WithCompileMaxInlineDepth(depth),
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
### Copy string
|
||||
When decoding **string values without any escaped characters**, sonic references them from the origin JSON buffer instead of mallocing a new buffer to copy. This helps a lot for CPU performance but may leave the whole JSON buffer in memory as long as the decoded objects are being used. In practice, we found the extra memory introduced by referring JSON buffer is usually 20% ~ 80% of decoded objects. Once an application holds these objects for a long time (for example, cache the decoded objects for reusing), its in-use memory on the server may go up. We provide the option `decoder.CopyString()` for users to choose not to reference the JSON buffer, which may cause a decline in CPU performance to some degree.
|
||||
|
||||
### Pass string or []byte?
|
||||
For alignment to `encoding/json`, we provide API to pass `[]byte` as an argument, but the string-to-bytes copy is conducted at the same time considering safety, which may lose performance when the origin JSON is huge. Therefore, you can use `UnmarshalString()` and `GetFromString()` to pass a string, as long as your origin data is a string or **nocopy-cast** is safe for your []byte. We also provide API `MarshalString()` for convenient **nocopy-cast** of encoded JSON []byte, which is safe since sonic's output bytes is always duplicated and unique.
|
||||
|
||||
### Accelerate `encoding.TextMarshaler`
|
||||
To ensure data security, sonic.Encoder quotes and escapes string values from `encoding.TextMarshaler` interfaces by default, which may degrade performance much if most of your data is in form of them. We provide `encoder.NoQuoteTextMarshaler` to skip these operations, which means you **MUST** ensure their output string escaped and quoted following [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
|
||||
|
||||
|
||||
### Better performance for generic data
|
||||
In **fully-parsed** scenario, `Unmarshal()` performs better than `Get()`+`Node.Interface()`. But if you only have a part of the schema for specific json, you can combine `Get()` and `Unmarshal()` together:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
node, err := sonic.GetFromString(_TwitterJson, "statuses", 3, "user")
|
||||
var user User // your partial schema...
|
||||
err = sonic.UnmarshalString(node.Raw(), &user)
|
||||
```
|
||||
Even if you don't have any schema, use `ast.Node` as the container of generic values instead of `map` or `interface`:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
root, err := sonic.GetFromString(_TwitterJson)
|
||||
user := root.GetByPath("statuses", 3, "user") // === root.Get("status").Index(3).Get("user")
|
||||
err = user.Check()
|
||||
|
||||
// err = user.LoadAll() // only call this when you want to use 'user' concurrently...
|
||||
go someFunc(user)
|
||||
```
|
||||
Why? Because `ast.Node` stores its children using `array`:
|
||||
- `Array`'s performance is **much better** than `Map` when Inserting (Deserialize) and Scanning (Serialize) data;
|
||||
- **Hashing** (`map[x]`) is not as efficient as **Indexing** (`array[x]`), which `ast.Node` can conduct on **both array and object**;
|
||||
- Using `Interface()`/`Map()` means Sonic must parse all the underlying values, while `ast.Node` can parse them **on demand**.
|
||||
|
||||
**CAUTION:** `ast.Node` **DOESN'T** ensure concurrent security directly, due to its **lazy-load** design. However, you can call `Node.Load()`/`Node.LoadAll()` to achieve that, which may bring performance reduction while it still works faster than converting to `map` or `interface{}`
|
||||
|
||||
## Community
|
||||
Sonic is a subproject of [CloudWeGo](https://www.cloudwego.io/). We are committed to building a cloud native ecosystem.
|
|
@ -0,0 +1,382 @@
|
|||
# Sonic
|
||||
|
||||
[English](README.md) | 中文
|
||||
|
||||
一个速度奇快的 JSON 序列化/反序列化库,由 JIT (即时编译)和 SIMD (单指令流多数据流)加速。
|
||||
|
||||
## 依赖
|
||||
|
||||
- Go 1.15~1.20
|
||||
- Linux/MacOS/Windows
|
||||
- Amd64 架构
|
||||
|
||||
## 特色
|
||||
|
||||
- 运行时对象绑定,无需代码生成
|
||||
- 完备的 JSON 操作 API
|
||||
- 快,更快,还要更快!
|
||||
|
||||
## 基准测试
|
||||
|
||||
对于**所有大小**的 json 和**所有使用场景**, **Sonic 表现均为最佳**。
|
||||
- [中型](https://github.com/bytedance/sonic/blob/main/decoder/testdata_test.go#L19) (13kB, 300+ 键, 6 层)
|
||||
```powershell
|
||||
goversion: 1.17.1
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz
|
||||
BenchmarkEncoder_Generic_Sonic-16 32393 ns/op 402.40 MB/s 11965 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Generic_Sonic_Fast-16 21668 ns/op 601.57 MB/s 10940 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Generic_JsonIter-16 42168 ns/op 309.12 MB/s 14345 B/op 115 allocs/op
|
||||
BenchmarkEncoder_Generic_GoJson-16 65189 ns/op 199.96 MB/s 23261 B/op 16 allocs/op
|
||||
BenchmarkEncoder_Generic_StdLib-16 106322 ns/op 122.60 MB/s 49136 B/op 789 allocs/op
|
||||
BenchmarkEncoder_Binding_Sonic-16 6269 ns/op 2079.26 MB/s 14173 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Binding_Sonic_Fast-16 5281 ns/op 2468.16 MB/s 12322 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Binding_JsonIter-16 20056 ns/op 649.93 MB/s 9488 B/op 2 allocs/op
|
||||
BenchmarkEncoder_Binding_GoJson-16 8311 ns/op 1568.32 MB/s 9481 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Binding_StdLib-16 16448 ns/op 792.52 MB/s 9479 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_Sonic-16 6681 ns/op 1950.93 MB/s 12738 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_Sonic_Fast-16 4179 ns/op 3118.99 MB/s 10757 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_JsonIter-16 9861 ns/op 1321.84 MB/s 14362 B/op 115 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_GoJson-16 18850 ns/op 691.52 MB/s 23278 B/op 16 allocs/op
|
||||
BenchmarkEncoder_Parallel_Generic_StdLib-16 45902 ns/op 283.97 MB/s 49174 B/op 789 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_Sonic-16 1480 ns/op 8810.09 MB/s 13049 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_Sonic_Fast-16 1209 ns/op 10785.23 MB/s 11546 B/op 4 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_JsonIter-16 6170 ns/op 2112.58 MB/s 9504 B/op 2 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_GoJson-16 3321 ns/op 3925.52 MB/s 9496 B/op 1 allocs/op
|
||||
BenchmarkEncoder_Parallel_Binding_StdLib-16 3739 ns/op 3486.49 MB/s 9480 B/op 1 allocs/op
|
||||
|
||||
BenchmarkDecoder_Generic_Sonic-16 66812 ns/op 195.10 MB/s 57602 B/op 723 allocs/op
|
||||
BenchmarkDecoder_Generic_Sonic_Fast-16 54523 ns/op 239.07 MB/s 49786 B/op 313 allocs/op
|
||||
BenchmarkDecoder_Generic_StdLib-16 124260 ns/op 104.90 MB/s 50869 B/op 772 allocs/op
|
||||
BenchmarkDecoder_Generic_JsonIter-16 91274 ns/op 142.81 MB/s 55782 B/op 1068 allocs/op
|
||||
BenchmarkDecoder_Generic_GoJson-16 88569 ns/op 147.17 MB/s 66367 B/op 973 allocs/op
|
||||
BenchmarkDecoder_Binding_Sonic-16 32557 ns/op 400.38 MB/s 28302 B/op 137 allocs/op
|
||||
BenchmarkDecoder_Binding_Sonic_Fast-16 28649 ns/op 455.00 MB/s 24999 B/op 34 allocs/op
|
||||
BenchmarkDecoder_Binding_StdLib-16 111437 ns/op 116.97 MB/s 10576 B/op 208 allocs/op
|
||||
BenchmarkDecoder_Binding_JsonIter-16 35090 ns/op 371.48 MB/s 14673 B/op 385 allocs/op
|
||||
BenchmarkDecoder_Binding_GoJson-16 28738 ns/op 453.59 MB/s 22039 B/op 49 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_Sonic-16 12321 ns/op 1057.91 MB/s 57233 B/op 723 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_Sonic_Fast-16 10644 ns/op 1224.64 MB/s 49362 B/op 313 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_StdLib-16 57587 ns/op 226.35 MB/s 50874 B/op 772 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_JsonIter-16 38666 ns/op 337.12 MB/s 55789 B/op 1068 allocs/op
|
||||
BenchmarkDecoder_Parallel_Generic_GoJson-16 30259 ns/op 430.79 MB/s 66370 B/op 974 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_Sonic-16 5965 ns/op 2185.28 MB/s 27747 B/op 137 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_Sonic_Fast-16 5170 ns/op 2521.31 MB/s 24715 B/op 34 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_StdLib-16 27582 ns/op 472.58 MB/s 10576 B/op 208 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_JsonIter-16 13571 ns/op 960.51 MB/s 14685 B/op 385 allocs/op
|
||||
BenchmarkDecoder_Parallel_Binding_GoJson-16 10031 ns/op 1299.51 MB/s 22111 B/op 49 allocs/op
|
||||
|
||||
BenchmarkGetOne_Sonic-16 3276 ns/op 3975.78 MB/s 24 B/op 1 allocs/op
|
||||
BenchmarkGetOne_Gjson-16 9431 ns/op 1380.81 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkGetOne_Jsoniter-16 51178 ns/op 254.46 MB/s 27936 B/op 647 allocs/op
|
||||
BenchmarkGetOne_Parallel_Sonic-16 216.7 ns/op 60098.95 MB/s 24 B/op 1 allocs/op
|
||||
BenchmarkGetOne_Parallel_Gjson-16 1076 ns/op 12098.62 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkGetOne_Parallel_Jsoniter-16 17741 ns/op 734.06 MB/s 27945 B/op 647 allocs/op
|
||||
BenchmarkSetOne_Sonic-16 9571 ns/op 1360.61 MB/s 1584 B/op 17 allocs/op
|
||||
BenchmarkSetOne_Sjson-16 36456 ns/op 357.22 MB/s 52180 B/op 9 allocs/op
|
||||
BenchmarkSetOne_Jsoniter-16 79475 ns/op 163.86 MB/s 45862 B/op 964 allocs/op
|
||||
BenchmarkSetOne_Parallel_Sonic-16 850.9 ns/op 15305.31 MB/s 1584 B/op 17 allocs/op
|
||||
BenchmarkSetOne_Parallel_Sjson-16 18194 ns/op 715.77 MB/s 52247 B/op 9 allocs/op
|
||||
BenchmarkSetOne_Parallel_Jsoniter-16 33560 ns/op 388.05 MB/s 45892 B/op 964 allocs/op
|
||||
```
|
||||
- [小型](https://github.com/bytedance/sonic/blob/main/testdata/small.go) (400B, 11 个键, 3 层)
|
||||

|
||||
- [大型](https://github.com/bytedance/sonic/blob/main/testdata/twitter.json) (635kB, 10000+ 个键, 6 层)
|
||||

|
||||
|
||||
要查看基准测试代码,请参阅 [bench.sh](https://github.com/bytedance/sonic/blob/main/bench.sh) 。
|
||||
|
||||
## 工作原理
|
||||
|
||||
请参阅 [INTRODUCTION_ZH_CN.md](./docs/INTRODUCTION_ZH_CN.md).
|
||||
|
||||
## 使用方式
|
||||
|
||||
### 序列化/反序列化
|
||||
|
||||
默认的行为基本上与 `encoding/json` 相一致,除了 HTML 转义形式(参见 [Escape HTML](https://github.com/bytedance/sonic/blob/main/README.md#escape-html)) 和 `SortKeys` 功能(参见 [Sort Keys](https://github.com/bytedance/sonic/blob/main/README.md#sort-keys))**没有**遵循 [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259) 。
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
var data YourSchema
|
||||
// Marshal
|
||||
output, err := sonic.Marshal(&data)
|
||||
// Unmarshal
|
||||
err := sonic.Unmarshal(output, &data)
|
||||
```
|
||||
|
||||
### 流式输入输出
|
||||
|
||||
Sonic 支持解码 `io.Reader` 中输入的 json,或将对象编码为 json 后输出至 `io.Writer`,以处理多个值并减少内存消耗。
|
||||
- 编码器
|
||||
```go
|
||||
var o1 = map[string]interface{}{
|
||||
"a": "b",
|
||||
}
|
||||
var o2 = 1
|
||||
var w = bytes.NewBuffer(nil)
|
||||
var enc = sonic.ConfigDefault.NewEncoder(w)
|
||||
enc.Encode(o1)
|
||||
enc.Encode(o2)
|
||||
fmt.Println(w.String())
|
||||
// Output:
|
||||
// {"a":"b"}
|
||||
// 1
|
||||
```
|
||||
- 解码器
|
||||
```go
|
||||
var o = map[string]interface{}{}
|
||||
var r = strings.NewReader(`{"a":"b"}{"1":"2"}`)
|
||||
var dec = sonic.ConfigDefault.NewDecoder(r)
|
||||
dec.Decode(&o)
|
||||
dec.Decode(&o)
|
||||
fmt.Printf("%+v", o)
|
||||
// Output:
|
||||
// map[1:2 a:b]
|
||||
```
|
||||
|
||||
### 使用 `Number` / `int64`
|
||||
|
||||
```go
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var input = `1`
|
||||
var data interface{}
|
||||
|
||||
// default float64
|
||||
dc := decoder.NewDecoder(input)
|
||||
dc.Decode(&data) // data == float64(1)
|
||||
// use json.Number
|
||||
dc = decoder.NewDecoder(input)
|
||||
dc.UseNumber()
|
||||
dc.Decode(&data) // data == json.Number("1")
|
||||
// use int64
|
||||
dc = decoder.NewDecoder(input)
|
||||
dc.UseInt64()
|
||||
dc.Decode(&data) // data == int64(1)
|
||||
|
||||
root, err := sonic.GetFromString(input)
|
||||
// Get json.Number
|
||||
jn := root.Number()
|
||||
jm := root.InterfaceUseNumber().(json.Number) // jn == jm
|
||||
// Get float64
|
||||
fn := root.Float64()
|
||||
fm := root.Interface().(float64) // fn == fm
|
||||
```
|
||||
|
||||
### 对键排序
|
||||
|
||||
考虑到排序带来的性能损失(约 10% ), sonic 默认不会启用这个功能。如果你的组件依赖这个行为(如 [zstd](https://github.com/facebook/zstd)) ,可以仿照下面的例子:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/encoder"
|
||||
|
||||
// Binding map only
|
||||
m := map[string]interface{}{}
|
||||
v, err := encoder.Encode(m, encoder.SortMapKeys)
|
||||
|
||||
// Or ast.Node.SortKeys() before marshal
|
||||
root, _ := sonic.Get(JSON)
|
||||
err := root.SortKeys()
|
||||
```
|
||||
|
||||
### HTML 转义
|
||||
|
||||
考虑到性能损失(约15%), sonic 默认不会启用这个功能。你可以使用 `encoder.EscapeHTML` 选项来开启(与 `encoding/json.HTMLEscape` 行为一致)。
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
v := map[string]string{"&&":"<>"}
|
||||
ret, err := Encode(v, EscapeHTML) // ret == `{"\u0026\u0026":{"X":"\u003c\u003e"}}`
|
||||
```
|
||||
|
||||
### 紧凑格式
|
||||
Sonic 默认将基本类型( `struct` , `map` 等)编码为紧凑格式的 JSON ,除非使用 `json.RawMessage` or `json.Marshaler` 进行编码: sonic 确保输出的 JSON 合法,但出于性能考虑,**不会**加工成紧凑格式。我们提供选项 `encoder.CompactMarshaler` 来添加此过程,
|
||||
|
||||
### 打印错误
|
||||
|
||||
如果输入的 JSON 存在无效的语法,sonic 将返回 `decoder.SyntaxError`,该错误支持错误位置的美化输出。
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var data interface{}
|
||||
err := sonic.UnmarshalString("[[[}]]", &data)
|
||||
if err != nil {
|
||||
/* One line by default */
|
||||
println(err.Error()) // "Syntax error at index 3: invalid char\n\n\t[[[}]]\n\t...^..\n"
|
||||
/* Pretty print */
|
||||
if e, ok := err.(decoder.SyntaxError); ok {
|
||||
/*Syntax error at index 3: invalid char
|
||||
|
||||
[[[}]]
|
||||
...^..
|
||||
*/
|
||||
print(e.Description())
|
||||
} else if me, ok := err.(*decoder.MismatchTypeError); ok {
|
||||
// decoder.MismatchTypeError is new to Sonic v1.6.0
|
||||
print(me.Description())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 类型不匹配 [Sonic v1.6.0]
|
||||
|
||||
如果给定键中存在**类型不匹配**的值, sonic 会抛出 `decoder.MismatchTypeError` (如果有多个,只会报告最后一个),但仍会跳过错误的值并解码下一个 JSON 。
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
import "github.com/bytedance/sonic/decoder"
|
||||
|
||||
var data = struct{
|
||||
A int
|
||||
B int
|
||||
}{}
|
||||
err := UnmarshalString(`{"A":"1","B":1}`, &data)
|
||||
println(err.Error()) // Mismatch type int with value string "at index 5: mismatched type with value\n\n\t{\"A\":\"1\",\"B\":1}\n\t.....^.........\n"
|
||||
fmt.Printf("%+v", data) // {A:0 B:1}
|
||||
```
|
||||
### `Ast.Node`
|
||||
|
||||
Sonic/ast.Node 是完全独立的 JSON 抽象语法树库。它实现了序列化和反序列化,并提供了获取和修改通用数据的鲁棒的 API。
|
||||
|
||||
#### 查找/索引
|
||||
|
||||
通过给定的路径搜索 JSON 片段,路径必须为非负整数,字符串或 `nil` 。
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
input := []byte(`{"key1":[{},{"key2":{"key3":[1,2,3]}}]}`)
|
||||
|
||||
// no path, returns entire json
|
||||
root, err := sonic.Get(input)
|
||||
raw := root.Raw() // == string(input)
|
||||
|
||||
// multiple paths
|
||||
root, err := sonic.Get(input, "key1", 1, "key2")
|
||||
sub := root.Get("key3").Index(2).Int64() // == 3
|
||||
```
|
||||
**注意**:由于 `Index()` 使用偏移量来定位数据,比使用扫描的 `Get()` 要快的多,建议尽可能的使用 `Index` 。 Sonic 也提供了另一个 API, `IndexOrGet()` ,以偏移量为基础并且也确保键的匹配。
|
||||
|
||||
#### 修改
|
||||
|
||||
使用 `Set()` / `Unset()` 修改 json 的内容
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
// Set
|
||||
exist, err := root.Set("key4", NewBool(true)) // exist == false
|
||||
alias1 := root.Get("key4")
|
||||
println(alias1.Valid()) // true
|
||||
alias2 := root.Index(1)
|
||||
println(alias1 == alias2) // true
|
||||
|
||||
// Unset
|
||||
exist, err := root.UnsetByIndex(1) // exist == true
|
||||
println(root.Get("key4").Check()) // "value not exist"
|
||||
```
|
||||
|
||||
#### 序列化
|
||||
要将 `ast.Node` 编码为 json ,使用 `MarshalJson()` 或者 `json.Marshal()` (必须传递指向节点的指针)
|
||||
```go
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/bytedance/sonic"
|
||||
)
|
||||
|
||||
buf, err := root.MarshalJson()
|
||||
println(string(buf)) // {"key1":[{},{"key2":{"key3":[1,2,3]}}]}
|
||||
exp, err := json.Marshal(&root) // WARN: use pointer
|
||||
println(string(buf) == string(exp)) // true
|
||||
```
|
||||
|
||||
#### APIs
|
||||
- 合法性检查: `Check()`, `Error()`, `Valid()`, `Exist()`
|
||||
- 索引: `Index()`, `Get()`, `IndexPair()`, `IndexOrGet()`, `GetByPath()`
|
||||
- 转换至 go 内置类型: `Int64()`, `Float64()`, `String()`, `Number()`, `Bool()`, `Map[UseNumber|UseNode]()`, `Array[UseNumber|UseNode]()`, `Interface[UseNumber|UseNode]()`
|
||||
- go 类型打包: `NewRaw()`, `NewNumber()`, `NewNull()`, `NewBool()`, `NewString()`, `NewObject()`, `NewArray()`
|
||||
- 迭代: `Values()`, `Properties()`, `ForEach()`, `SortKeys()`
|
||||
- 修改: `Set()`, `SetByIndex()`, `Add()`
|
||||
|
||||
## 兼容性
|
||||
由于开发高性能代码的困难性, Sonic **不**保证对所有环境的支持。对于在不同环境中使用 Sonic 构建应用程序的开发者,我们有以下建议:
|
||||
|
||||
- 在 **Mac M1** 上开发:确保在您的计算机上安装了 Rosetta 2,并在构建时设置 `GOARCH=amd64` 。 Rosetta 2 可以自动将 x86 二进制文件转换为 arm64 二进制文件,并在 Mac M1 上运行 x86 应用程序。
|
||||
- 在 **Linux arm64** 上开发:您可以安装 qemu 并使用 `qemu-x86_64 -cpu max` 命令来将 x86 二进制文件转换为 arm64 二进制文件。qemu可以实现与Mac M1上的Rosetta 2类似的转换效果。
|
||||
|
||||
对于希望在不使用 qemu 的情况下使用 sonic 的开发者,或者希望处理 JSON 时与 `encoding/json` 严格保持一致的开发者,我们在 `sonic.API` 中提供了一些兼容性 API
|
||||
- `ConfigDefault`: 在支持 sonic 的环境下 sonic 的默认配置(`EscapeHTML=false`,`SortKeys=false`等)。行为与具有相应配置的 `encoding/json` 一致,一些选项,如 `SortKeys=false` 将无效。
|
||||
- `ConfigStd`: 在支持 sonic 的环境下与标准库兼容的配置(`EscapeHTML=true`,`SortKeys=true`等)。行为与 `encoding/json` 一致。
|
||||
- `ConfigFastest`: 在支持 sonic 的环境下运行最快的配置(`NoQuoteTextMarshaler=true`)。行为与具有相应配置的 `encoding/json` 一致,某些选项将无效。
|
||||
|
||||
## 注意事项
|
||||
|
||||
### 预热
|
||||
由于 Sonic 使用 [golang-asm](https://github.com/twitchyliquid64/golang-asm) 作为 JIT 汇编器,这个库并不适用于运行时编译,第一次运行一个大型模式可能会导致请求超时甚至进程内存溢出。为了更好地稳定性,我们建议在运行大型模式或在内存有限的应用中,在使用 `Marshal()/Unmarshal()` 前运行 `Pretouch()`。
|
||||
```go
|
||||
import (
|
||||
"reflect"
|
||||
"github.com/bytedance/sonic"
|
||||
"github.com/bytedance/sonic/option"
|
||||
)
|
||||
|
||||
func init() {
|
||||
var v HugeStruct
|
||||
|
||||
// For most large types (nesting depth <= option.DefaultMaxInlineDepth)
|
||||
err := sonic.Pretouch(reflect.TypeOf(v))
|
||||
|
||||
// with more CompileOption...
|
||||
err := sonic.Pretouch(reflect.TypeOf(v),
|
||||
// If the type is too deep nesting (nesting depth > option.DefaultMaxInlineDepth),
|
||||
// you can set compile recursive loops in Pretouch for better stability in JIT.
|
||||
option.WithCompileRecursiveDepth(loop),
|
||||
// For a large nested struct, try to set a smaller depth to reduce compiling time.
|
||||
option.WithCompileMaxInlineDepth(depth),
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
### 拷贝字符串
|
||||
|
||||
当解码 **没有转义字符的字符串**时, sonic 会从原始的 JSON 缓冲区内引用而不是复制到新的一个缓冲区中。这对 CPU 的性能方面很有帮助,但是可能因此在解码后对象仍在使用的时候将整个 JSON 缓冲区保留在内存中。实践中我们发现,通过引用 JSON 缓冲区引入的额外内存通常是解码后对象的 20% 至 80% ,一旦应用长期保留这些对象(如缓存以备重用),服务器所使用的内存可能会增加。我们提供了选项 `decoder.CopyString()` 供用户选择,不引用 JSON 缓冲区。这可能在一定程度上降低 CPU 性能。
|
||||
|
||||
### 传递字符串还是字节数组?
|
||||
为了和 `encoding/json` 保持一致,我们提供了传递 `[]byte` 作为参数的 API ,但考虑到安全性,字符串到字节的复制是同时进行的,这在原始 JSON 非常大时可能会导致性能损失。因此,你可以使用 `UnmarshalString()` 和 `GetFromString()` 来传递字符串,只要你的原始数据是字符串,或**零拷贝类型转换**对于你的字节数组是安全的。我们也提供了 `MarshalString()` 的 API ,以便对编码的 JSON 字节数组进行**零拷贝类型转换**,因为 sonic 输出的字节始终是重复并且唯一的,所以这样是安全的。
|
||||
|
||||
### 加速 `encoding.TextMarshaler`
|
||||
|
||||
为了保证数据安全性, `sonic.Encoder` 默认会对来自 `encoding.TextMarshaler` 接口的字符串进行引用和转义,如果大部分数据都是这种形式那可能会导致很大的性能损失。我们提供了 `encoder.NoQuoteTextMarshaler` 选项来跳过这些操作,但你**必须**保证他们的输出字符串依照 [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259) 进行了转义和引用。
|
||||
|
||||
|
||||
### 泛型的性能优化
|
||||
|
||||
在 **完全解析**的场景下, `Unmarshal()` 表现得比 `Get()`+`Node.Interface()` 更好。但是如果你只有特定 JSON 的部分模式,你可以将 `Get()` 和 `Unmarshal()` 结合使用:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
node, err := sonic.GetFromString(_TwitterJson, "statuses", 3, "user")
|
||||
var user User // your partial schema...
|
||||
err = sonic.UnmarshalString(node.Raw(), &user)
|
||||
```
|
||||
甚至如果你没有任何模式,可以用 `ast.Node` 代替 `map` 或 `interface` 作为泛型的容器:
|
||||
```go
|
||||
import "github.com/bytedance/sonic"
|
||||
|
||||
root, err := sonic.GetFromString(_TwitterJson)
|
||||
user := root.GetByPath("statuses", 3, "user") // === root.Get("status").Index(3).Get("user")
|
||||
err = user.Check()
|
||||
|
||||
// err = user.LoadAll() // only call this when you want to use 'user' concurrently...
|
||||
go someFunc(user)
|
||||
```
|
||||
为什么?因为 `ast.Node` 使用 `array` 来存储其子节点:
|
||||
- 在插入(反序列化)和扫描(序列化)数据时,`Array` 的性能比 `Map` **好得多**;
|
||||
- **哈希**(`map[x]`)的效率不如**索引**(`array[x]`)高效,而 `ast.Node` 可以在数组和对象上使用索引;
|
||||
- 使用 `Interface()` / `Map()` 意味着 sonic 必须解析所有的底层值,而 `ast.Node` 可以**按需解析**它们。
|
||||
|
||||
**注意**:由于 `ast.Node` 的惰性加载设计,其**不能**直接保证并发安全性,但你可以调用 `Node.Load()` / `Node.LoadAll()` 来实现并发安全。尽管可能会带来性能损失,但仍比转换成 `map` 或 `interface{}` 更为高效。
|
||||
|
||||
## 社区
|
||||
|
||||
Sonic 是 [CloudWeGo](https://www.cloudwego.io/) 下的一个子项目。我们致力于构建云原生生态系统。
|
|
@ -0,0 +1,186 @@
|
|||
/*
|
||||
* Copyright 2021 ByteDance Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package sonic
|
||||
|
||||
import (
|
||||
`io`
|
||||
|
||||
`github.com/bytedance/sonic/ast`
|
||||
)
|
||||
|
||||
// Config is a combination of sonic/encoder.Options and sonic/decoder.Options.
// The zero value enables none of the options; call Config.Froze() to turn a
// Config into a usable API binding (see ConfigDefault/ConfigStd/ConfigFastest).
type Config struct {
    // EscapeHTML indicates encoder to escape all HTML characters
    // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
    // WARNING: This hurts performance A LOT, USE WITH CARE.
    EscapeHTML bool

    // SortMapKeys indicates encoder that the keys of a map needs to be sorted
    // before serializing into JSON.
    // WARNING: This hurts performance A LOT, USE WITH CARE.
    SortMapKeys bool

    // CompactMarshaler indicates encoder that the output JSON from json.Marshaler
    // is always compact and needs no validation.
    CompactMarshaler bool

    // NoQuoteTextMarshaler indicates encoder that the output text from encoding.TextMarshaler
    // is always an escaped string and needs no quoting.
    NoQuoteTextMarshaler bool

    // NoNullSliceOrMap indicates encoder that all empty Array or Object are encoded as '[]' or '{}',
    // instead of 'null'.
    NoNullSliceOrMap bool

    // UseInt64 indicates decoder to unmarshal an integer into an interface{} as an
    // int64 instead of as a float64.
    UseInt64 bool

    // UseNumber indicates decoder to unmarshal a number into an interface{} as a
    // json.Number instead of as a float64.
    UseNumber bool

    // UseUnicodeErrors indicates decoder to return an error when encountering invalid
    // UTF-8 escape sequences.
    UseUnicodeErrors bool

    // DisallowUnknownFields indicates decoder to return an error when the destination
    // is a struct and the input contains object keys which do not match any
    // non-ignored, exported fields in the destination.
    DisallowUnknownFields bool

    // CopyString indicates decoder to decode string values by copying instead of referring
    // to the input buffer (trades CPU for not pinning the whole input in memory).
    CopyString bool

    // ValidateString indicates decoder and encoder to validate string values: decoder will return errors
    // when unescaped control chars (\u0000-\u001f) appear in the string value of JSON.
    ValidateString bool
}
|
||||
|
||||
var (
|
||||
// ConfigDefault is the default config of APIs, aiming at efficiency and safty.
|
||||
ConfigDefault = Config{}.Froze()
|
||||
|
||||
// ConfigStd is the standard config of APIs, aiming at being compatible with encoding/json.
|
||||
ConfigStd = Config{
|
||||
EscapeHTML : true,
|
||||
SortMapKeys: true,
|
||||
CompactMarshaler: true,
|
||||
CopyString : true,
|
||||
ValidateString : true,
|
||||
}.Froze()
|
||||
|
||||
// ConfigFastest is the fastest config of APIs, aiming at speed.
|
||||
ConfigFastest = Config{
|
||||
NoQuoteTextMarshaler: true,
|
||||
}.Froze()
|
||||
)
|
||||
|
||||
|
||||
// API is a binding of a specific config.
// This interface is inspired by github.com/json-iterator/go,
// and has the same behaviors under equivalent config.
type API interface {
    // MarshalToString returns the JSON encoding string of v.
    MarshalToString(v interface{}) (string, error)
    // Marshal returns the JSON encoding bytes of v.
    Marshal(v interface{}) ([]byte, error)
    // MarshalIndent returns the JSON encoding bytes with indent and prefix.
    MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
    // UnmarshalFromString parses the JSON-encoded string and stores the result in the value pointed to by v.
    UnmarshalFromString(str string, v interface{}) error
    // Unmarshal parses the JSON-encoded bytes and stores the result in the value pointed to by v.
    Unmarshal(data []byte, v interface{}) error
    // NewEncoder creates an Encoder writing to writer.
    NewEncoder(writer io.Writer) Encoder
    // NewDecoder creates a Decoder reading from reader.
    NewDecoder(reader io.Reader) Decoder
    // Valid validates the JSON-encoded bytes and reports whether they are valid JSON.
    Valid(data []byte) bool
}
|
||||
|
||||
// Encoder encodes Go values as JSON and writes them to an io.Writer.
type Encoder interface {
    // Encode writes the JSON encoding of val to the stream, followed by a newline character.
    Encode(val interface{}) error
    // SetEscapeHTML specifies whether problematic HTML characters
    // should be escaped inside JSON quoted strings.
    // The default behavior is NOT to escape.
    SetEscapeHTML(on bool)
    // SetIndent instructs the encoder to format each subsequent encoded value
    // as if indented by the package-level function Indent(dst, src, prefix, indent).
    // Calling SetIndent("", "") disables indentation.
    SetIndent(prefix, indent string)
}
|
||||
|
||||
// Decoder decodes JSON values from an io.Reader.
type Decoder interface {
    // Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by val.
    Decode(val interface{}) error
    // Buffered returns a reader of the data remaining in the Decoder's buffer.
    // The reader is valid until the next call to Decode.
    Buffered() io.Reader
    // DisallowUnknownFields causes the Decoder to return an error when the destination is a struct
    // and the input contains object keys which do not match any non-ignored, exported fields in the destination.
    DisallowUnknownFields()
    // More reports whether there is another element in the current array or object being parsed.
    More() bool
    // UseNumber causes the Decoder to unmarshal a number into an interface{} as a json.Number instead of as a float64.
    UseNumber()
}
|
||||
|
||||
// Marshal returns the JSON encoding bytes of v.
|
||||
func Marshal(val interface{}) ([]byte, error) {
|
||||
return ConfigDefault.Marshal(val)
|
||||
}
|
||||
|
||||
// MarshalString returns the JSON encoding string of v.
|
||||
func MarshalString(val interface{}) (string, error) {
|
||||
return ConfigDefault.MarshalToString(val)
|
||||
}
|
||||
|
||||
// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
|
||||
// NOTICE: This API copies given buffer by default,
|
||||
// if you want to pass JSON more efficiently, use UnmarshalString instead.
|
||||
func Unmarshal(buf []byte, val interface{}) error {
|
||||
return ConfigDefault.Unmarshal(buf, val)
|
||||
}
|
||||
|
||||
// UnmarshalString is like Unmarshal, except buf is a string.
|
||||
func UnmarshalString(buf string, val interface{}) error {
|
||||
return ConfigDefault.UnmarshalFromString(buf, val)
|
||||
}
|
||||
|
||||
// Get searches the given path from json,
|
||||
// and returns its representing ast.Node.
|
||||
//
|
||||
// Each path arg must be integer or string:
|
||||
// - Integer is target index(>=0), means searching current node as array.
|
||||
// - String is target key, means searching current node as object.
|
||||
//
|
||||
//
|
||||
// Note, the api expects the json is well-formed at least,
|
||||
// otherwise it may return unexpected result.
|
||||
func Get(src []byte, path ...interface{}) (ast.Node, error) {
|
||||
return GetFromString(string(src), path...)
|
||||
}
|
||||
|
||||
// GetFromString is same with Get except src is string,
|
||||
// which can reduce unnecessary memory copy.
|
||||
func GetFromString(src string, path ...interface{}) (ast.Node, error) {
|
||||
return ast.NewSearcher(src).GetByPath(path...)
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
// +build amd64,go1.15,!go1.21
|
||||
|
||||
/*
|
||||
* Copyright 2022 ByteDance Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
`runtime`
|
||||
`unsafe`
|
||||
|
||||
`github.com/bytedance/sonic/encoder`
|
||||
`github.com/bytedance/sonic/internal/native`
|
||||
`github.com/bytedance/sonic/internal/native/types`
|
||||
`github.com/bytedance/sonic/internal/rt`
|
||||
uq `github.com/bytedance/sonic/unquote`
|
||||
`github.com/chenzhuoyu/base64x`
|
||||
)
|
||||
|
||||
// typeByte is the runtime type descriptor of byte, passed to growslice when
// quote needs to enlarge its output buffer.
var typeByte = rt.UnpackEface(byte(0)).Type
|
||||
|
||||
// quote appends the JSON-quoted form of val (including the surrounding
// double quotes) to *buf, delegating escaping to the native Quote routine.
//
//go:nocheckptr
func quote(buf *[]byte, val string) {
    *buf = append(*buf, '"')
    // Empty string: nothing to escape, just close the quotes.
    if len(val) == 0 {
        *buf = append(*buf, '"')
        return
    }

    sp := rt.IndexChar(val, 0) // pointer to the first byte of val
    nb := len(val)             // input bytes still to be escaped
    b := (*rt.GoSlice)(unsafe.Pointer(buf))

    // Feed the input to the native quoter until it is fully consumed.
    for nb > 0 {
        // Output region: the unused capacity at the end of *buf.
        dp := unsafe.Pointer(uintptr(b.Ptr) + uintptr(b.Len))
        dn := b.Cap - b.Len
        // Call native.Quote; on return dn is the byte count it wrote.
        ret := native.Quote(sp, nb, dp, &dn, 0)
        // Commit the bytes just written into *buf.
        b.Len += dn

        // A non-negative return means no more output is needed.
        if ret >= 0 {
            break
        }

        // Output capacity ran out: double the buffer and continue.
        *b = growslice(typeByte, *b, b.Cap*2)
        // A negative ret is the bitwise complement of the consumed input size.
        ret = ^ret
        // Advance the input cursor past the bytes already escaped.
        nb -= ret
        sp = unsafe.Pointer(uintptr(sp) + uintptr(ret))
    }

    // Keep buf and sp alive across the raw-pointer arithmetic above so the
    // GC cannot move/free the memory the native code is writing into.
    runtime.KeepAlive(buf)
    runtime.KeepAlive(sp)
    *buf = append(*buf, '"')
}
|
||||
|
||||
// unquote resolves the escape sequences of src, delegating to the sonic
// unquote package, and returns the decoded string or a parsing error.
func unquote(src string) (string, types.ParsingError) {
    return uq.String(src)
}
|
||||
|
||||
// decodeBase64 decodes a standard base64 string using the accelerated
// base64x implementation.
func decodeBase64(src string) ([]byte, error) {
    return base64x.StdEncoding.DecodeString(src)
}
|
||||
|
||||
// encodeBase64 encodes src as a standard base64 string using the
// accelerated base64x implementation.
func encodeBase64(src []byte) string {
    return base64x.StdEncoding.EncodeToString(src)
}
|
||||
|
||||
// decodeValue scans the next JSON value of self.s starting at self.p with
// the native scanner, advances self.p past it, and returns the parsed state.
func (self *Parser) decodeValue() (val types.JsonState) {
    sv := (*rt.GoString)(unsafe.Pointer(&self.s))
    self.p = native.Value(sv.Ptr, sv.Len, self.p, &val, 0)
    return
}
|
||||
|
||||
// skip advances self.p past one complete JSON value using the native
// scanner with a pooled state machine. It returns the value's start
// offset, or (self.p, error) on failure (negative native return codes
// encode the parsing error).
func (self *Parser) skip() (int, types.ParsingError) {
    fsm := types.NewStateMachine()
    start := native.SkipOne(&self.s, &self.p, fsm, 0)
    types.FreeStateMachine(fsm)

    if start < 0 {
        return self.p, types.ParsingError(-start)
    }
    return start, 0
}
|
||||
|
||||
// encodeInterface marshals the node's boxed Go value with sonic's encoder
// and appends the result to *buf.
func (self *Node) encodeInterface(buf *[]byte) error {
    //WARN: NOT compatible with json.Encoder
    return encoder.EncodeInto(buf, self.packAny(), 0)
}
|
||||
|
||||
// skipFast advances self.p past one JSON value using the faster native
// scanner that does not fully validate nested structure. It returns the
// value's start offset, or (self.p, error) on failure.
func (self *Parser) skipFast() (int, types.ParsingError) {
    start := native.SkipOneFast(&self.s, &self.p)
    if start < 0 {
        return self.p, types.ParsingError(-start)
    }
    return start, 0
}
|
||||
|
||||
// getByPath positions self.p at the value addressed by path (a mix of
// object keys and array indices) via the native searcher, returning the
// value's start offset or (self.p, error) on failure. path is kept alive
// across the native call since it is passed by raw pointer.
func (self *Parser) getByPath(path ...interface{}) (int, types.ParsingError) {
    fsm := types.NewStateMachine()
    start := native.GetByPath(&self.s, &self.p, &path, fsm)
    types.FreeStateMachine(fsm)
    runtime.KeepAlive(path)
    if start < 0 {
        return self.p, types.ParsingError(-start)
    }
    return start, 0
}
|
||||
|
||||
// GetByPath searches the value addressed by path from the document root
// and returns it as a raw (unparsed) Node spanning [start, self.parser.p).
// ERR_NOT_FOUND is mapped to ErrNotExist for compatibility with older
// versions; a path element that is neither int nor string panics.
func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
    var err types.ParsingError
    var start int

    self.parser.p = 0
    start, err = self.parser.getByPath(path...)
    if err != 0 {
        // for compatibility with old version
        if err == types.ERR_NOT_FOUND {
            return Node{}, ErrNotExist
        }
        if err == types.ERR_UNSUPPORT_TYPE {
            panic("path must be either int(>=0) or string")
        }
        return Node{}, self.parser.syntaxError(err)
    }

    // classify the raw value by its first byte
    t := switchRawType(self.parser.s[start])
    if t == _V_NONE {
        return Node{}, self.parser.ExportError(err)
    }
    return newRawNode(self.parser.s[start:self.parser.p], t), nil
}
|
|
@ -0,0 +1,120 @@
|
|||
// +build !amd64 go1.21
|
||||
|
||||
/*
|
||||
* Copyright 2022 ByteDance Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
`encoding/base64`
|
||||
`encoding/json`
|
||||
`fmt`
|
||||
|
||||
`github.com/bytedance/sonic/internal/native/types`
|
||||
`github.com/bytedance/sonic/internal/rt`
|
||||
)
|
||||
|
||||
// quote appends the JSON-quoted (escaped) form of val to *buf using the
// pure-Go fallback escaper.
func quote(buf *[]byte, val string) {
    quoteString(buf, val)
}
|
||||
|
||||
// unquote resolves the escape sequences of src and returns the decoded
// string. IndexChar(src, -1) steps one byte back to the opening quote and
// len(src)+2 re-covers both quote characters, so the slice handed to
// unquoteBytes spans the full quoted literal including its quotes.
// NOTE(review): this assumes src is an interior slice of a larger buffer
// with the quote bytes physically adjacent — confirm against callers.
func unquote(src string) (string, types.ParsingError) {
    sp := rt.IndexChar(src, -1)
    out, ok := unquoteBytes(rt.BytesFrom(sp, len(src)+2, len(src)+2))
    if !ok {
        return "", types.ERR_INVALID_ESCAPE
    }
    return rt.Mem2Str(out), 0
}
|
||||
|
||||
// decodeBase64 decodes a standard base64 string with encoding/base64
// (pure-Go fallback).
func decodeBase64(src string) ([]byte, error) {
    return base64.StdEncoding.DecodeString(src)
}
|
||||
|
||||
// encodeBase64 encodes src as a standard base64 string with
// encoding/base64 (pure-Go fallback).
func encodeBase64(src []byte) string {
    return base64.StdEncoding.EncodeToString(src)
}
|
||||
|
||||
// decodeValue decodes the next JSON value at self.p with the pure-Go
// scanner. self.p is advanced only on success (non-negative end offset).
func (self *Parser) decodeValue() (val types.JsonState) {
    e, v := decodeValue(self.s, self.p)
    if e < 0 {
        return v
    }
    self.p = e
    return v
}
|
||||
|
||||
// skip advances self.p past one complete JSON value with the pure-Go
// scanner and returns the value's start offset, or (self.p, error) on
// failure. self.p is only advanced on success.
func (self *Parser) skip() (int, types.ParsingError) {
    e, s := skipValue(self.s, self.p)
    if e < 0 {
        return self.p, types.ParsingError(-e)
    }
    self.p = e
    return s, 0
}
|
||||
|
||||
// skipFast advances self.p past one JSON value using the faster pure-Go
// scanner (brace matching instead of full validation). It returns the
// value's start offset, or (self.p, error) on failure.
func (self *Parser) skipFast() (int, types.ParsingError) {
    e, s := skipValueFast(self.s, self.p)
    if e < 0 {
        return self.p, types.ParsingError(-e)
    }
    self.p = e
    return s, 0
}
|
||||
|
||||
// encodeInterface marshals the node's boxed Go value with encoding/json
// and appends the result to *buf (pure-Go fallback).
func (self *Node) encodeInterface(buf *[]byte) error {
    out, err := json.Marshal(self.packAny())
    if err != nil {
        return err
    }
    *buf = append(*buf, out...)
    return nil
}
|
||||
|
||||
// GetByPath walks path (object keys and non-negative array indices) from
// the document root and returns the addressed value as a raw Node. Any
// other path element type panics. After locating the value it is skipped
// once to find its end, and the [start, p) span is bounds-checked against
// the document before slicing.
func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
    self.parser.p = 0

    var err types.ParsingError
    for _, p := range path {
        if idx, ok := p.(int); ok && idx >= 0 {
            if err = self.parser.searchIndex(idx); err != 0 {
                return Node{}, self.parser.ExportError(err)
            }
        } else if key, ok := p.(string); ok {
            if err = self.parser.searchKey(key); err != 0 {
                return Node{}, self.parser.ExportError(err)
            }
        } else {
            panic("path must be either int(>=0) or string")
        }
    }

    var start = self.parser.p
    if start, err = self.parser.skip(); err != 0 {
        return Node{}, self.parser.ExportError(err)
    }
    // reject out-of-bounds or empty spans before slicing the raw text
    ns := len(self.parser.s)
    if self.parser.p > ns || start >= ns || start>=self.parser.p {
        return Node{}, fmt.Errorf("skip %d char out of json boundary", start)
    }

    // classify the raw value by its first byte
    t := switchRawType(self.parser.s[start])
    if t == _V_NONE {
        return Node{}, self.parser.ExportError(err)
    }

    return newRawNode(self.parser.s[start:self.parser.p], t), nil
}
|
|
@ -0,0 +1,575 @@
|
|||
/*
|
||||
* Copyright 2022 ByteDance Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
`encoding/base64`
|
||||
`runtime`
|
||||
`strconv`
|
||||
`unsafe`
|
||||
|
||||
`github.com/bytedance/sonic/internal/native/types`
|
||||
`github.com/bytedance/sonic/internal/rt`
|
||||
)
|
||||
|
||||
// _blankCharsMask is a bitmask of the JSON whitespace characters
// (' ', '\t', '\r', '\n'), indexed by byte value.
const _blankCharsMask = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')

// Literal tokens and empty-container texts shared by the scanner/encoder.
const (
    bytesNull   = "null"
    bytesTrue   = "true"
    bytesFalse  = "false"
    bytesObject = "{}"
    bytesArray  = "[]"
)

// isSpace reports whether c is a JSON whitespace character.
// The shift is performed on uint64 so the test is also correct on 32-bit
// platforms: the previous int shift made 1<<' ' (1<<32) overflow a 32-bit
// int to 0, misclassifying ' ' as non-whitespace. Shift counts >= 64 are
// defined in Go to yield 0, so bytes above the mask range report false.
func isSpace(c byte) bool {
    return (uint64(1)<<c)&_blankCharsMask != 0
}
|
||||
|
||||
// skipBlank returns the offset of the first non-whitespace byte of src at
// or after pos, or -types.ERR_EOF when only whitespace remains. It scans
// with raw pointers for speed; src is kept alive across the loop.
//go:nocheckptr
func skipBlank(src string, pos int) int {
    se := uintptr(rt.IndexChar(src, len(src)))
    sp := uintptr(rt.IndexChar(src, pos))

    for sp < se {
        if !isSpace(*(*byte)(unsafe.Pointer(sp))) {
            break
        }
        sp += 1
    }
    if sp >= se {
        return -int(types.ERR_EOF)
    }
    runtime.KeepAlive(src)
    return int(sp - uintptr(rt.IndexChar(src, 0)))
}
|
||||
|
||||
func decodeNull(src string, pos int) (ret int) {
|
||||
ret = pos + 4
|
||||
if ret > len(src) {
|
||||
return -int(types.ERR_EOF)
|
||||
}
|
||||
if src[pos:ret] == bytesNull {
|
||||
return ret
|
||||
} else {
|
||||
return -int(types.ERR_INVALID_CHAR)
|
||||
}
|
||||
}
|
||||
|
||||
func decodeTrue(src string, pos int) (ret int) {
|
||||
ret = pos + 4
|
||||
if ret > len(src) {
|
||||
return -int(types.ERR_EOF)
|
||||
}
|
||||
if src[pos:ret] == bytesTrue {
|
||||
return ret
|
||||
} else {
|
||||
return -int(types.ERR_INVALID_CHAR)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func decodeFalse(src string, pos int) (ret int) {
|
||||
ret = pos + 5
|
||||
if ret > len(src) {
|
||||
return -int(types.ERR_EOF)
|
||||
}
|
||||
if src[pos:ret] == bytesFalse {
|
||||
return ret
|
||||
}
|
||||
return -int(types.ERR_INVALID_CHAR)
|
||||
}
|
||||
|
||||
//go:nocheckptr
|
||||
func decodeString(src string, pos int) (ret int, v string) {
|
||||
ret, ep := skipString(src, pos)
|
||||
if ep == -1 {
|
||||
(*rt.GoString)(unsafe.Pointer(&v)).Ptr = rt.IndexChar(src, pos+1)
|
||||
(*rt.GoString)(unsafe.Pointer(&v)).Len = ret - pos - 2
|
||||
return ret, v
|
||||
}
|
||||
|
||||
vv, ok := unquoteBytes(rt.Str2Mem(src[pos:ret]))
|
||||
if !ok {
|
||||
return -int(types.ERR_INVALID_CHAR), ""
|
||||
}
|
||||
|
||||
runtime.KeepAlive(src)
|
||||
return ret, rt.Mem2Str(vv)
|
||||
}
|
||||
|
||||
func decodeBinary(src string, pos int) (ret int, v []byte) {
|
||||
var vv string
|
||||
ret, vv = decodeString(src, pos)
|
||||
if ret < 0 {
|
||||
return ret, nil
|
||||
}
|
||||
var err error
|
||||
v, err = base64.StdEncoding.DecodeString(vv)
|
||||
if err != nil {
|
||||
return -int(types.ERR_INVALID_CHAR), nil
|
||||
}
|
||||
return ret, v
|
||||
}
|
||||
|
||||
// isDigit reports whether c is an ASCII decimal digit.
func isDigit(c byte) bool {
    return '0' <= c && c <= '9'
}
|
||||
|
||||
// decodeInt64 parses a signed decimal integer at src[pos]. It scans an
// optional '-' followed by digits with raw pointers, rejects a trailing
// '.'/'e'/'E' (that would make it a float, reported as
// ERR_INVALID_NUMBER_FMT), then delegates to strconv.ParseInt.
// On range overflow the end offset is still returned alongside the error
// so the caller may fall back to float parsing.
//go:nocheckptr
func decodeInt64(src string, pos int) (ret int, v int64, err error) {
    sp := uintptr(rt.IndexChar(src, pos))
    ss := uintptr(sp)
    se := uintptr(rt.IndexChar(src, len(src)))
    if uintptr(sp) >= se {
        return -int(types.ERR_EOF), 0, nil
    }

    // optional leading minus sign
    if c := *(*byte)(unsafe.Pointer(sp)); c == '-' {
        sp += 1
    }
    if sp == se {
        return -int(types.ERR_EOF), 0, nil
    }

    // consume the digit run
    for ; sp < se; sp += uintptr(1) {
        if !isDigit(*(*byte)(unsafe.Pointer(sp))) {
            break
        }
    }

    // a fraction or exponent means this is not an integer
    if sp < se {
        if c := *(*byte)(unsafe.Pointer(sp)); c == '.' || c == 'e' || c == 'E' {
            return -int(types.ERR_INVALID_NUMBER_FMT), 0, nil
        }
    }

    // build a zero-copy view over the scanned digits for strconv
    var vv string
    ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
    (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss)
    (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos

    v, err = strconv.ParseInt(vv, 10, 64)
    if err != nil {
        //NOTICE: allow overflow here
        if err.(*strconv.NumError).Err == strconv.ErrRange {
            return ret, 0, err
        }
        return -int(types.ERR_INVALID_CHAR), 0, err
    }

    runtime.KeepAlive(src)
    return ret, v, nil
}
|
||||
|
||||
// isNumberChars reports whether c may appear inside a JSON number
// literal: a decimal digit, a sign, an exponent marker, or a point.
func isNumberChars(c byte) bool {
    switch c {
    case '+', '-', '.', 'e', 'E':
        return true
    default:
        return '0' <= c && c <= '9'
    }
}
|
||||
|
||||
// decodeFloat64 parses a floating-point number at src[pos]. It scans an
// optional '-' followed by any run of number characters with raw
// pointers, then delegates validation and conversion to
// strconv.ParseFloat. On range overflow the end offset is still returned
// alongside the error.
//go:nocheckptr
func decodeFloat64(src string, pos int) (ret int, v float64, err error) {
    sp := uintptr(rt.IndexChar(src, pos))
    ss := uintptr(sp)
    se := uintptr(rt.IndexChar(src, len(src)))
    if uintptr(sp) >= se {
        return -int(types.ERR_EOF), 0, nil
    }

    // optional leading minus sign
    if c := *(*byte)(unsafe.Pointer(sp)); c == '-' {
        sp += 1
    }
    if sp == se {
        return -int(types.ERR_EOF), 0, nil
    }

    // consume every byte that could belong to a number literal;
    // strconv.ParseFloat performs the strict syntax check
    for ; sp < se; sp += uintptr(1) {
        if !isNumberChars(*(*byte)(unsafe.Pointer(sp))) {
            break
        }
    }

    // build a zero-copy view over the scanned bytes for strconv
    var vv string
    ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
    (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss)
    (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos

    v, err = strconv.ParseFloat(vv, 64)
    if err != nil {
        //NOTICE: allow overflow here
        if err.(*strconv.NumError).Err == strconv.ErrRange {
            return ret, 0, err
        }
        return -int(types.ERR_INVALID_CHAR), 0, err
    }

    runtime.KeepAlive(src)
    return ret, v, nil
}
|
||||
|
||||
// decodeValue decodes one JSON value at src[pos] (after leading
// whitespace) and returns its end offset plus a JsonState describing it.
// Containers are not descended into: '{' and '[' return V_OBJECT/V_ARRAY
// positioned just past the opening bracket. Strings are not unquoted
// here; Iv carries the content start and Ep the first escape position.
// Numbers try integer first and fall back to float only on a
// fraction/exponent. Negative returns encode a types.ParsingError.
func decodeValue(src string, pos int) (ret int, v types.JsonState) {
    pos = skipBlank(src, pos)
    if pos < 0 {
        return pos, types.JsonState{Vt: types.ValueType(pos)}
    }
    switch c := src[pos]; c {
    case 'n':
        ret = decodeNull(src, pos)
        if ret < 0 {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
        return ret, types.JsonState{Vt: types.V_NULL}
    case '"':
        var ep int
        ret, ep = skipString(src, pos)
        if ret < 0 {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
        return ret, types.JsonState{Vt: types.V_STRING, Iv: int64(pos + 1), Ep: ep}
    case '{':
        return pos + 1, types.JsonState{Vt: types.V_OBJECT}
    case '[':
        return pos + 1, types.JsonState{Vt: types.V_ARRAY}
    case 't':
        ret = decodeTrue(src, pos)
        if ret < 0 {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
        return ret, types.JsonState{Vt: types.V_TRUE}
    case 'f':
        ret = decodeFalse(src, pos)
        if ret < 0 {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
        return ret, types.JsonState{Vt: types.V_FALSE}
    case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
        // integer first; only a format error (fraction/exponent seen)
        // triggers the float path
        var iv int64
        ret, iv, _ = decodeInt64(src, pos)
        if ret >= 0 {
            return ret, types.JsonState{Vt: types.V_INTEGER, Iv: iv, Ep: pos}
        } else if ret != -int(types.ERR_INVALID_NUMBER_FMT) {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
        var fv float64
        ret, fv, _ = decodeFloat64(src, pos)
        if ret >= 0 {
            return ret, types.JsonState{Vt: types.V_DOUBLE, Dv: fv, Ep: pos}
        } else {
            return ret, types.JsonState{Vt: types.ValueType(ret)}
        }
    default:
        return -int(types.ERR_INVALID_CHAR), types.JsonState{Vt:-types.ValueType(types.ERR_INVALID_CHAR)}
    }
}
|
||||
|
||||
// skipNumber validates and skips one JSON number literal at src[pos],
// returning the offset just past it or a negated types.ParsingError.
// It runs a small state machine over the bytes: at most one decimal
// point, at most one exponent, signs only directly after 'e'/'E', and a
// digit required after '-', '.' and a sign.
//go:nocheckptr
func skipNumber(src string, pos int) (ret int) {
    sp := uintptr(rt.IndexChar(src, pos))
    se := uintptr(rt.IndexChar(src, len(src)))
    if uintptr(sp) >= se {
        return -int(types.ERR_EOF)
    }

    // optional leading minus sign
    if c := *(*byte)(unsafe.Pointer(sp)); c == '-' {
        sp += 1
    }
    ss := sp

    var pointer bool       // a '.' has been seen
    var exponent bool      // an 'e'/'E' has been seen
    var lastIsDigit bool
    var nextNeedDigit = true

    for ; sp < se; sp += uintptr(1) {
        c := *(*byte)(unsafe.Pointer(sp))
        if isDigit(c) {
            lastIsDigit = true
            nextNeedDigit = false
            continue
        } else if nextNeedDigit {
            return -int(types.ERR_INVALID_CHAR)
        } else if c == '.' {
            // one point only, digits on both sides, not in the exponent
            if !lastIsDigit || pointer || exponent || sp == ss {
                return -int(types.ERR_INVALID_CHAR)
            }
            pointer = true
            lastIsDigit = false
            nextNeedDigit = true
            continue
        } else if c == 'e' || c == 'E' {
            // one exponent only, preceded by a digit, not at the very end
            if !lastIsDigit || exponent {
                return -int(types.ERR_INVALID_CHAR)
            }
            if sp == se-1 {
                return -int(types.ERR_EOF)
            }
            exponent = true
            lastIsDigit = false
            nextNeedDigit = false
            continue
        } else if c == '-' || c == '+' {
            // a sign is only valid directly after the exponent marker
            if prev := *(*byte)(unsafe.Pointer(sp - 1)); prev != 'e' && prev != 'E' {
                return -int(types.ERR_INVALID_CHAR)
            }
            lastIsDigit = false
            nextNeedDigit = true
            continue
        } else {
            // any other byte terminates the literal
            break
        }
    }

    // the literal must not end expecting another digit
    if nextNeedDigit {
        return -int(types.ERR_EOF)
    }

    runtime.KeepAlive(src)
    return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
}
|
||||
|
||||
// skipString skips the quoted string literal starting at src[pos].
// It returns the offset just past the closing quote plus the offset of
// the first backslash (ep), or ep == -1 when the literal contains no
// escapes. Failures return a negated types.ParsingError and ep == -1.
//go:nocheckptr
func skipString(src string, pos int) (ret int, ep int) {
    if pos+1 >= len(src) {
        return -int(types.ERR_EOF), -1
    }

    sp := uintptr(rt.IndexChar(src, pos))
    se := uintptr(rt.IndexChar(src, len(src)))

    // not start with quote
    if *(*byte)(unsafe.Pointer(sp)) != '"' {
        return -int(types.ERR_INVALID_CHAR), -1
    }
    sp += 1

    ep = -1
    for sp < se {
        c := *(*byte)(unsafe.Pointer(sp))
        if c == '\\' {
            // record only the first escape; skip the escaped byte too
            if ep == -1 {
                ep = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
            }
            sp += 2
            continue
        }
        sp += 1
        if c == '"' {
            return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep
        }
    }

    runtime.KeepAlive(src)
    // not found the closed quote until EOF
    return -int(types.ERR_EOF), -1
}
|
||||
|
||||
// skipPair skips a bracketed region starting at src[pos] that opens with
// lchar and closes with rchar (e.g. '{'/'}' or '['/']'), by counting
// bracket depth while ignoring brackets inside string literals and
// escaped bytes. It returns the offset just past the matching close
// bracket, or a negated types.ParsingError. Note this only matches
// brackets; it does not validate the interior syntax.
//go:nocheckptr
func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) {
    if pos+1 >= len(src) {
        return -int(types.ERR_EOF)
    }

    sp := uintptr(rt.IndexChar(src, pos))
    se := uintptr(rt.IndexChar(src, len(src)))

    if *(*byte)(unsafe.Pointer(sp)) != lchar {
        return -int(types.ERR_INVALID_CHAR)
    }

    sp += 1
    nbrace := 1      // current bracket depth
    inquote := false // inside a string literal

    for sp < se {
        c := *(*byte)(unsafe.Pointer(sp))
        if c == '\\' {
            // skip the escaped byte as well
            sp += 2
            continue
        } else if c == '"' {
            inquote = !inquote
        } else if c == lchar {
            if !inquote {
                nbrace += 1
            }
        } else if c == rchar {
            if !inquote {
                nbrace -= 1
                if nbrace == 0 {
                    sp += 1
                    break
                }
            }
        }
        sp += 1
    }

    // unbalanced brackets at EOF
    if nbrace != 0 {
        return -int(types.ERR_INVALID_CHAR)
    }

    runtime.KeepAlive(src)
    return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
}
|
||||
|
||||
// skipValueFast skips one JSON value at src[pos] (after whitespace),
// using bracket matching (skipPair) for containers instead of full
// recursive validation. It returns the end offset (or a negated
// types.ParsingError) and the value's start offset.
func skipValueFast(src string, pos int) (ret int, start int) {
    pos = skipBlank(src, pos)
    if pos < 0 {
        return pos, -1
    }
    switch c := src[pos]; c {
    case 'n':
        ret = decodeNull(src, pos)
    case '"':
        ret, _ = skipString(src, pos)
    case '{':
        ret = skipPair(src, pos, '{', '}')
    case '[':
        ret = skipPair(src, pos, '[', ']')
    case 't':
        ret = decodeTrue(src, pos)
    case 'f':
        ret = decodeFalse(src, pos)
    case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
        ret = skipNumber(src, pos)
    default:
        ret = -int(types.ERR_INVALID_CHAR)
    }
    return ret, pos
}
|
||||
|
||||
// skipValue skips one JSON value at src[pos] (after whitespace) with full
// recursive validation of objects and arrays. It returns the end offset
// (or a negated types.ParsingError) and the value's start offset.
func skipValue(src string, pos int) (ret int, start int) {
    pos = skipBlank(src, pos)
    if pos < 0 {
        return pos, -1
    }
    switch c := src[pos]; c {
    case 'n':
        ret = decodeNull(src, pos)
    case '"':
        ret, _ = skipString(src, pos)
    case '{':
        ret, _ = skipObject(src, pos)
    case '[':
        ret, _ = skipArray(src, pos)
    case 't':
        ret = decodeTrue(src, pos)
    case 'f':
        ret = decodeFalse(src, pos)
    case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
        ret = skipNumber(src, pos)
    default:
        ret = -int(types.ERR_INVALID_CHAR)
    }
    return ret, pos
}
|
||||
|
||||
// skipObject validates and skips a JSON object at src[pos] (after
// whitespace): '{', then "key" ':' value pairs separated by ',', then
// '}'. It returns the offset just past the closing brace (or a negated
// types.ParsingError) and the offset of the opening brace.
func skipObject(src string, pos int) (ret int, start int) {
    start = skipBlank(src, pos)
    if start < 0 {
        return start, -1
    }

    if src[start] != '{' {
        return -int(types.ERR_INVALID_CHAR), -1
    }

    pos = start + 1
    pos = skipBlank(src, pos)
    if pos < 0 {
        return pos, -1
    }
    // empty object
    if src[pos] == '}' {
        return pos + 1, start
    }

    for {
        // key must be a string literal
        pos, _ = skipString(src, pos)
        if pos < 0 {
            return pos, -1
        }

        pos = skipBlank(src, pos)
        if pos < 0 {
            return pos, -1
        }
        if src[pos] != ':' {
            return -int(types.ERR_INVALID_CHAR), -1
        }

        pos++
        pos, _ = skipValue(src, pos)
        if pos < 0 {
            return pos, -1
        }

        pos = skipBlank(src, pos)
        if pos < 0 {
            return pos, -1
        }
        if src[pos] == '}' {
            return pos + 1, start
        }
        // only ',' may continue the member list
        if src[pos] != ',' {
            return -int(types.ERR_INVALID_CHAR), -1
        }

        pos++
        pos = skipBlank(src, pos)
        if pos < 0 {
            return pos, -1
        }

    }
}
|
||||
|
||||
// skipArray validates and skips a JSON array at src[pos] (after
// whitespace): '[', then values separated by ',', then ']'. It returns
// the offset just past the closing bracket (or a negated
// types.ParsingError) and the offset of the opening bracket.
func skipArray(src string, pos int) (ret int, start int) {
    start = skipBlank(src, pos)
    if start < 0 {
        return start, -1
    }

    if src[start] != '[' {
        return -int(types.ERR_INVALID_CHAR), -1
    }

    pos = start + 1
    pos = skipBlank(src, pos)
    if pos < 0 {
        return pos, -1
    }
    // empty array
    if src[pos] == ']' {
        return pos + 1, start
    }

    for {
        pos, _ = skipValue(src, pos)
        if pos < 0 {
            return pos, -1
        }

        pos = skipBlank(src, pos)
        if pos < 0 {
            return pos, -1
        }
        if src[pos] == ']' {
            return pos + 1, start
        }
        // only ',' may continue the element list
        if src[pos] != ',' {
            return -int(types.ERR_INVALID_CHAR), -1
        }
        pos++
    }
}
|
|
@ -0,0 +1,259 @@
|
|||
/*
|
||||
* Copyright 2021 ByteDance Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package ast
|
||||
|
||||
import (
|
||||
`sync`
|
||||
`unicode/utf8`
|
||||
|
||||
`github.com/bytedance/sonic/internal/rt`
|
||||
)
|
||||
|
||||
const (
    // _MaxBuffer is the initial capacity of pooled encode buffers.
    _MaxBuffer = 1024 // 1KB buffer size
)
|
||||
|
||||
// quoteString appends the JSON-quoted form of s to *e, escaping control
// characters, quotes, backslashes, and the U+2028/U+2029 line separators
// (which are invalid inside JavaScript strings). Runs of safe bytes are
// copied in bulk between escape points. The escaping mirrors
// encoding/json with HTML escaping governed by the safeSet table.
func quoteString(e *[]byte, s string) {
    *e = append(*e, '"')
    start := 0
    for i := 0; i < len(s); {
        if b := s[i]; b < utf8.RuneSelf {
            // ASCII fast path: safe bytes are deferred to a bulk copy
            if safeSet[b] {
                i++
                continue
            }
            // flush the pending safe run, then emit the escape
            if start < i {
                *e = append(*e, s[start:i]...)
            }
            *e = append(*e, '\\')
            switch b {
            case '\\', '"':
                *e = append(*e, b)
            case '\n':
                *e = append(*e, 'n')
            case '\r':
                *e = append(*e, 'r')
            case '\t':
                *e = append(*e, 't')
            default:
                // This encodes bytes < 0x20 except for \t, \n and \r.
                // If escapeHTML is set, it also escapes <, >, and &
                // because they can lead to security holes when
                // user-controlled strings are rendered into JSON
                // and served to some browsers.
                *e = append(*e, `u00`...)
                *e = append(*e, hex[b>>4])
                *e = append(*e, hex[b&0xF])
            }
            i++
            start = i
            continue
        }
        c, size := utf8.DecodeRuneInString(s[i:])
        // if c == utf8.RuneError && size == 1 {
        //     if start < i {
        //         e.Write(s[start:i])
        //     }
        //     e.WriteString(`\ufffd`)
        //     i += size
        //     start = i
        //     continue
        // }
        if c == '\u2028' || c == '\u2029' {
            if start < i {
                *e = append(*e, s[start:i]...)
            }
            *e = append(*e, `\u202`...)
            *e = append(*e, hex[c&0xF])
            i += size
            start = i
            continue
        }
        i += size
    }
    // flush the trailing safe run
    if start < len(s) {
        *e = append(*e, s[start:]...)
    }
    *e = append(*e, '"')
}
|
||||
|
||||
// bytesPool caches *[]byte encode buffers for reuse (see newBuffer/freeBuffer).
var bytesPool = sync.Pool{}
|
||||
|
||||
func (self *Node) MarshalJSON() ([]byte, error) {
|
||||
buf := newBuffer()
|
||||
err := self.encode(buf)
|
||||
if err != nil {
|
||||
freeBuffer(buf)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := make([]byte, len(*buf))
|
||||
copy(ret, *buf)
|
||||
freeBuffer(buf)
|
||||
return ret, err
|
||||
}
|
||||
|
||||
func newBuffer() *[]byte {
|
||||
if ret := bytesPool.Get(); ret != nil {
|
||||
return ret.(*[]byte)
|
||||
} else {
|
||||
buf := make([]byte, 0, _MaxBuffer)
|
||||
return &buf
|
||||
}
|
||||
}
|
||||
|
||||
// freeBuffer resets buf to zero length (keeping its capacity) and returns
// it to the pool for reuse.
func freeBuffer(buf *[]byte) {
    *buf = (*buf)[:0]
    bytesPool.Put(buf)
}
|
||||
|
||||
// encode appends the JSON text of the node to *buf, dispatching on the
// node type. Raw nodes are copied verbatim; V_NONE and V_ERROR surface
// as errors rather than output.
func (self *Node) encode(buf *[]byte) error {
    if self.IsRaw() {
        return self.encodeRaw(buf)
    }
    switch self.Type() {
    case V_NONE:
        return ErrNotExist
    case V_ERROR:
        return self.Check()
    case V_NULL:
        return self.encodeNull(buf)
    case V_TRUE:
        return self.encodeTrue(buf)
    case V_FALSE:
        return self.encodeFalse(buf)
    case V_ARRAY:
        return self.encodeArray(buf)
    case V_OBJECT:
        return self.encodeObject(buf)
    case V_STRING:
        return self.encodeString(buf)
    case V_NUMBER:
        return self.encodeNumber(buf)
    case V_ANY:
        return self.encodeInterface(buf)
    default:
        return ErrUnsupportType
    }
}
|
||||
|
||||
// encodeRaw appends the node's raw JSON text to *buf unchanged.
func (self *Node) encodeRaw(buf *[]byte) error {
    raw, err := self.Raw()
    if err != nil {
        return err
    }
    *buf = append(*buf, raw...)
    return nil
}
|
||||
|
||||
// encodeNull appends the literal "null" to *buf.
func (self *Node) encodeNull(buf *[]byte) error {
    *buf = append(*buf, bytesNull...)
    return nil
}
|
||||
|
||||
// encodeTrue appends the literal "true" to *buf.
func (self *Node) encodeTrue(buf *[]byte) error {
    *buf = append(*buf, bytesTrue...)
    return nil
}
|
||||
|
||||
// encodeFalse appends the literal "false" to *buf.
func (self *Node) encodeFalse(buf *[]byte) error {
    *buf = append(*buf, bytesFalse...)
    return nil
}
|
||||
|
||||
// encodeNumber appends the number's original text — reconstructed from
// the pointer/length stored in self.p and self.v — to *buf verbatim.
func (self *Node) encodeNumber(buf *[]byte) error {
    str := rt.StrFrom(self.p, self.v)
    *buf = append(*buf, str...)
    return nil
}
|
||||
|
||||
// encodeString appends the quoted, escaped string value to *buf. A zero
// length (self.v == 0) short-circuits to the empty string literal "".
func (self *Node) encodeString(buf *[]byte) error {
    if self.v == 0 {
        *buf = append(*buf, '"', '"')
        return nil
    }

    quote(buf, rt.StrFrom(self.p, self.v))
    return nil
}
|
||||
|
||||
// encodeArray appends the JSON text of an array node to *buf. Lazily
// parsed elements are materialized first (skipAllIndex); elements are
// then walked in memory order via unsafe_next and joined with commas.
func (self *Node) encodeArray(buf *[]byte) error {
    if self.isLazy() {
        if err := self.skipAllIndex(); err != nil {
            return err
        }
    }

    nb := self.len()
    if nb == 0 {
        *buf = append(*buf, bytesArray...)
        return nil
    }

    *buf = append(*buf, '[')

    // first element, then ",element" for the rest
    var p = (*Node)(self.p)
    err := p.encode(buf)
    if err != nil {
        return err
    }
    for i := 1; i < nb; i++ {
        *buf = append(*buf, ',')
        p = p.unsafe_next()
        err := p.encode(buf)
        if err != nil {
            return err
        }
    }

    *buf = append(*buf, ']')
    return nil
}
|
||||
|
||||
// encode appends `"key":value` for the pair to *buf.
// NOTE(review): the first branch keys off len(*buf) == 0 — the OUTPUT
// buffer, not the key — and emits an empty key ("":). It looks
// unreachable from encodeObject, which has already appended '{' before
// calling this; confirm whether it was meant to test len(self.Key).
func (self *Pair) encode(buf *[]byte) error {
    if len(*buf) == 0 {
        *buf = append(*buf, '"', '"', ':')
        return self.Value.encode(buf)
    }

    quote(buf, self.Key)
    *buf = append(*buf, ':')

    return self.Value.encode(buf)
}
|
||||
|
||||
// encodeObject appends the JSON text of an object node to *buf. Lazily
// parsed members are materialized first (skipAllKey); pairs are then
// walked in memory order via unsafe_next and joined with commas.
func (self *Node) encodeObject(buf *[]byte) error {
    if self.isLazy() {
        if err := self.skipAllKey(); err != nil {
            return err
        }
    }

    nb := self.len()
    if nb == 0 {
        *buf = append(*buf, bytesObject...)
        return nil
    }

    *buf = append(*buf, '{')

    // first pair, then ",pair" for the rest
    var p = (*Pair)(self.p)
    err := p.encode(buf)
    if err != nil {
        return err
    }
    for i := 1; i < nb; i++ {
        *buf = append(*buf, ',')
        p = p.unsafe_next()
        err := p.encode(buf)
        if err != nil {
            return err
        }
    }

    *buf = append(*buf, '}')
    return nil
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue