Merge pull request #754 from QAQ-rookie/feature/karmada-get

add get function to karmadactl

This commit is contained in:
commit 74371bf550
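The new `get` subcommand queries resources across member clusters. A few illustrative invocations, based only on the flags registered in NewCmdGet below (resource and cluster names here are placeholders):

    karmadactl get deployment nginx -n default -C member1,member2
    karmadactl get pods -l app=nginx
    karmadactl get services -C member1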
go.mod | 3

@@ -11,6 +11,7 @@ require (
    github.com/onsi/ginkgo v1.16.4
    github.com/onsi/gomega v1.14.0
    github.com/prometheus/client_golang v1.11.0
    github.com/pkg/errors v0.9.1
    github.com/spf13/cobra v1.1.3
    github.com/spf13/pflag v1.0.5
    golang.org/x/tools v0.1.2
@@ -19,11 +20,13 @@ require (
    k8s.io/apiextensions-apiserver v0.21.3
    k8s.io/apimachinery v0.21.3
    k8s.io/apiserver v0.21.3
    k8s.io/cli-runtime v0.21.3
    k8s.io/client-go v0.21.3
    k8s.io/code-generator v0.21.3
    k8s.io/component-base v0.21.3
    k8s.io/component-helpers v0.21.3
    k8s.io/klog/v2 v2.9.0
    k8s.io/kubectl v0.21.3
    k8s.io/kubernetes v1.21.3
    k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471
    sigs.k8s.io/cluster-api v0.4.0

go.sum | 27

@ -41,6 +41,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
|
@ -58,6 +59,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
|||
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ=
|
||||
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
|
@ -120,6 +122,7 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
|||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
|
@ -174,6 +177,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -202,6 +206,7 @@ github.com/drone/envsubst/v2 v2.0.0-20210615175204-7bf45dbf5372/go.mod h1:esf2rs
|
|||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||
|
@ -217,6 +222,7 @@ github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL
|
|||
github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/evanphx/json-patch/v5 v5.2.0 h1:8ozOH5xxoMYDt5/u+yMTsVXydVCbTORFnOOoq2lumco=
|
||||
github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
|
@ -230,12 +236,14 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
|
|||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE=
|
||||
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
|
||||
github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M=
|
||||
github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -389,6 +397,7 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -407,6 +416,7 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
|
|||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
|
||||
|
@ -494,6 +504,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
|
||||
|
@ -537,6 +548,7 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT
|
|||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
|
@ -545,9 +557,11 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
|||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
|
||||
github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
|
@ -556,6 +570,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
|||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
|
||||
|
@ -613,6 +628,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv
|
|||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -664,6 +680,7 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO
|
|||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
|
||||
github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
|
@ -671,6 +688,7 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
|
|||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
|
@ -711,6 +729,7 @@ github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
|
|||
github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
|
@ -740,6 +759,7 @@ github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59b
|
|||
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
@ -770,6 +790,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
|
@ -1240,8 +1261,10 @@ gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@ -1258,6 +1281,7 @@ k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII=
|
|||
k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
|
||||
k8s.io/apiserver v0.21.3 h1:QxAgE1ZPQG5cPlHScHTnLxP9H/kU3zjH1Vnd8G+n5OI=
|
||||
k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU=
|
||||
k8s.io/cli-runtime v0.21.3 h1:eXevRomULAAGjQ7m6qo+AWHvtVRqaLG8WQICEBwjtmo=
|
||||
k8s.io/cli-runtime v0.21.3/go.mod h1:h65y0uXIXDnNjd5J+F3CvQU3ZNplH4+rjqbII7JkD4A=
|
||||
k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg=
|
||||
k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
|
||||
|
@ -1288,6 +1312,7 @@ k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iL
|
|||
k8s.io/kube-proxy v0.21.3/go.mod h1:if2JLVYQIksfrwkGrPVqwMv4ef9+9aZwsA1s4tGsi4k=
|
||||
k8s.io/kube-scheduler v0.21.3 h1:Tm5NjkoShREiwgC8ldsrRxB6S2DlkmVP6Vdi6OY0n4Q=
|
||||
k8s.io/kube-scheduler v0.21.3/go.mod h1:2UeqsPooQyBrFTLmEwOIrluLRasLw7aQuBH+p3IIOW8=
|
||||
k8s.io/kubectl v0.21.3 h1:RmHvvz7tLnFmVqUzJuR44D8oE5zv1iyDojxSQllY+II=
|
||||
k8s.io/kubectl v0.21.3/go.mod h1:/x/kzrhfL1h1W07z6a1UTbd8SWZUYAWXskigkG4OBCg=
|
||||
k8s.io/kubelet v0.21.3/go.mod h1:QT3On5Zlarq8Vpt96uTAnjoHB45fUyyxEwutMLmsxac=
|
||||
k8s.io/kubernetes v1.21.3 h1:wBFDBWqkKtr010Sp9dGv8gmG87Yr5BsTCaJpYKrL3NU=
|
||||
|
@ -1323,9 +1348,11 @@ sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WG
|
|||
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=
|
||||
sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA=
|
||||
sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA=
|
||||
sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE=
|
||||
sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY=
|
||||
sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0=
|
||||
sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo=
|
||||
sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg=
|
||||
sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
|
||||
sigs.k8s.io/mcs-api v0.1.0 h1:edDbg0oRGfXw8TmZjKYep06LcJLv/qcYLidejnUp0PM=
|
||||
sigs.k8s.io/mcs-api v0.1.0/go.mod h1:gGiAryeFNB4GBsq2LBmVqSgKoobLxt+p7ii/WG5QYYw=

@@ -0,0 +1,546 @@
package karmadactl

import (
    "context"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"
    "sync"

    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/cli-runtime/pkg/printers"
    "k8s.io/cli-runtime/pkg/resource"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/kubectl/pkg/cmd/get"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "sigs.k8s.io/controller-runtime/pkg/client"

    clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
    "github.com/karmada-io/karmada/pkg/karmadactl/options"
    "github.com/karmada-io/karmada/pkg/util/gclient"
)

const printColumnClusterNum = 1

var (
    getIn  = os.Stdin
    getOut = os.Stdout
    getErr = os.Stderr

    podColumns = []metav1.TableColumnDefinition{
        {Name: "Cluster", Type: "string", Format: "", Priority: 0},
        {Name: "ADOPTION", Type: "string", Format: "", Priority: 0},
    }

    noPushModeMessage = "The karmadactl get command now only supports Push mode, [ %s ] are not push mode\n"
    getShort          = `Display one or many resources`
    defaultKubeConfig = filepath.Join(os.Getenv("HOME"), ".kube/config")
)

// NewCmdGet creates the `get` command for karmadactl.
func NewCmdGet(out io.Writer, karmadaConfig KarmadaConfig) *cobra.Command {
    ioStreams := genericclioptions.IOStreams{In: getIn, Out: getOut, ErrOut: getErr}
    o := NewCommandGetOptions("karmadactl", ioStreams)
    cmd := &cobra.Command{
        Use:                   "get [NAME | -l label | -n namespace] [flags]",
        DisableFlagsInUseLine: true,
        Short:                 getShort,
        Run: func(cmd *cobra.Command, args []string) {
            cmdutil.CheckErr(o.Complete(cmd, args))
            cmdutil.CheckErr(o.Run(karmadaConfig, cmd, args))
        },
    }
    cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", "default", "-n=namespace or -n namespace")
    cmd.Flags().StringVarP(&o.LabelSelector, "labels", "l", "", "-l=label or -l label")
    cmd.Flags().StringSliceVarP(&o.Clusters, "clusters", "C", []string{}, "-C=member1,member2")
    o.GlobalCommandOptions.AddFlags(cmd.Flags())
    return cmd
}

// CommandGetOptions contains the input to the get command.
type CommandGetOptions struct {
    // global flags
    options.GlobalCommandOptions

    Clusters []string

    PrintFlags             *get.PrintFlags
    ToPrinter              func(*meta.RESTMapping, *bool, bool, bool) (printers.ResourcePrinterFunc, error)
    IsHumanReadablePrinter bool
    PrintWithOpenAPICols   bool

    CmdParent string

    resource.FilenameOptions

    Raw       string
    ChunkSize int64

    OutputWatchEvents bool

    LabelSelector     string
    FieldSelector     string
    AllNamespaces     bool
    Namespace         string
    ExplicitNamespace bool

    ServerPrint bool

    NoHeaders      bool
    Sort           bool
    IgnoreNotFound bool
    Export         bool

    genericclioptions.IOStreams
}

// NewCommandGetOptions returns a GetOptions with default chunk size 500.
func NewCommandGetOptions(parent string, streams genericclioptions.IOStreams) *CommandGetOptions {
    return &CommandGetOptions{
        PrintFlags: get.NewGetPrintFlags(),

        CmdParent: parent,

        IOStreams:   streams,
        ChunkSize:   500,
        ServerPrint: true,
    }
}

// Complete takes the command arguments and infers any remaining options.
func (g *CommandGetOptions) Complete(cmd *cobra.Command, args []string) error {
    newScheme := gclient.NewSchema()
    // human readable printers have special conversion rules, so we determine if we're using one.
    g.IsHumanReadablePrinter = true

    // check karmada config path
    env := os.Getenv("KUBECONFIG")
    if env != "" {
        g.KubeConfig = env
    }

    if g.KubeConfig == "" {
        g.KubeConfig = defaultKubeConfig
    }
    if !Exists(g.KubeConfig) {
        return ErrEmptyConfig
    }

    g.ToPrinter = func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) {
        // make a new copy of current flags / opts before mutating
        printFlags := g.PrintFlags.Copy()

        if withNamespace {
            _ = printFlags.EnsureWithNamespace()
        }
        if withKind {
            _ = printFlags.EnsureWithKind()
        }

        printer, err := printFlags.ToPrinter()
        if err != nil {
            return nil, err
        }
        printer, err = printers.NewTypeSetter(newScheme).WrapToPrinter(printer, nil)
        if err != nil {
            return nil, err
        }

        if g.ServerPrint {
            printer = &get.TablePrinter{Delegate: printer}
        }
        return printer.PrintObj, nil
    }
    return nil
}

// Obj holds an object fetched from a member cluster, together with its cluster name and REST mapping.
type Obj struct {
    Cluster string
    Infos   runtime.Object
    Mapping *meta.RESTMapping
}

// RBInfo maps a cluster/resource key to its ResourceBinding print info.
var RBInfo map[string]*OtherPrint

// OtherPrint carries the applied status shown in the ADOPTION column.
type OtherPrint struct {
    Applied interface{}
}

// Run performs the get operation.
func (g *CommandGetOptions) Run(karmadaConfig KarmadaConfig, cmd *cobra.Command, args []string) error {
    mux := sync.Mutex{}
    var wg sync.WaitGroup

    var objs []Obj
    var allErrs []error
    errs := sets.NewString()

    clusterInfos := make(map[string]*ClusterInfo)
    RBInfo = make(map[string]*OtherPrint)

    karmadaclient, err := clusterInfoInit(g, karmadaConfig, clusterInfos)
    if err != nil {
        return err
    }

    var noPushModeCluster []string
    wg.Add(len(g.Clusters))
    for idx := range g.Clusters {
        if clusterInfos[g.Clusters[idx]].ClusterSyncMode != clusterv1alpha1.Push {
            noPushModeCluster = append(noPushModeCluster, g.Clusters[idx])
            wg.Done()
            continue
        }

        if err := g.getSecretTokenInKarmada(karmadaclient, g.Clusters[idx], clusterInfos); err != nil {
            return errors.Wrap(err, "Method getSecretTokenInKarmada get Secret info in karmada failed, err is")
        }
        f := getFactory(g.Clusters[idx], clusterInfos)
        go g.getObjInfo(&wg, &mux, f, g.Clusters[idx], &objs, &allErrs, args)
    }
    wg.Wait()
    if len(noPushModeCluster) != 0 {
        fmt.Println(fmt.Sprintf(noPushModeMessage, strings.Join(noPushModeCluster, ",")))
    }

    table := &metav1.Table{}
    allTableRows, mapping, err := g.reconstructionRow(objs, table)
    if err != nil {
        return err
    }
    table.Rows = allTableRows

    setNoAdoption(mapping)
    setColumnDefinition(table)

    if len(table.Rows) == 0 {
        msg := fmt.Sprintf("%v from server (NotFound)", args)
        fmt.Println(msg)
        return nil
    }
    printObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(table)
    if err != nil {
        return err
    }
    newPrintObj := &unstructured.Unstructured{Object: printObj}

    var printer printers.ResourcePrinter
    var lastMapping *meta.RESTMapping

    // track if we write any output
    trackingWriter := &trackingWriterWrapper{Delegate: g.Out}
    // output an empty line separating output
    separatorWriter := &separatorWriterWrapper{Delegate: trackingWriter}

    w := printers.GetNewTabWriter(separatorWriter)
    if shouldGetNewPrinterForMapping(printer, lastMapping, mapping) {
        w.Flush()
        w.SetRememberedWidths(nil)

        // add linebreaks between resource groups (if there is more than one)
        // when it satisfies all following 3 conditions:
        // 1) it's not the first resource group
        // 2) it has row header
        // 3) we've written output since the last time we started a new set of headers
        if !g.NoHeaders && trackingWriter.Written > 0 {
            separatorWriter.SetReady(true)
        }

        printer, err = g.ToPrinter(mapping, nil, false, false)
        if err != nil {
            if !errs.Has(err.Error()) {
                errs.Insert(err.Error())
                allErrs = append(allErrs, err)
            }
            return err
        }
        //lastMapping = mapping
    }
    err = printer.PrintObj(newPrintObj, w)
    if err != nil {
        return err
    }
    w.Flush()

    return utilerrors.NewAggregate(allErrs)
}

// getObjInfo fetches the requested objects from a single member cluster.
func (g *CommandGetOptions) getObjInfo(wg *sync.WaitGroup, mux *sync.Mutex, f cmdutil.Factory,
    cluster string, objs *[]Obj, allErrs *[]error, args []string) {
    defer wg.Done()
    chunkSize := g.ChunkSize
    r := f.NewBuilder().
        Unstructured().
        NamespaceParam(g.Namespace).DefaultNamespace().AllNamespaces(g.AllNamespaces).
        FilenameParam(g.ExplicitNamespace, &g.FilenameOptions).
        LabelSelectorParam(g.LabelSelector).
        FieldSelectorParam(g.FieldSelector).
        RequestChunksOf(chunkSize).
        ResourceTypeOrNameArgs(true, args...).
        ContinueOnError().
        Latest().
        Flatten().
        TransformRequests(g.transformRequests).
        Do()

    r.IgnoreErrors(apierrors.IsNotFound)

    infos, err := r.Infos()
    if err != nil {
        *allErrs = append(*allErrs, err)
    }
    mux.Lock()
    var objInfo Obj
    for ix := range infos {
        objInfo = Obj{
            Cluster: cluster,
            Infos:   infos[ix].Object,
            Mapping: infos[ix].Mapping,
        }
        *objs = append(*objs, objInfo)
    }
    mux.Unlock()
}

// reconstructionRow rebuilds the table rows, inserting the cluster column and appending the ADOPTION column.
func (g *CommandGetOptions) reconstructionRow(objs []Obj, table *metav1.Table) ([]metav1.TableRow, *meta.RESTMapping, error) {
    var allTableRows []metav1.TableRow
    var mapping *meta.RESTMapping
    for ix := range objs {
        mapping = objs[ix].Mapping
        unstr, ok := objs[ix].Infos.(*unstructured.Unstructured)
        if !ok {
            return nil, nil, fmt.Errorf("attempt to decode non-Unstructured object")
        }
        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, table); err != nil {
            return nil, nil, err
        }
        for rowIdx := range table.Rows {
            var tempRow metav1.TableRow
            rbKey := getRBKey(mapping.Resource, table.Rows[rowIdx], objs[ix].Cluster)
            tempRow.Cells = append(append(tempRow.Cells, table.Rows[rowIdx].Cells[0], objs[ix].Cluster), table.Rows[rowIdx].Cells[1:]...)
            if _, ok := RBInfo[rbKey]; ok {
                tempRow.Cells = append(tempRow.Cells, "Y")
            } else {
                tempRow.Cells = append(tempRow.Cells, "N")
            }
            table.Rows[rowIdx].Cells = tempRow.Cells
        }
        allTableRows = append(allTableRows, table.Rows...)
    }
    return allTableRows, mapping, nil
}

type trackingWriterWrapper struct {
    Delegate io.Writer
    Written  int
}

func (t *trackingWriterWrapper) Write(p []byte) (n int, err error) {
    t.Written += len(p)
    return t.Delegate.Write(p)
}

type separatorWriterWrapper struct {
    Delegate io.Writer
    Ready    bool
}

func (s *separatorWriterWrapper) Write(p []byte) (n int, err error) {
    // If we're about to write non-empty bytes and `s` is ready,
    // we prepend an empty line to `p` and reset `s.Ready`.
    if len(p) != 0 && s.Ready {
        fmt.Fprintln(s.Delegate)
        s.Ready = false
    }
    return s.Delegate.Write(p)
}

func (s *separatorWriterWrapper) SetReady(state bool) {
    s.Ready = state
}

func shouldGetNewPrinterForMapping(printer printers.ResourcePrinter, lastMapping, mapping *meta.RESTMapping) bool {
    return printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource
}

// ClusterInfo holds the connection information of a member cluster registered with Karmada.
type ClusterInfo struct {
    APIEndpoint     string
    BearerToken     string
    CAData          string
    ClusterSyncMode clusterv1alpha1.ClusterSyncMode
}

func clusterInfoInit(g *CommandGetOptions, karmadaConfig KarmadaConfig, clusterInfos map[string]*ClusterInfo) (*rest.Config, error) {
    karmadaclient, err := karmadaConfig.GetRestConfig(g.KarmadaContext, g.KubeConfig)
    if err != nil {
        return nil, errors.Wrap(err, "Func GetRestConfig get karmada client failed, err is")
    }

    if err := getClusterInKarmada(karmadaclient, clusterInfos); err != nil {
        return nil, errors.Wrap(err, "Method getClusterInKarmada get cluster info in karmada failed, err is")
    }

    if err := getRBInKarmada(g.Namespace, karmadaclient); err != nil {
        return nil, err
    }

    if len(g.Clusters) <= 0 {
        for c := range clusterInfos {
            g.Clusters = append(g.Clusters, c)
        }
    }
    return karmadaclient, nil
}

func getFactory(clusterName string, clusterInfos map[string]*ClusterInfo) cmdutil.Factory {
    kubeConfigFlags := NewConfigFlags(true).WithDeprecatedPasswordFlag()
    // Build member cluster kubeConfigFlags
    kubeConfigFlags.BearerToken = stringptr(clusterInfos[clusterName].BearerToken)
    kubeConfigFlags.APIServer = stringptr(clusterInfos[clusterName].APIEndpoint)
    kubeConfigFlags.CaBundle = stringptr(clusterInfos[clusterName].CAData)
    matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags)
    return cmdutil.NewFactory(matchVersionKubeConfigFlags)
}

func (g *CommandGetOptions) transformRequests(req *rest.Request) {
    // We need full objects if printing with openapi columns
    if g.PrintWithOpenAPICols {
        return
    }
    if !g.ServerPrint || !g.IsHumanReadablePrinter {
        return
    }

    req.SetHeader("Accept", strings.Join([]string{
        fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName),
        fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName),
        "application/json",
    }, ","))
}

func getRBInKarmada(namespace string, config *rest.Config) error {
    resourceList := &workv1alpha1.ResourceBindingList{}
    gClient, err := gclient.NewForConfig(config)
    if err != nil {
        return err
    }
    if err = gClient.List(context.TODO(), resourceList, &client.ListOptions{
        LabelSelector: labels.SelectorFromSet(labels.Set{
            policyv1alpha1.PropagationPolicyNamespaceLabel: namespace,
        })}); err != nil {
        return err
    }

    for idx := range resourceList.Items {
        rbKey := resourceList.Items[idx].GetName()
        val := resourceList.Items[idx].Status.AggregatedStatus
        for i := range val {
            if val[i].Applied && val[i].ClusterName != "" {
                newRBKey := fmt.Sprintf("%s-%s", val[i].ClusterName, rbKey)
                RBInfo[newRBKey] = &OtherPrint{
                    Applied: val[i].Applied,
                }
            }
        }
    }
    return nil
}

// getSecretTokenInKarmada reads a member cluster's bearer token and CA data from its Secret in the Karmada control plane.
func (g *CommandGetOptions) getSecretTokenInKarmada(client *rest.Config, name string, clusterInfos map[string]*ClusterInfo) error {
    clusterClient, err := kubernetes.NewForConfig(client)
    if err != nil {
        return err
    }
    secret, err := clusterClient.CoreV1().Secrets(g.ClusterNamespace).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return err
    }
    clusterInfos[name].BearerToken = string(secret.Data[clusterv1alpha1.SecretTokenKey])
    clusterInfos[name].CAData = string(secret.Data[clusterv1alpha1.SecretCADataKey])
    return nil
}

// getClusterInKarmada lists the clusters registered with Karmada and records their API endpoints and sync modes.
func getClusterInKarmada(client *rest.Config, clusterInfos map[string]*ClusterInfo) error {
    clusterList := &clusterv1alpha1.ClusterList{}
    gClient, err := gclient.NewForConfig(client)
    if err != nil {
        return err
    }
    if err = gClient.List(context.TODO(), clusterList); err != nil {
        return err
    }

    for i := range clusterList.Items {
        cluster := &ClusterInfo{
            APIEndpoint:     clusterList.Items[i].Spec.APIEndpoint,
            ClusterSyncMode: clusterList.Items[i].Spec.SyncMode,
        }
        clusterInfos[clusterList.Items[i].GetName()] = cluster
    }
    return nil
}

func getRBKey(groupResource schema.GroupVersionResource, row metav1.TableRow, cluster string) string {
    rbKey, _ := row.Cells[0].(string)
    var suffix string
    switch groupResource.Resource {
    case "deployments":
        suffix = "deployment"
    case "services":
        suffix = "service"
    case "daemonsets":
        suffix = "daemonset"
    default:
        suffix = groupResource.Resource
    }
    return fmt.Sprintf("%s-%s-%s", cluster, rbKey, suffix)
}

// setNoAdoption hides the ADOPTION column for pods by lowering its priority.
func setNoAdoption(mapping *meta.RESTMapping) {
    if mapping != nil && mapping.Resource.Resource == "pods" {
        podColumns[printColumnClusterNum].Priority = 1
    }
}

// setColumnDefinition inserts the Cluster and ADOPTION column definitions into the table.
func setColumnDefinition(table *metav1.Table) {
    var tempColumnDefinition []metav1.TableColumnDefinition
    if len(table.ColumnDefinitions) > 0 {
        tempColumnDefinition = append(append(append(tempColumnDefinition, table.ColumnDefinitions[0], podColumns[0]), table.ColumnDefinitions[1:]...), podColumns[1:]...)
        table.ColumnDefinitions = tempColumnDefinition
    }
}

// Exists reports whether the given path exists.
func Exists(path string) bool {
    if _, err := os.Stat(path); err != nil {
        return os.IsExist(err)
    }
    return true
}
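
For orientation, output of the new command might look roughly like the following. This is illustrative only: the concrete columns come from each member cluster's server-side Table response, with the Cluster column inserted after NAME and the ADOPTION column appended by reconstructionRow and setColumnDefinition above.

    NAME    CLUSTER   READY   UP-TO-DATE   AVAILABLE   AGE   ADOPTION
    nginx   member1   1/1     1            1           5m    Y
    nginx   member2   1/1     1            1           5m    N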
|
@ -0,0 +1,399 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copy From: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/pkg/cmd/get/get_flags.go
|
||||
Change: ConfigFlags struct add CaBundle fields, toRawKubeConfigLoader method modify new loadRules and overrides.
|
||||
*/
|
||||
|
||||
package karmadactl
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/client-go/discovery"
|
||||
diskcached "k8s.io/client-go/discovery/cached/disk"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
)
|
||||
|
||||
const (
|
||||
flagClusterName = "cluster"
|
||||
flagAuthInfoName = "user"
|
||||
flagContext = "context"
|
||||
flagNamespace = "namespace"
|
||||
flagAPIServer = "server"
|
||||
flagTLSServerName = "tls-server-name"
|
||||
flagInsecure = "insecure-skip-tls-verify"
|
||||
flagCertFile = "client-certificate"
|
||||
flagKeyFile = "client-key"
|
||||
flagCAFile = "certificate-authority"
|
||||
flagBearerToken = "token"
|
||||
flagTimeout = "request-timeout"
|
||||
flagCacheDir = "cache-dir"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "cache")
|
||||
// ErrEmptyConfig is the error message to be displayed if the configuration info is missing or incomplete
|
||||
ErrEmptyConfig = clientcmd.NewEmptyConfigError(
|
||||
`Missing or incomplete configuration info. Please point to an existing, complete config file:
|
||||
1. Via the command-line flag --kubeconfig
|
||||
2. Via the KUBECONFIG environment variable
|
||||
3. In your home directory as ~/.kube/config
|
||||
|
||||
To view or setup config directly use the 'config' command.`)
|
||||
)
|
||||
|
||||
// RESTClientGetter is an interface that the ConfigFlags describe to provide an easier way to mock for commands
|
||||
// and eliminate the direct coupling to a struct type. Users may wish to duplicate this type in their own packages
|
||||
// as per the golang type overlapping.
|
||||
type RESTClientGetter interface {
|
||||
// ToRESTConfig returns restconfig
|
||||
ToRESTConfig() (*rest.Config, error)
|
||||
// ToDiscoveryClient returns discovery client
|
||||
ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
|
||||
// ToRESTMapper returns a restmapper
|
||||
ToRESTMapper() (meta.RESTMapper, error)
|
||||
// ToRawKubeConfigLoader return kubeconfig loader as-is
|
||||
ToRawKubeConfigLoader() clientcmd.ClientConfig
|
||||
}
|
||||
|
||||
type clientConfig struct {
|
||||
defaultClientConfig clientcmd.ClientConfig
|
||||
}
|
||||
|
||||
func (c *clientConfig) RawConfig() (clientcmdapi.Config, error) {
|
||||
config, err := c.defaultClientConfig.RawConfig()
|
||||
// replace client-go's ErrEmptyConfig error with our custom, more verbose version
|
||||
if clientcmd.IsEmptyConfig(err) {
|
||||
return config, ErrEmptyConfig
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (c *clientConfig) ClientConfig() (*rest.Config, error) {
|
||||
config, err := c.defaultClientConfig.ClientConfig()
|
||||
// replace client-go's ErrEmptyConfig error with our custom, more verbose version
|
||||
if clientcmd.IsEmptyConfig(err) {
|
||||
return config, ErrEmptyConfig
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (c *clientConfig) Namespace() (string, bool, error) {
|
||||
namespace, ok, err := c.defaultClientConfig.Namespace()
|
||||
// replace client-go's ErrEmptyConfig error with our custom, more verbose version
|
||||
if clientcmd.IsEmptyConfig(err) {
|
||||
return namespace, ok, ErrEmptyConfig
|
||||
}
|
||||
return namespace, ok, err
|
||||
}
|
||||
|
||||
func (c *clientConfig) ConfigAccess() clientcmd.ConfigAccess {
|
||||
return c.defaultClientConfig.ConfigAccess()
|
||||
}
|
||||
|
||||
var _ RESTClientGetter = &ConfigFlags{}
|
||||
|
||||
// ConfigFlags composes the set of values necessary
|
||||
// for obtaining a REST client config
|
||||
type ConfigFlags struct {
|
||||
CaBundle *string
|
||||
|
||||
CacheDir *string
|
||||
KubeConfig *string
|
||||
|
||||
// config flags
|
||||
ClusterName *string
|
||||
AuthInfoName *string
|
||||
Context *string
|
||||
Namespace *string
|
||||
APIServer *string
|
||||
TLSServerName *string
|
||||
Insecure *bool
|
||||
CertFile *string
|
||||
KeyFile *string
|
||||
CAFile *string
|
||||
BearerToken *string
|
||||
Impersonate *string
|
||||
ImpersonateGroup *[]string
|
||||
Username *string
|
||||
Password *string
|
||||
Timeout *string
|
||||
// If non-nil, wrap config function can transform the Config
|
||||
// before it is returned in ToRESTConfig function.
|
||||
WrapConfigFn func(*rest.Config) *rest.Config
|
||||
|
||||
clientConfig clientcmd.ClientConfig
|
||||
lock sync.Mutex
|
||||
// If set to true, will use persistent client config and
|
||||
// propagate the config to the places that need it, rather than
|
||||
// loading the config multiple times
|
||||
usePersistentConfig bool
|
||||
// Allows increasing burst used for discovery, this is useful
|
||||
// in clusters with many registered resources
|
||||
discoveryBurst int
|
||||
}
|
||||
|
||||
// ToRESTConfig implements RESTClientGetter.
|
||||
// Returns a REST client configuration based on a provided path
|
||||
// to a .kubeconfig file, loading rules, and config flag overrides.
|
||||
// Expects the AddFlags method to have been called. If WrapConfigFn
|
||||
// is non-nil this function can transform config before return.
|
||||
func (f *ConfigFlags) ToRESTConfig() (*rest.Config, error) {
|
||||
c, err := f.ToRawKubeConfigLoader().ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f.WrapConfigFn != nil {
|
||||
return f.WrapConfigFn(c), nil
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ToRawKubeConfigLoader binds config flag values to config overrides
|
||||
// Returns an interactive clientConfig if the password flag is enabled,
|
||||
// or a non-interactive clientConfig otherwise.
|
||||
func (f *ConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig {
|
||||
if f.usePersistentConfig {
|
||||
return f.toRawKubePersistentConfigLoader()
|
||||
}
|
||||
return f.toRawKubeConfigLoader()
|
||||
}
|
||||
|
||||
func (f *ConfigFlags) toRawKubeConfigLoader() clientcmd.ClientConfig {
|
||||
//loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules := &clientcmd.ClientConfigLoadingRules{}
|
||||
// use the standard defaults for this client command
|
||||
// DEPRECATED: remove and replace with something more accurate
|
||||
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
|
||||
|
||||
if f.KubeConfig != nil {
|
||||
loadingRules.ExplicitPath = *f.KubeConfig
|
||||
}
|
||||
|
||||
clusterOverrides := clientcmd.ClusterDefaults
|
||||
clusterOverrides.CertificateAuthorityData = []byte(*f.CaBundle)
|
||||
overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clusterOverrides}
|
||||
|
||||
// bind auth info flag values to overrides
|
||||
if f.CertFile != nil {
|
||||
overrides.AuthInfo.ClientCertificate = *f.CertFile
|
||||
}
|
||||
if f.KeyFile != nil {
|
||||
overrides.AuthInfo.ClientKey = *f.KeyFile
|
||||
}
|
||||
if f.BearerToken != nil {
|
||||
overrides.AuthInfo.Token = *f.BearerToken
|
||||
}
|
||||
|
||||
// bind cluster flags
|
||||
if f.APIServer != nil {
|
||||
overrides.ClusterInfo.Server = *f.APIServer
|
||||
}
|
||||
if f.TLSServerName != nil {
|
||||
overrides.ClusterInfo.TLSServerName = *f.TLSServerName
|
||||
}
|
||||
if f.CAFile != nil {
|
||||
overrides.ClusterInfo.CertificateAuthority = *f.CAFile
|
||||
}
|
||||
if f.Insecure != nil {
|
||||
overrides.ClusterInfo.InsecureSkipTLSVerify = *f.Insecure
|
||||
}
|
||||
|
||||
// bind context flags
|
||||
if f.Context != nil {
|
||||
overrides.CurrentContext = *f.Context
|
||||
}
|
||||
if f.ClusterName != nil {
|
||||
overrides.Context.Cluster = *f.ClusterName
|
||||
}
|
||||
if f.AuthInfoName != nil {
|
||||
overrides.Context.AuthInfo = *f.AuthInfoName
|
||||
}
|
||||
if f.Namespace != nil {
|
||||
overrides.Context.Namespace = *f.Namespace
|
||||
}
|
||||
|
||||
if f.Timeout != nil {
|
||||
overrides.Timeout = *f.Timeout
|
||||
}
|
||||
|
||||
// we only have an interactive prompt when a password is allowed
|
||||
if f.Password == nil {
|
||||
return &clientConfig{clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)}
|
||||
}
|
||||
return &clientConfig{clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)}
|
||||
}
|
||||
|
||||
// toRawKubePersistentConfigLoader binds config flag values to config overrides
|
||||
// Returns a persistent clientConfig for propagation.
|
||||
func (f *ConfigFlags) toRawKubePersistentConfigLoader() clientcmd.ClientConfig {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.clientConfig == nil {
|
||||
f.clientConfig = f.toRawKubeConfigLoader()
|
||||
}
|
||||
|
||||
return f.clientConfig
|
||||
}
|
||||
|
||||
// ToDiscoveryClient implements RESTClientGetter.
|
||||
// Expects the AddFlags method to have been called.
|
||||
// Returns a CachedDiscoveryInterface using a computed RESTConfig.
|
||||
func (f *ConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
|
||||
config, err := f.ToRESTConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The more groups you have, the more discovery requests you need to make.
|
||||
// given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests
|
||||
// double it just so we don't end up here again for a while. This config is only used for discovery.
|
||||
config.Burst = f.discoveryBurst
|
||||
|
||||
cacheDir := defaultCacheDir
|
||||
|
||||
// retrieve a user-provided value for the "cache-dir"
|
||||
// override httpCacheDir and discoveryCacheDir if user-value is given.
|
||||
if f.CacheDir != nil {
|
||||
cacheDir = *f.CacheDir
|
||||
}
|
||||
httpCacheDir := filepath.Join(cacheDir, "http")
|
||||
discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(cacheDir, "discovery"), config.Host)
|
||||
|
||||
return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, time.Duration(10*time.Minute))
|
||||
}
|
||||
|
||||
// ToRESTMapper returns a mapper.
|
||||
func (f *ConfigFlags) ToRESTMapper() (meta.RESTMapper, error) {
|
||||
discoveryClient, err := f.ToDiscoveryClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
|
||||
expander := restmapper.NewShortcutExpander(mapper, discoveryClient)
|
||||
return expander, nil
|
||||
}
|
||||
|
||||
// AddFlags binds client configuration flags to a given flagset
|
||||
func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) {
|
||||
if f.KubeConfig != nil {
|
||||
flags.StringVar(f.KubeConfig, "kubeconfig", *f.KubeConfig, "Path to the kubeconfig file to use for CLI requests.")
|
||||
}
|
||||
if f.CacheDir != nil {
|
||||
flags.StringVar(f.CacheDir, flagCacheDir, *f.CacheDir, "Default cache directory")
|
||||
}
|
||||
|
||||
// add config options
|
||||
if f.CertFile != nil {
|
||||
flags.StringVar(f.CertFile, flagCertFile, *f.CertFile, "Path to a client certificate file for TLS")
|
||||
}
|
||||
if f.KeyFile != nil {
|
||||
flags.StringVar(f.KeyFile, flagKeyFile, *f.KeyFile, "Path to a client key file for TLS")
|
||||
}
|
||||
if f.BearerToken != nil {
|
||||
flags.StringVar(f.BearerToken, flagBearerToken, *f.BearerToken, "Bearer token for authentication to the API server")
|
||||
}
|
||||
if f.ClusterName != nil {
|
||||
flags.StringVar(f.ClusterName, flagClusterName, *f.ClusterName, "The name of the kubeconfig cluster to use")
|
||||
}
|
||||
if f.AuthInfoName != nil {
|
||||
flags.StringVar(f.AuthInfoName, flagAuthInfoName, *f.AuthInfoName, "The name of the kubeconfig user to use")
|
||||
}
|
||||
if f.Namespace != nil {
|
||||
flags.StringVarP(f.Namespace, flagNamespace, "n", *f.Namespace, "If present, the namespace scope for this CLI request")
|
||||
}
|
||||
if f.Context != nil {
|
||||
flags.StringVar(f.Context, flagContext, *f.Context, "The name of the kubeconfig context to use")
|
||||
}
|
||||
|
||||
if f.APIServer != nil {
|
||||
flags.StringVarP(f.APIServer, flagAPIServer, "s", *f.APIServer, "The address and port of the Kubernetes API server")
|
||||
}
|
||||
if f.TLSServerName != nil {
|
||||
flags.StringVar(f.TLSServerName, flagTLSServerName, *f.TLSServerName, "Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used")
|
||||
}
|
||||
if f.Insecure != nil {
|
||||
flags.BoolVar(f.Insecure, flagInsecure, *f.Insecure, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure")
|
||||
}
|
||||
if f.CAFile != nil {
|
||||
flags.StringVar(f.CAFile, flagCAFile, *f.CAFile, "Path to a cert file for the certificate authority")
|
||||
}
|
||||
if f.Timeout != nil {
|
||||
flags.StringVar(f.Timeout, flagTimeout, *f.Timeout, "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.")
|
||||
}
|
||||
}

// WithDeprecatedPasswordFlag enables the username and password config flags
func (f *ConfigFlags) WithDeprecatedPasswordFlag() *ConfigFlags {
	f.Username = stringptr("")
	f.Password = stringptr("")
	return f
}

// WithDiscoveryBurst sets the RESTClient burst for discovery.
func (f *ConfigFlags) WithDiscoveryBurst(discoveryBurst int) *ConfigFlags {
	f.discoveryBurst = discoveryBurst
	return f
}

// NewConfigFlags returns ConfigFlags with default values set
func NewConfigFlags(usePersistentConfig bool) *ConfigFlags {
	impersonateGroup := []string{}
	insecure := false

	return &ConfigFlags{
		Insecure:   &insecure,
		Timeout:    stringptr("0"),
		KubeConfig: stringptr(""),

		CacheDir:         stringptr(defaultCacheDir),
		ClusterName:      stringptr(""),
		AuthInfoName:     stringptr(""),
		Context:          stringptr(""),
		Namespace:        stringptr(""),
		APIServer:        stringptr(""),
		TLSServerName:    stringptr(""),
		CertFile:         stringptr(""),
		KeyFile:          stringptr(""),
		CAFile:           stringptr(""),
		BearerToken:      stringptr(""),
		Impersonate:      stringptr(""),
		ImpersonateGroup: &impersonateGroup,

		usePersistentConfig: usePersistentConfig,
		// The more groups you have, the more discovery requests you need to make.
		// given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests
		// double it just so we don't end up here again for a while. This config is only used for discovery.
		discoveryBurst: 100,
	}
}

func stringptr(val string) *string {
	return &val
}

// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive
var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`)

// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name.
func computeDiscoverCacheDir(parentDir, host string) string {
	// strip the optional scheme from host if its there:
	schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
	// now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. Even if we do collide the problem is short lived
	safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
	return filepath.Join(parentDir, safeHost)
}
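
For illustration, a minimal sketch of what the helper above produces; the input values below are hypothetical and not taken from this PR:

// exampleDiscoveryCacheDir is an illustrative sketch: computeDiscoverCacheDir strips
// the URL scheme and collapses unsafe characters such as ':' into '_'.
func exampleDiscoveryCacheDir() string {
	return computeDiscoverCacheDir("/home/user/.kube/cache/discovery", "https://10.0.0.1:6443")
	// -> "/home/user/.kube/cache/discovery/10.0.0.1_6443"
}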

@ -47,6 +47,7 @@ func NewKarmadaCtlCommand(out io.Writer, cmdUse, cmdStr string) *cobra.Command {
	rootCmd.AddCommand(sharedcommand.NewCmdVersion(out, cmdStr))
	rootCmd.AddCommand(NewCmdCordon(out, karmadaConfig, cmdStr))
	rootCmd.AddCommand(NewCmdUncordon(out, karmadaConfig, cmdStr))
	rootCmd.AddCommand(NewCmdGet(out, karmadaConfig))

	return rootCmd
}
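
As a rough sketch only (package name, imports, and flag wiring are assumptions, not the PR's actual implementation), a "get" constructor of the shape registered above could reuse the ConfigFlags type added earlier in this diff like so:

package karmadactl

import (
	"fmt"
	"io"

	"github.com/spf13/cobra"
)

// newCmdGetSketch is a hypothetical illustration of a cobra "get" subcommand that
// reuses the kubeconfig-style flags; the real NewCmdGet(out, karmadaConfig) may differ.
func newCmdGetSketch(out io.Writer) *cobra.Command {
	configFlags := NewConfigFlags(true)
	cmd := &cobra.Command{
		Use:   "get [NAME]",
		Short: "Display one or many resources",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Fprintf(out, "get called with args %v in namespace %q\n", args, *configFlags.Namespace)
			return nil
		},
	}
	configFlags.AddFlags(cmd.Flags())
	return cmd
}
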
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Microsoft Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -0,0 +1,12 @@
# go-ansiterm

This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.

For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.

The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).

See parser_test.go for examples exercising the state machine and generating appropriate function calls.
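
A minimal usage sketch (the handler value is assumed to be some type implementing AnsiEventHandler; it is not defined here):

parser := ansiterm.CreateParser("Ground", handler)
if _, err := parser.Parse([]byte("\x1b[2AHello")); err != nil { // dispatches CUU(2), then prints "Hello"
	// handle the error
}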

-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

@ -0,0 +1,188 @@
|
|||
package ansiterm
|
||||
|
||||
const LogEnv = "DEBUG_TERMINAL"
|
||||
|
||||
// ANSI constants
|
||||
// References:
|
||||
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
|
||||
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
|
||||
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
|
||||
// -- http://vt100.net/emu/dec_ansi_parser
|
||||
// -- http://vt100.net/emu/vt500_parser.svg
|
||||
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
|
||||
// -- http://www.inwap.com/pdp10/ansicode.txt
|
||||
const (
|
||||
// ECMA-48 Set Graphics Rendition
|
||||
// Note:
|
||||
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
|
||||
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
|
||||
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
|
||||
ANSI_SGR_RESET = 0
|
||||
ANSI_SGR_BOLD = 1
|
||||
ANSI_SGR_DIM = 2
|
||||
_ANSI_SGR_ITALIC = 3
|
||||
ANSI_SGR_UNDERLINE = 4
|
||||
_ANSI_SGR_BLINKSLOW = 5
|
||||
_ANSI_SGR_BLINKFAST = 6
|
||||
ANSI_SGR_REVERSE = 7
|
||||
_ANSI_SGR_INVISIBLE = 8
|
||||
_ANSI_SGR_LINETHROUGH = 9
|
||||
_ANSI_SGR_FONT_00 = 10
|
||||
_ANSI_SGR_FONT_01 = 11
|
||||
_ANSI_SGR_FONT_02 = 12
|
||||
_ANSI_SGR_FONT_03 = 13
|
||||
_ANSI_SGR_FONT_04 = 14
|
||||
_ANSI_SGR_FONT_05 = 15
|
||||
_ANSI_SGR_FONT_06 = 16
|
||||
_ANSI_SGR_FONT_07 = 17
|
||||
_ANSI_SGR_FONT_08 = 18
|
||||
_ANSI_SGR_FONT_09 = 19
|
||||
_ANSI_SGR_FONT_10 = 20
|
||||
_ANSI_SGR_DOUBLEUNDERLINE = 21
|
||||
ANSI_SGR_BOLD_DIM_OFF = 22
|
||||
_ANSI_SGR_ITALIC_OFF = 23
|
||||
ANSI_SGR_UNDERLINE_OFF = 24
|
||||
_ANSI_SGR_BLINK_OFF = 25
|
||||
_ANSI_SGR_RESERVED_00 = 26
|
||||
ANSI_SGR_REVERSE_OFF = 27
|
||||
_ANSI_SGR_INVISIBLE_OFF = 28
|
||||
_ANSI_SGR_LINETHROUGH_OFF = 29
|
||||
ANSI_SGR_FOREGROUND_BLACK = 30
|
||||
ANSI_SGR_FOREGROUND_RED = 31
|
||||
ANSI_SGR_FOREGROUND_GREEN = 32
|
||||
ANSI_SGR_FOREGROUND_YELLOW = 33
|
||||
ANSI_SGR_FOREGROUND_BLUE = 34
|
||||
ANSI_SGR_FOREGROUND_MAGENTA = 35
|
||||
ANSI_SGR_FOREGROUND_CYAN = 36
|
||||
ANSI_SGR_FOREGROUND_WHITE = 37
|
||||
_ANSI_SGR_RESERVED_01 = 38
|
||||
ANSI_SGR_FOREGROUND_DEFAULT = 39
|
||||
ANSI_SGR_BACKGROUND_BLACK = 40
|
||||
ANSI_SGR_BACKGROUND_RED = 41
|
||||
ANSI_SGR_BACKGROUND_GREEN = 42
|
||||
ANSI_SGR_BACKGROUND_YELLOW = 43
|
||||
ANSI_SGR_BACKGROUND_BLUE = 44
|
||||
ANSI_SGR_BACKGROUND_MAGENTA = 45
|
||||
ANSI_SGR_BACKGROUND_CYAN = 46
|
||||
ANSI_SGR_BACKGROUND_WHITE = 47
|
||||
_ANSI_SGR_RESERVED_02 = 48
|
||||
ANSI_SGR_BACKGROUND_DEFAULT = 49
|
||||
// 50 - 65: Unsupported
|
||||
|
||||
ANSI_MAX_CMD_LENGTH = 4096
|
||||
|
||||
MAX_INPUT_EVENTS = 128
|
||||
DEFAULT_WIDTH = 80
|
||||
DEFAULT_HEIGHT = 24
|
||||
|
||||
ANSI_BEL = 0x07
|
||||
ANSI_BACKSPACE = 0x08
|
||||
ANSI_TAB = 0x09
|
||||
ANSI_LINE_FEED = 0x0A
|
||||
ANSI_VERTICAL_TAB = 0x0B
|
||||
ANSI_FORM_FEED = 0x0C
|
||||
ANSI_CARRIAGE_RETURN = 0x0D
|
||||
ANSI_ESCAPE_PRIMARY = 0x1B
|
||||
ANSI_ESCAPE_SECONDARY = 0x5B
|
||||
ANSI_OSC_STRING_ENTRY = 0x5D
|
||||
ANSI_COMMAND_FIRST = 0x40
|
||||
ANSI_COMMAND_LAST = 0x7E
|
||||
DCS_ENTRY = 0x90
|
||||
CSI_ENTRY = 0x9B
|
||||
OSC_STRING = 0x9D
|
||||
ANSI_PARAMETER_SEP = ";"
|
||||
ANSI_CMD_G0 = '('
|
||||
ANSI_CMD_G1 = ')'
|
||||
ANSI_CMD_G2 = '*'
|
||||
ANSI_CMD_G3 = '+'
|
||||
ANSI_CMD_DECPNM = '>'
|
||||
ANSI_CMD_DECPAM = '='
|
||||
ANSI_CMD_OSC = ']'
|
||||
ANSI_CMD_STR_TERM = '\\'
|
||||
|
||||
KEY_CONTROL_PARAM_2 = ";2"
|
||||
KEY_CONTROL_PARAM_3 = ";3"
|
||||
KEY_CONTROL_PARAM_4 = ";4"
|
||||
KEY_CONTROL_PARAM_5 = ";5"
|
||||
KEY_CONTROL_PARAM_6 = ";6"
|
||||
KEY_CONTROL_PARAM_7 = ";7"
|
||||
KEY_CONTROL_PARAM_8 = ";8"
|
||||
KEY_ESC_CSI = "\x1B["
|
||||
KEY_ESC_N = "\x1BN"
|
||||
KEY_ESC_O = "\x1BO"
|
||||
|
||||
FILL_CHARACTER = ' '
|
||||
)
|
||||
|
||||
func getByteRange(start byte, end byte) []byte {
|
||||
bytes := make([]byte, 0, 32)
|
||||
for i := start; i <= end; i++ {
|
||||
bytes = append(bytes, byte(i))
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
var toGroundBytes = getToGroundBytes()
|
||||
var executors = getExecuteBytes()
|
||||
|
||||
// SPACE 20+A0 hex Always and everywhere a blank space
|
||||
// Intermediate 20-2F hex !"#$%&'()*+,-./
|
||||
var intermeds = getByteRange(0x20, 0x2F)
|
||||
|
||||
// Parameters 30-3F hex 0123456789:;<=>?
|
||||
// CSI Parameters 30-39, 3B hex 0123456789;
|
||||
var csiParams = getByteRange(0x30, 0x3F)
|
||||
|
||||
var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
|
||||
|
||||
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
|
||||
var upperCase = getByteRange(0x40, 0x5F)
|
||||
|
||||
// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
|
||||
var lowerCase = getByteRange(0x60, 0x7E)
|
||||
|
||||
// Alphabetics 40-7E hex (all of upper and lower case)
|
||||
var alphabetics = append(upperCase, lowerCase...)
|
||||
|
||||
var printables = getByteRange(0x20, 0x7F)
|
||||
|
||||
var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
|
||||
var escapeToGroundBytes = getEscapeToGroundBytes()
|
||||
|
||||
// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
|
||||
// byte ranges below
|
||||
|
||||
func getEscapeToGroundBytes() []byte {
|
||||
escapeToGroundBytes := getByteRange(0x30, 0x4F)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
|
||||
escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
|
||||
return escapeToGroundBytes
|
||||
}
|
||||
|
||||
func getExecuteBytes() []byte {
|
||||
executeBytes := getByteRange(0x00, 0x17)
|
||||
executeBytes = append(executeBytes, 0x19)
|
||||
executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
|
||||
return executeBytes
|
||||
}
|
||||
|
||||
func getToGroundBytes() []byte {
|
||||
groundBytes := []byte{0x18}
|
||||
groundBytes = append(groundBytes, 0x1A)
|
||||
groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
|
||||
groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
|
||||
groundBytes = append(groundBytes, 0x99)
|
||||
groundBytes = append(groundBytes, 0x9A)
|
||||
groundBytes = append(groundBytes, 0x9C)
|
||||
return groundBytes
|
||||
}
|
||||
|
||||
// Delete 7F hex Always and everywhere ignored
|
||||
// C1 Control 80-9F hex 32 additional control characters
|
||||
// G1 Displayable A1-FE hex 94 additional displayable characters
|
||||
// Special A0+FF hex Same as SPACE and DELETE
|
|
@ -0,0 +1,7 @@
package ansiterm

type ansiContext struct {
	currentChar byte
	paramBuffer []byte
	interBuffer []byte
}

@ -0,0 +1,49 @@
|
|||
package ansiterm
|
||||
|
||||
type csiEntryState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
|
||||
csiState.parser.logf("CsiEntry::Handle %#x", b)
|
||||
|
||||
nextState, err := csiState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(alphabetics, b):
|
||||
return csiState.parser.ground, nil
|
||||
case sliceContains(csiCollectables, b):
|
||||
return csiState.parser.csiParam, nil
|
||||
case sliceContains(executors, b):
|
||||
return csiState, csiState.parser.execute()
|
||||
}
|
||||
|
||||
return csiState, nil
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Transition(s state) error {
|
||||
csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
|
||||
csiState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case csiState.parser.ground:
|
||||
return csiState.parser.csiDispatch()
|
||||
case csiState.parser.csiParam:
|
||||
switch {
|
||||
case sliceContains(csiParams, csiState.parser.context.currentChar):
|
||||
csiState.parser.collectParam()
|
||||
case sliceContains(intermeds, csiState.parser.context.currentChar):
|
||||
csiState.parser.collectInter()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (csiState csiEntryState) Enter() error {
|
||||
csiState.parser.clear()
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
package ansiterm
|
||||
|
||||
type csiParamState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (csiState csiParamState) Handle(b byte) (s state, e error) {
|
||||
csiState.parser.logf("CsiParam::Handle %#x", b)
|
||||
|
||||
nextState, err := csiState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(alphabetics, b):
|
||||
return csiState.parser.ground, nil
|
||||
case sliceContains(csiCollectables, b):
|
||||
csiState.parser.collectParam()
|
||||
return csiState, nil
|
||||
case sliceContains(executors, b):
|
||||
return csiState, csiState.parser.execute()
|
||||
}
|
||||
|
||||
return csiState, nil
|
||||
}
|
||||
|
||||
func (csiState csiParamState) Transition(s state) error {
|
||||
csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
|
||||
csiState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case csiState.parser.ground:
|
||||
return csiState.parser.csiDispatch()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
package ansiterm
|
||||
|
||||
type escapeIntermediateState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
|
||||
escState.parser.logf("escapeIntermediateState::Handle %#x", b)
|
||||
nextState, err := escState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(intermeds, b):
|
||||
return escState, escState.parser.collectInter()
|
||||
case sliceContains(executors, b):
|
||||
return escState, escState.parser.execute()
|
||||
case sliceContains(escapeIntermediateToGroundBytes, b):
|
||||
return escState.parser.ground, nil
|
||||
}
|
||||
|
||||
return escState, nil
|
||||
}
|
||||
|
||||
func (escState escapeIntermediateState) Transition(s state) error {
|
||||
escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
|
||||
escState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case escState.parser.ground:
|
||||
return escState.parser.escDispatch()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package ansiterm
|
||||
|
||||
type escapeState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (escState escapeState) Handle(b byte) (s state, e error) {
|
||||
escState.parser.logf("escapeState::Handle %#x", b)
|
||||
nextState, err := escState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case b == ANSI_ESCAPE_SECONDARY:
|
||||
return escState.parser.csiEntry, nil
|
||||
case b == ANSI_OSC_STRING_ENTRY:
|
||||
return escState.parser.oscString, nil
|
||||
case sliceContains(executors, b):
|
||||
return escState, escState.parser.execute()
|
||||
case sliceContains(escapeToGroundBytes, b):
|
||||
return escState.parser.ground, nil
|
||||
case sliceContains(intermeds, b):
|
||||
return escState.parser.escapeIntermediate, nil
|
||||
}
|
||||
|
||||
return escState, nil
|
||||
}
|
||||
|
||||
func (escState escapeState) Transition(s state) error {
|
||||
escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
|
||||
escState.baseState.Transition(s)
|
||||
|
||||
switch s {
|
||||
case escState.parser.ground:
|
||||
return escState.parser.escDispatch()
|
||||
case escState.parser.escapeIntermediate:
|
||||
return escState.parser.collectInter()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (escState escapeState) Enter() error {
|
||||
escState.parser.clear()
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
package ansiterm
|
||||
|
||||
type AnsiEventHandler interface {
|
||||
// Print
|
||||
Print(b byte) error
|
||||
|
||||
// Execute C0 commands
|
||||
Execute(b byte) error
|
||||
|
||||
// CUrsor Up
|
||||
CUU(int) error
|
||||
|
||||
// CUrsor Down
|
||||
CUD(int) error
|
||||
|
||||
// CUrsor Forward
|
||||
CUF(int) error
|
||||
|
||||
// CUrsor Backward
|
||||
CUB(int) error
|
||||
|
||||
// Cursor to Next Line
|
||||
CNL(int) error
|
||||
|
||||
// Cursor to Previous Line
|
||||
CPL(int) error
|
||||
|
||||
// Cursor Horizontal position Absolute
|
||||
CHA(int) error
|
||||
|
||||
// Vertical line Position Absolute
|
||||
VPA(int) error
|
||||
|
||||
// CUrsor Position
|
||||
CUP(int, int) error
|
||||
|
||||
// Horizontal and Vertical Position (depends on PUM)
|
||||
HVP(int, int) error
|
||||
|
||||
// Text Cursor Enable Mode
|
||||
DECTCEM(bool) error
|
||||
|
||||
// Origin Mode
|
||||
DECOM(bool) error
|
||||
|
||||
// 132 Column Mode
|
||||
DECCOLM(bool) error
|
||||
|
||||
// Erase in Display
|
||||
ED(int) error
|
||||
|
||||
// Erase in Line
|
||||
EL(int) error
|
||||
|
||||
// Insert Line
|
||||
IL(int) error
|
||||
|
||||
// Delete Line
|
||||
DL(int) error
|
||||
|
||||
// Insert Character
|
||||
ICH(int) error
|
||||
|
||||
// Delete Character
|
||||
DCH(int) error
|
||||
|
||||
// Set Graphics Rendition
|
||||
SGR([]int) error
|
||||
|
||||
// Pan Down
|
||||
SU(int) error
|
||||
|
||||
// Pan Up
|
||||
SD(int) error
|
||||
|
||||
// Device Attributes
|
||||
DA([]string) error
|
||||
|
||||
// Set Top and Bottom Margins
|
||||
DECSTBM(int, int) error
|
||||
|
||||
// Index
|
||||
IND() error
|
||||
|
||||
// Reverse Index
|
||||
RI() error
|
||||
|
||||
// Flush updates from previous commands
|
||||
Flush() error
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
package ansiterm
|
||||
|
||||
type groundState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (gs groundState) Handle(b byte) (s state, e error) {
|
||||
gs.parser.context.currentChar = b
|
||||
|
||||
nextState, err := gs.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case sliceContains(printables, b):
|
||||
return gs, gs.parser.print()
|
||||
|
||||
case sliceContains(executors, b):
|
||||
return gs, gs.parser.execute()
|
||||
}
|
||||
|
||||
return gs, nil
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
package ansiterm
|
||||
|
||||
type oscStringState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
func (oscState oscStringState) Handle(b byte) (s state, e error) {
|
||||
oscState.parser.logf("OscString::Handle %#x", b)
|
||||
nextState, err := oscState.baseState.Handle(b)
|
||||
if nextState != nil || err != nil {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case isOscStringTerminator(b):
|
||||
return oscState.parser.ground, nil
|
||||
}
|
||||
|
||||
return oscState, nil
|
||||
}
|
||||
|
||||
// See below for OSC string terminators for linux
|
||||
// http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||
func isOscStringTerminator(b byte) bool {
|
||||
|
||||
if b == ANSI_BEL || b == 0x5C {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
package ansiterm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
type AnsiParser struct {
|
||||
currState state
|
||||
eventHandler AnsiEventHandler
|
||||
context *ansiContext
|
||||
csiEntry state
|
||||
csiParam state
|
||||
dcsEntry state
|
||||
escape state
|
||||
escapeIntermediate state
|
||||
error state
|
||||
ground state
|
||||
oscString state
|
||||
stateMap []state
|
||||
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
type Option func(*AnsiParser)
|
||||
|
||||
func WithLogf(f func(string, ...interface{})) Option {
|
||||
return func(ap *AnsiParser) {
|
||||
ap.logf = f
|
||||
}
|
||||
}
|
||||
|
||||
func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
|
||||
ap := &AnsiParser{
|
||||
eventHandler: evtHandler,
|
||||
context: &ansiContext{},
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(ap)
|
||||
}
|
||||
|
||||
if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
|
||||
logFile, _ := os.Create("ansiParser.log")
|
||||
logger := log.New(logFile, "", log.LstdFlags)
|
||||
if ap.logf != nil {
|
||||
l := ap.logf
|
||||
ap.logf = func(s string, v ...interface{}) {
|
||||
l(s, v...)
|
||||
logger.Printf(s, v...)
|
||||
}
|
||||
} else {
|
||||
ap.logf = logger.Printf
|
||||
}
|
||||
}
|
||||
|
||||
if ap.logf == nil {
|
||||
ap.logf = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
|
||||
ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
|
||||
ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
|
||||
ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
|
||||
ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
|
||||
ap.error = errorState{baseState{name: "Error", parser: ap}}
|
||||
ap.ground = groundState{baseState{name: "Ground", parser: ap}}
|
||||
ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
|
||||
|
||||
ap.stateMap = []state{
|
||||
ap.csiEntry,
|
||||
ap.csiParam,
|
||||
ap.dcsEntry,
|
||||
ap.escape,
|
||||
ap.escapeIntermediate,
|
||||
ap.error,
|
||||
ap.ground,
|
||||
ap.oscString,
|
||||
}
|
||||
|
||||
ap.currState = getState(initialState, ap.stateMap)
|
||||
|
||||
ap.logf("CreateParser: parser %p", ap)
|
||||
return ap
|
||||
}
|
||||
|
||||
func getState(name string, states []state) state {
|
||||
for _, el := range states {
|
||||
if el.Name() == name {
|
||||
return el
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
|
||||
for i, b := range bytes {
|
||||
if err := ap.handle(b); err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
|
||||
return len(bytes), ap.eventHandler.Flush()
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) handle(b byte) error {
|
||||
ap.context.currentChar = b
|
||||
newState, err := ap.currState.Handle(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if newState == nil {
|
||||
ap.logf("WARNING: newState is nil")
|
||||
return errors.New("New state of 'nil' is invalid.")
|
||||
}
|
||||
|
||||
if newState != ap.currState {
|
||||
if err := ap.changeState(newState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) changeState(newState state) error {
|
||||
ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
|
||||
|
||||
// Exit old state
|
||||
if err := ap.currState.Exit(); err != nil {
|
||||
ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Perform transition action
|
||||
if err := ap.currState.Transition(newState); err != nil {
|
||||
ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Enter new state
|
||||
if err := newState.Enter(); err != nil {
|
||||
ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
ap.currState = newState
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,99 @@
|
|||
package ansiterm
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func parseParams(bytes []byte) ([]string, error) {
|
||||
paramBuff := make([]byte, 0, 0)
|
||||
params := []string{}
|
||||
|
||||
for _, v := range bytes {
|
||||
if v == ';' {
|
||||
if len(paramBuff) > 0 {
|
||||
// Completed parameter, append it to the list
|
||||
s := string(paramBuff)
|
||||
params = append(params, s)
|
||||
paramBuff = make([]byte, 0, 0)
|
||||
}
|
||||
} else {
|
||||
paramBuff = append(paramBuff, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Last parameter may not be terminated with ';'
|
||||
if len(paramBuff) > 0 {
|
||||
s := string(paramBuff)
|
||||
params = append(params, s)
|
||||
}
|
||||
|
||||
return params, nil
|
||||
}
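
// exampleCSIParams is an illustrative sketch (not part of the vendored file): it shows
// how parseParams above and getInts below behave for a typical CSI buffer such as "2;3".
func exampleCSIParams() []int {
	params, _ := parseParams([]byte("2;3")) // -> []string{"2", "3"}
	return getInts(params, 2, 1)            // -> []int{2, 3}; "0" or missing entries fall back to the default 1
}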
|
||||
|
||||
func parseCmd(context ansiContext) (string, error) {
|
||||
return string(context.currentChar), nil
|
||||
}
|
||||
|
||||
func getInt(params []string, dflt int) int {
|
||||
i := getInts(params, 1, dflt)[0]
|
||||
return i
|
||||
}
|
||||
|
||||
func getInts(params []string, minCount int, dflt int) []int {
|
||||
ints := []int{}
|
||||
|
||||
for _, v := range params {
|
||||
i, _ := strconv.Atoi(v)
|
||||
// Zero is mapped to the default value in VT100.
|
||||
if i == 0 {
|
||||
i = dflt
|
||||
}
|
||||
ints = append(ints, i)
|
||||
}
|
||||
|
||||
if len(ints) < minCount {
|
||||
remaining := minCount - len(ints)
|
||||
for i := 0; i < remaining; i++ {
|
||||
ints = append(ints, dflt)
|
||||
}
|
||||
}
|
||||
|
||||
return ints
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) modeDispatch(param string, set bool) error {
|
||||
switch param {
|
||||
case "?3":
|
||||
return ap.eventHandler.DECCOLM(set)
|
||||
case "?6":
|
||||
return ap.eventHandler.DECOM(set)
|
||||
case "?25":
|
||||
return ap.eventHandler.DECTCEM(set)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) hDispatch(params []string) error {
|
||||
if len(params) == 1 {
|
||||
return ap.modeDispatch(params[0], true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) lDispatch(params []string) error {
|
||||
if len(params) == 1 {
|
||||
return ap.modeDispatch(params[0], false)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEraseParam(params []string) int {
|
||||
param := getInt(params, 0)
|
||||
if param < 0 || 3 < param {
|
||||
param = 0
|
||||
}
|
||||
|
||||
return param
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
package ansiterm
|
||||
|
||||
func (ap *AnsiParser) collectParam() error {
|
||||
currChar := ap.context.currentChar
|
||||
ap.logf("collectParam %#x", currChar)
|
||||
ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) collectInter() error {
|
||||
currChar := ap.context.currentChar
|
||||
ap.logf("collectInter %#x", currChar)
|
||||
ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) escDispatch() error {
|
||||
cmd, _ := parseCmd(*ap.context)
|
||||
intermeds := ap.context.interBuffer
|
||||
ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
|
||||
ap.logf("escDispatch: %v(%v)", cmd, intermeds)
|
||||
|
||||
switch cmd {
|
||||
case "D": // IND
|
||||
return ap.eventHandler.IND()
|
||||
case "E": // NEL, equivalent to CRLF
|
||||
err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
|
||||
if err == nil {
|
||||
err = ap.eventHandler.Execute(ANSI_LINE_FEED)
|
||||
}
|
||||
return err
|
||||
case "M": // RI
|
||||
return ap.eventHandler.RI()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) csiDispatch() error {
|
||||
cmd, _ := parseCmd(*ap.context)
|
||||
params, _ := parseParams(ap.context.paramBuffer)
|
||||
ap.logf("Parsed params: %v with length: %d", params, len(params))
|
||||
|
||||
ap.logf("csiDispatch: %v(%v)", cmd, params)
|
||||
|
||||
switch cmd {
|
||||
case "@":
|
||||
return ap.eventHandler.ICH(getInt(params, 1))
|
||||
case "A":
|
||||
return ap.eventHandler.CUU(getInt(params, 1))
|
||||
case "B":
|
||||
return ap.eventHandler.CUD(getInt(params, 1))
|
||||
case "C":
|
||||
return ap.eventHandler.CUF(getInt(params, 1))
|
||||
case "D":
|
||||
return ap.eventHandler.CUB(getInt(params, 1))
|
||||
case "E":
|
||||
return ap.eventHandler.CNL(getInt(params, 1))
|
||||
case "F":
|
||||
return ap.eventHandler.CPL(getInt(params, 1))
|
||||
case "G":
|
||||
return ap.eventHandler.CHA(getInt(params, 1))
|
||||
case "H":
|
||||
ints := getInts(params, 2, 1)
|
||||
x, y := ints[0], ints[1]
|
||||
return ap.eventHandler.CUP(x, y)
|
||||
case "J":
|
||||
param := getEraseParam(params)
|
||||
return ap.eventHandler.ED(param)
|
||||
case "K":
|
||||
param := getEraseParam(params)
|
||||
return ap.eventHandler.EL(param)
|
||||
case "L":
|
||||
return ap.eventHandler.IL(getInt(params, 1))
|
||||
case "M":
|
||||
return ap.eventHandler.DL(getInt(params, 1))
|
||||
case "P":
|
||||
return ap.eventHandler.DCH(getInt(params, 1))
|
||||
case "S":
|
||||
return ap.eventHandler.SU(getInt(params, 1))
|
||||
case "T":
|
||||
return ap.eventHandler.SD(getInt(params, 1))
|
||||
case "c":
|
||||
return ap.eventHandler.DA(params)
|
||||
case "d":
|
||||
return ap.eventHandler.VPA(getInt(params, 1))
|
||||
case "f":
|
||||
ints := getInts(params, 2, 1)
|
||||
x, y := ints[0], ints[1]
|
||||
return ap.eventHandler.HVP(x, y)
|
||||
case "h":
|
||||
return ap.hDispatch(params)
|
||||
case "l":
|
||||
return ap.lDispatch(params)
|
||||
case "m":
|
||||
return ap.eventHandler.SGR(getInts(params, 1, 0))
|
||||
case "r":
|
||||
ints := getInts(params, 2, 1)
|
||||
top, bottom := ints[0], ints[1]
|
||||
return ap.eventHandler.DECSTBM(top, bottom)
|
||||
default:
|
||||
ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) print() error {
|
||||
return ap.eventHandler.Print(ap.context.currentChar)
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) clear() error {
|
||||
ap.context = &ansiContext{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ap *AnsiParser) execute() error {
|
||||
return ap.eventHandler.Execute(ap.context.currentChar)
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
package ansiterm
|
||||
|
||||
type stateID int
|
||||
|
||||
type state interface {
|
||||
Enter() error
|
||||
Exit() error
|
||||
Handle(byte) (state, error)
|
||||
Name() string
|
||||
Transition(state) error
|
||||
}
|
||||
|
||||
type baseState struct {
|
||||
name string
|
||||
parser *AnsiParser
|
||||
}
|
||||
|
||||
func (base baseState) Enter() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (base baseState) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (base baseState) Handle(b byte) (s state, e error) {
|
||||
|
||||
switch {
|
||||
case b == CSI_ENTRY:
|
||||
return base.parser.csiEntry, nil
|
||||
case b == DCS_ENTRY:
|
||||
return base.parser.dcsEntry, nil
|
||||
case b == ANSI_ESCAPE_PRIMARY:
|
||||
return base.parser.escape, nil
|
||||
case b == OSC_STRING:
|
||||
return base.parser.oscString, nil
|
||||
case sliceContains(toGroundBytes, b):
|
||||
return base.parser.ground, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (base baseState) Name() string {
|
||||
return base.name
|
||||
}
|
||||
|
||||
func (base baseState) Transition(s state) error {
|
||||
if s == base.parser.ground {
|
||||
execBytes := []byte{0x18}
|
||||
execBytes = append(execBytes, 0x1A)
|
||||
execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
|
||||
execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
|
||||
execBytes = append(execBytes, 0x99)
|
||||
execBytes = append(execBytes, 0x9A)
|
||||
|
||||
if sliceContains(execBytes, base.parser.context.currentChar) {
|
||||
return base.parser.execute()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type dcsEntryState struct {
|
||||
baseState
|
||||
}
|
||||
|
||||
type errorState struct {
|
||||
baseState
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
package ansiterm
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func sliceContains(bytes []byte, b byte) bool {
|
||||
for _, v := range bytes {
|
||||
if v == b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func convertBytesToInteger(bytes []byte) int {
|
||||
s := string(bytes)
|
||||
i, _ := strconv.Atoi(s)
|
||||
return i
|
||||
}
|
|
@ -0,0 +1,182 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/Azure/go-ansiterm"
|
||||
)
|
||||
|
||||
// Windows keyboard constants
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
|
||||
const (
|
||||
VK_PRIOR = 0x21 // PAGE UP key
|
||||
VK_NEXT = 0x22 // PAGE DOWN key
|
||||
VK_END = 0x23 // END key
|
||||
VK_HOME = 0x24 // HOME key
|
||||
VK_LEFT = 0x25 // LEFT ARROW key
|
||||
VK_UP = 0x26 // UP ARROW key
|
||||
VK_RIGHT = 0x27 // RIGHT ARROW key
|
||||
VK_DOWN = 0x28 // DOWN ARROW key
|
||||
VK_SELECT = 0x29 // SELECT key
|
||||
VK_PRINT = 0x2A // PRINT key
|
||||
VK_EXECUTE = 0x2B // EXECUTE key
|
||||
VK_SNAPSHOT = 0x2C // PRINT SCREEN key
|
||||
VK_INSERT = 0x2D // INS key
|
||||
VK_DELETE = 0x2E // DEL key
|
||||
VK_HELP = 0x2F // HELP key
|
||||
VK_F1 = 0x70 // F1 key
|
||||
VK_F2 = 0x71 // F2 key
|
||||
VK_F3 = 0x72 // F3 key
|
||||
VK_F4 = 0x73 // F4 key
|
||||
VK_F5 = 0x74 // F5 key
|
||||
VK_F6 = 0x75 // F6 key
|
||||
VK_F7 = 0x76 // F7 key
|
||||
VK_F8 = 0x77 // F8 key
|
||||
VK_F9 = 0x78 // F9 key
|
||||
VK_F10 = 0x79 // F10 key
|
||||
VK_F11 = 0x7A // F11 key
|
||||
VK_F12 = 0x7B // F12 key
|
||||
|
||||
RIGHT_ALT_PRESSED = 0x0001
|
||||
LEFT_ALT_PRESSED = 0x0002
|
||||
RIGHT_CTRL_PRESSED = 0x0004
|
||||
LEFT_CTRL_PRESSED = 0x0008
|
||||
SHIFT_PRESSED = 0x0010
|
||||
NUMLOCK_ON = 0x0020
|
||||
SCROLLLOCK_ON = 0x0040
|
||||
CAPSLOCK_ON = 0x0080
|
||||
ENHANCED_KEY = 0x0100
|
||||
)
|
||||
|
||||
type ansiCommand struct {
|
||||
CommandBytes []byte
|
||||
Command string
|
||||
Parameters []string
|
||||
IsSpecial bool
|
||||
}
|
||||
|
||||
func newAnsiCommand(command []byte) *ansiCommand {
|
||||
|
||||
if isCharacterSelectionCmdChar(command[1]) {
|
||||
// Is Character Set Selection commands
|
||||
return &ansiCommand{
|
||||
CommandBytes: command,
|
||||
Command: string(command),
|
||||
IsSpecial: true,
|
||||
}
|
||||
}
|
||||
|
||||
// last char is command character
|
||||
lastCharIndex := len(command) - 1
|
||||
|
||||
ac := &ansiCommand{
|
||||
CommandBytes: command,
|
||||
Command: string(command[lastCharIndex]),
|
||||
IsSpecial: false,
|
||||
}
|
||||
|
||||
// more than a single escape
|
||||
if lastCharIndex != 0 {
|
||||
start := 1
|
||||
// skip if double char escape sequence
|
||||
if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
|
||||
start++
|
||||
}
|
||||
// convert this to GetNextParam method
|
||||
ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
|
||||
}
|
||||
|
||||
return ac
|
||||
}
|
||||
|
||||
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
|
||||
if index < 0 || index >= len(ac.Parameters) {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
return int16(param)
|
||||
}
|
||||
|
||||
func (ac *ansiCommand) String() string {
|
||||
return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
|
||||
bytesToHex(ac.CommandBytes),
|
||||
ac.Command,
|
||||
strings.Join(ac.Parameters, "\",\""))
|
||||
}
|
||||
|
||||
// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
|
||||
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
|
||||
func isAnsiCommandChar(b byte) bool {
|
||||
switch {
|
||||
case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
|
||||
return true
|
||||
case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
|
||||
// non-CSI escape sequence terminator
|
||||
return true
|
||||
case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
|
||||
// String escape sequence terminator
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isXtermOscSequence(command []byte, current byte) bool {
|
||||
return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
|
||||
}
|
||||
|
||||
func isCharacterSelectionCmdChar(b byte) bool {
|
||||
return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
|
||||
}
|
||||
|
||||
// bytesToHex converts a slice of bytes to a human-readable string.
|
||||
func bytesToHex(b []byte) string {
|
||||
hex := make([]string, len(b))
|
||||
for i, ch := range b {
|
||||
hex[i] = fmt.Sprintf("%X", ch)
|
||||
}
|
||||
return strings.Join(hex, "")
|
||||
}
|
||||
|
||||
// ensureInRange adjusts the passed value, if necessary, to ensure it is within
|
||||
// the passed min / max range.
|
||||
func ensureInRange(n int16, min int16, max int16) int16 {
|
||||
if n < min {
|
||||
return min
|
||||
} else if n > max {
|
||||
return max
|
||||
} else {
|
||||
return n
|
||||
}
|
||||
}
|
||||
|
||||
func GetStdFile(nFile int) (*os.File, uintptr) {
|
||||
var file *os.File
|
||||
switch nFile {
|
||||
case syscall.STD_INPUT_HANDLE:
|
||||
file = os.Stdin
|
||||
case syscall.STD_OUTPUT_HANDLE:
|
||||
file = os.Stdout
|
||||
case syscall.STD_ERROR_HANDLE:
|
||||
file = os.Stderr
|
||||
default:
|
||||
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
|
||||
}
|
||||
|
||||
fd, err := syscall.GetStdHandle(nFile)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
|
||||
}
|
||||
|
||||
return file, uintptr(fd)
|
||||
}
|
|
@ -0,0 +1,327 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//===========================================================================================================
|
||||
// IMPORTANT NOTE:
|
||||
//
|
||||
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
|
||||
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
|
||||
// variables) the pointers reference *before* the API completes.
|
||||
//
|
||||
// As a result, in those cases, the code must hint that the variables remain in active by invoking the
|
||||
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
|
||||
// require unsafe pointers.
|
||||
//
|
||||
// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
|
||||
// the garbage collector the variables remain in use if:
|
||||
//
|
||||
// -- The value is not a pointer (e.g., int32, struct)
|
||||
// -- The value is not referenced by the method after passing the pointer to Windows
|
||||
//
|
||||
// See http://golang.org/doc/go1.3.
|
||||
//===========================================================================================================
|
||||
|
||||
var (
|
||||
kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
|
||||
|
||||
getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
|
||||
setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
|
||||
setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
|
||||
setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
|
||||
getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
|
||||
setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
|
||||
scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
|
||||
setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
|
||||
setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
|
||||
writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
|
||||
readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
|
||||
waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
|
||||
)
|
||||
|
||||
// Windows Console constants
|
||||
const (
|
||||
// Console modes
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||
ENABLE_PROCESSED_INPUT = 0x0001
|
||||
ENABLE_LINE_INPUT = 0x0002
|
||||
ENABLE_ECHO_INPUT = 0x0004
|
||||
ENABLE_WINDOW_INPUT = 0x0008
|
||||
ENABLE_MOUSE_INPUT = 0x0010
|
||||
ENABLE_INSERT_MODE = 0x0020
|
||||
ENABLE_QUICK_EDIT_MODE = 0x0040
|
||||
ENABLE_EXTENDED_FLAGS = 0x0080
|
||||
ENABLE_AUTO_POSITION = 0x0100
|
||||
ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
|
||||
|
||||
ENABLE_PROCESSED_OUTPUT = 0x0001
|
||||
ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
|
||||
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
|
||||
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
|
||||
ENABLE_LVB_GRID_WORLDWIDE = 0x0010
|
||||
|
||||
// Character attributes
|
||||
// Note:
|
||||
// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
|
||||
// Clearing all foreground or background colors results in black; setting all creates white.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
|
||||
FOREGROUND_BLUE uint16 = 0x0001
|
||||
FOREGROUND_GREEN uint16 = 0x0002
|
||||
FOREGROUND_RED uint16 = 0x0004
|
||||
FOREGROUND_INTENSITY uint16 = 0x0008
|
||||
FOREGROUND_MASK uint16 = 0x000F
|
||||
|
||||
BACKGROUND_BLUE uint16 = 0x0010
|
||||
BACKGROUND_GREEN uint16 = 0x0020
|
||||
BACKGROUND_RED uint16 = 0x0040
|
||||
BACKGROUND_INTENSITY uint16 = 0x0080
|
||||
BACKGROUND_MASK uint16 = 0x00F0
|
||||
|
||||
COMMON_LVB_MASK uint16 = 0xFF00
|
||||
COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
|
||||
COMMON_LVB_UNDERSCORE uint16 = 0x8000
|
||||
|
||||
// Input event types
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||
KEY_EVENT = 0x0001
|
||||
MOUSE_EVENT = 0x0002
|
||||
WINDOW_BUFFER_SIZE_EVENT = 0x0004
|
||||
MENU_EVENT = 0x0008
|
||||
FOCUS_EVENT = 0x0010
|
||||
|
||||
// WaitForSingleObject return codes
|
||||
WAIT_ABANDONED = 0x00000080
|
||||
WAIT_FAILED = 0xFFFFFFFF
|
||||
WAIT_SIGNALED = 0x0000000
|
||||
WAIT_TIMEOUT = 0x00000102
|
||||
|
||||
// WaitForSingleObject wait duration
|
||||
WAIT_INFINITE = 0xFFFFFFFF
|
||||
WAIT_ONE_SECOND = 1000
|
||||
WAIT_HALF_SECOND = 500
|
||||
WAIT_QUARTER_SECOND = 250
|
||||
)
|
||||
|
||||
// Windows API Console types
|
||||
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
|
||||
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
|
||||
type (
|
||||
CHAR_INFO struct {
|
||||
UnicodeChar uint16
|
||||
Attributes uint16
|
||||
}
|
||||
|
||||
CONSOLE_CURSOR_INFO struct {
|
||||
Size uint32
|
||||
Visible int32
|
||||
}
|
||||
|
||||
CONSOLE_SCREEN_BUFFER_INFO struct {
|
||||
Size COORD
|
||||
CursorPosition COORD
|
||||
Attributes uint16
|
||||
Window SMALL_RECT
|
||||
MaximumWindowSize COORD
|
||||
}
|
||||
|
||||
COORD struct {
|
||||
X int16
|
||||
Y int16
|
||||
}
|
||||
|
||||
SMALL_RECT struct {
|
||||
Left int16
|
||||
Top int16
|
||||
Right int16
|
||||
Bottom int16
|
||||
}
|
||||
|
||||
// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
|
||||
INPUT_RECORD struct {
|
||||
EventType uint16
|
||||
KeyEvent KEY_EVENT_RECORD
|
||||
}
|
||||
|
||||
KEY_EVENT_RECORD struct {
|
||||
KeyDown int32
|
||||
RepeatCount uint16
|
||||
VirtualKeyCode uint16
|
||||
VirtualScanCode uint16
|
||||
UnicodeChar uint16
|
||||
ControlKeyState uint32
|
||||
}
|
||||
|
||||
WINDOW_BUFFER_SIZE struct {
|
||||
Size COORD
|
||||
}
|
||||
)
|
||||
|
||||
// boolToBOOL converts a Go bool into a Windows int32.
|
||||
func boolToBOOL(f bool) int32 {
|
||||
if f {
|
||||
return int32(1)
|
||||
} else {
|
||||
return int32(0)
|
||||
}
|
||||
}
|
||||
|
||||
// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
|
||||
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||
r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
|
||||
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
|
||||
r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleCursorPosition location of the console cursor.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
|
||||
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
|
||||
r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
|
||||
use(coord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// GetConsoleMode gets the console mode for given file descriptor
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
|
||||
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
|
||||
err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
|
||||
return mode, err
|
||||
}
|
||||
|
||||
// SetConsoleMode sets the console mode for given file descriptor
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
|
||||
func SetConsoleMode(handle uintptr, mode uint32) error {
|
||||
r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
|
||||
use(mode)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
|
||||
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||
info := CONSOLE_SCREEN_BUFFER_INFO{}
|
||||
err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
|
||||
r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
|
||||
use(scrollRect)
|
||||
use(clipRect)
|
||||
use(destOrigin)
|
||||
use(char)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleScreenBufferSize sets the size of the console screen buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
|
||||
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
|
||||
r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
|
||||
use(coord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleTextAttribute sets the attributes of characters written to the
|
||||
// console screen buffer by the WriteFile or WriteConsole function.
|
||||
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
|
||||
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
|
||||
r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
|
||||
use(attribute)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
|
||||
// Note that the size and location must be within and no larger than the backing console screen buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
|
||||
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
|
||||
r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
|
||||
use(isAbsolute)
|
||||
use(rect)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
|
||||
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
|
||||
r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
|
||||
use(buffer)
|
||||
use(bufferSize)
|
||||
use(bufferCoord)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// ReadConsoleInput reads (and removes) data from the console input buffer.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
|
||||
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
|
||||
r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
|
||||
use(buffer)
|
||||
return checkError(r1, r2, err)
|
||||
}
|
||||
|
||||
// WaitForSingleObject waits for the passed handle to be signaled.
|
||||
// It returns true if the handle was signaled; false otherwise.
|
||||
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
|
||||
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
|
||||
r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
|
||||
switch r1 {
|
||||
case WAIT_ABANDONED, WAIT_TIMEOUT:
|
||||
return false, nil
|
||||
case WAIT_SIGNALED:
|
||||
return true, nil
|
||||
}
|
||||
use(msWait)
|
||||
return false, err
|
||||
}
|
||||
|
||||
// String helpers
|
||||
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
|
||||
return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
|
||||
}
|
||||
|
||||
func (coord COORD) String() string {
|
||||
return fmt.Sprintf("%v,%v", coord.X, coord.Y)
|
||||
}
|
||||
|
||||
func (rect SMALL_RECT) String() string {
|
||||
return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
|
||||
}
|
||||
|
||||
// checkError evaluates the results of a Windows API call and returns the error if it failed.
|
||||
func checkError(r1, r2 uintptr, err error) error {
|
||||
// Windows APIs return non-zero to indicate success
|
||||
if r1 != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return the error if provided, otherwise default to EINVAL
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
|
||||
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
|
||||
func coordToPointer(c COORD) uintptr {
|
||||
// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
|
||||
return uintptr(*((*uint32)(unsafe.Pointer(&c))))
|
||||
}
|
||||
|
||||
// use is a no-op, but the compiler cannot see that it is.
|
||||
// Calling use(p) ensures that p is kept live until that point.
|
||||
func use(p interface{}) {}
|
|
@ -0,0 +1,100 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import "github.com/Azure/go-ansiterm"
|
||||
|
||||
const (
|
||||
FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
)
|
||||
|
||||
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
|
||||
// request represented by the passed ANSI mode.
|
||||
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
|
||||
switch ansiMode {
|
||||
|
||||
// Mode styles
|
||||
case ansiterm.ANSI_SGR_BOLD:
|
||||
windowsMode = windowsMode | FOREGROUND_INTENSITY
|
||||
|
||||
case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
|
||||
windowsMode &^= FOREGROUND_INTENSITY
|
||||
|
||||
case ansiterm.ANSI_SGR_UNDERLINE:
|
||||
windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
|
||||
|
||||
case ansiterm.ANSI_SGR_REVERSE:
|
||||
inverted = true
|
||||
|
||||
case ansiterm.ANSI_SGR_REVERSE_OFF:
|
||||
inverted = false
|
||||
|
||||
case ansiterm.ANSI_SGR_UNDERLINE_OFF:
|
||||
windowsMode &^= COMMON_LVB_UNDERSCORE
|
||||
|
||||
// Foreground colors
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_RED:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
|
||||
windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
|
||||
|
||||
// Background colors
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
|
||||
// Black with no intensity
|
||||
windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_RED:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
|
||||
case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
|
||||
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
|
||||
}
|
||||
|
||||
return windowsMode, inverted
|
||||
}
|
||||
|
||||
// invertAttributes inverts the foreground and background colors of a Windows attributes value
|
||||
func invertAttributes(windowsMode uint16) uint16 {
|
||||
return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
|
||||
}
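
// exampleBoldRed is an illustrative sketch (not part of the vendored file): it applies
// SGR 1 (bold) and then SGR 31 (red foreground) on top of a base attribute value using
// collectAnsiIntoWindowsAttributes above.
func exampleBoldRed(base uint16) uint16 {
	mode, inverted := collectAnsiIntoWindowsAttributes(base, false, base, ansiterm.ANSI_SGR_BOLD)
	mode, _ = collectAnsiIntoWindowsAttributes(mode, inverted, base, ansiterm.ANSI_SGR_FOREGROUND_RED)
	// mode now carries FOREGROUND_INTENSITY and FOREGROUND_RED in its foreground bits.
	return mode
}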
|
|
@ -0,0 +1,101 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
const (
|
||||
horizontal = iota
|
||||
vertical
|
||||
)
|
||||
|
||||
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
|
||||
if h.originMode {
|
||||
sr := h.effectiveSr(info.Window)
|
||||
return SMALL_RECT{
|
||||
Top: sr.top,
|
||||
Bottom: sr.bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
} else {
|
||||
return SMALL_RECT{
|
||||
Top: info.Window.Top,
|
||||
Bottom: info.Window.Bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setCursorPosition sets the cursor to the specified position, bounded to the screen size
|
||||
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
|
||||
position.X = ensureInRange(position.X, window.Left, window.Right)
|
||||
position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
|
||||
err := SetConsoleCursorPosition(h.fd, position)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
|
||||
return h.moveCursor(vertical, param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
|
||||
return h.moveCursor(horizontal, param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
switch moveMode {
|
||||
case horizontal:
|
||||
position.X += int16(param)
|
||||
case vertical:
|
||||
position.Y += int16(param)
|
||||
}
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
position.X = 0
|
||||
position.Y += int16(param)
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
position := info.CursorPosition
|
||||
position.X = int16(param) - 1
|
||||
|
||||
if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,84 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import "github.com/Azure/go-ansiterm"
|
||||
|
||||
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
|
||||
// Ignore an invalid (negative area) request
|
||||
if toCoord.Y < fromCoord.Y {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
var coordStart = COORD{}
|
||||
var coordEnd = COORD{}
|
||||
|
||||
xCurrent, yCurrent := fromCoord.X, fromCoord.Y
|
||||
xEnd, yEnd := toCoord.X, toCoord.Y
|
||||
|
||||
// Clear any partial initial line
|
||||
if xCurrent > 0 {
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yCurrent
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xCurrent = 0
|
||||
yCurrent += 1
|
||||
}
|
||||
|
||||
// Clear intervening rectangular section
|
||||
if yCurrent < yEnd {
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yEnd-1
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xCurrent = 0
|
||||
yCurrent = yEnd
|
||||
}
|
||||
|
||||
// Clear remaining partial ending line
|
||||
coordStart.X, coordStart.Y = xCurrent, yCurrent
|
||||
coordEnd.X, coordEnd.Y = xEnd, yEnd
|
||||
|
||||
err = h.clearRect(attributes, coordStart, coordEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
|
||||
region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
|
||||
width := toCoord.X - fromCoord.X + 1
|
||||
height := toCoord.Y - fromCoord.Y + 1
|
||||
size := uint32(width) * uint32(height)
|
||||
|
||||
if size <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
buffer := make([]CHAR_INFO, size)
|
||||
|
||||
char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
|
||||
for i := 0; i < int(size); i++ {
|
||||
buffer[i] = char
|
||||
}
|
||||
|
||||
err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,118 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
// effectiveSr gets the current effective scroll region in buffer coordinates
|
||||
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
|
||||
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
|
||||
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
|
||||
if top >= bottom {
|
||||
top = window.Top
|
||||
bottom = window.Bottom
|
||||
}
|
||||
return scrollRegion{top: top, bottom: bottom}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sr := h.effectiveSr(info.Window)
|
||||
return h.scroll(param, sr, info)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
|
||||
return h.scrollUp(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
start := info.CursorPosition.Y
|
||||
sr := h.effectiveSr(info.Window)
|
||||
// Lines cannot be inserted or deleted outside the scrolling region.
|
||||
if start >= sr.top && start <= sr.bottom {
|
||||
sr.top = start
|
||||
return h.scroll(param, sr, info)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) insertLines(param int) error {
|
||||
return h.deleteLines(-param)
|
||||
}
|
||||
|
||||
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
|
||||
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||
h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
|
||||
h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
|
||||
|
||||
// Copy from and clip to the scroll region (full buffer width)
|
||||
scrollRect := SMALL_RECT{
|
||||
Top: sr.top,
|
||||
Bottom: sr.bottom,
|
||||
Left: 0,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
|
||||
// Origin to which area should be copied
|
||||
destOrigin := COORD{
|
||||
X: 0,
|
||||
Y: sr.top - int16(param),
|
||||
}
|
||||
|
||||
char := CHAR_INFO{
|
||||
UnicodeChar: ' ',
|
||||
Attributes: h.attributes,
|
||||
}
|
||||
|
||||
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return h.scrollLine(param, info.CursorPosition, info)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
|
||||
return h.deleteCharacters(-param)
|
||||
}
|
||||
|
||||
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
|
||||
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
|
||||
// Copy from and clip to the scroll region (full buffer width)
|
||||
scrollRect := SMALL_RECT{
|
||||
Top: position.Y,
|
||||
Bottom: position.Y,
|
||||
Left: position.X,
|
||||
Right: info.Size.X - 1,
|
||||
}
|
||||
|
||||
// Origin to which area should be copied
|
||||
destOrigin := COORD{
|
||||
X: position.X - int16(columns),
|
||||
Y: position.Y,
|
||||
}
|
||||
|
||||
char := CHAR_INFO{
|
||||
UnicodeChar: ' ',
|
||||
Attributes: h.attributes,
|
||||
}
|
||||
|
||||
if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,9 @@
// +build windows

package winterm

// addInRange increments a value by the passed quantity while ensuring the values
// always remain within the supplied min / max range.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
    return ensureInRange(n+increment, min, max)
}
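addInRange leans on ensureInRange, which is defined elsewhere in this vendored package and does not appear in this diff. Here is a small sketch under the assumption that ensureInRange is a plain clamp into an inclusive range.

```go
package main

import "fmt"

// Assumed behaviour of the package's ensureInRange helper (not shown in this
// diff): clamp n into the inclusive [min, max] interval.
func ensureInRange(n int16, min int16, max int16) int16 {
    if n < min {
        return min
    }
    if n > max {
        return max
    }
    return n
}

// addInRange, as in the vendored file: add and clamp in one step.
func addInRange(n int16, increment int16, min int16, max int16) int16 {
    return ensureInRange(n+increment, min, max)
}

func main() {
    // Moving the cursor 10 rows down from row 22 in a 0..24 window stops at 24.
    fmt.Println(addInRange(22, 10, 0, 24)) // 24
}
```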
@@ -0,0 +1,743 @@
|
|||
// +build windows
|
||||
|
||||
package winterm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/go-ansiterm"
|
||||
)
|
||||
|
||||
type windowsAnsiEventHandler struct {
|
||||
fd uintptr
|
||||
file *os.File
|
||||
infoReset *CONSOLE_SCREEN_BUFFER_INFO
|
||||
sr scrollRegion
|
||||
buffer bytes.Buffer
|
||||
attributes uint16
|
||||
inverted bool
|
||||
wrapNext bool
|
||||
drewMarginByte bool
|
||||
originMode bool
|
||||
marginByte byte
|
||||
curInfo *CONSOLE_SCREEN_BUFFER_INFO
|
||||
curPos COORD
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
type Option func(*windowsAnsiEventHandler)
|
||||
|
||||
func WithLogf(f func(string, ...interface{})) Option {
|
||||
return func(w *windowsAnsiEventHandler) {
|
||||
w.logf = f
|
||||
}
|
||||
}
|
||||
|
||||
func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
|
||||
infoReset, err := GetConsoleScreenBufferInfo(fd)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
h := &windowsAnsiEventHandler{
|
||||
fd: fd,
|
||||
file: file,
|
||||
infoReset: infoReset,
|
||||
attributes: infoReset.Attributes,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(h)
|
||||
}
|
||||
|
||||
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
|
||||
logFile, _ := os.Create("winEventHandler.log")
|
||||
logger := log.New(logFile, "", log.LstdFlags)
|
||||
if h.logf != nil {
|
||||
l := h.logf
|
||||
h.logf = func(s string, v ...interface{}) {
|
||||
l(s, v...)
|
||||
logger.Printf(s, v...)
|
||||
}
|
||||
} else {
|
||||
h.logf = logger.Printf
|
||||
}
|
||||
}
|
||||
|
||||
if h.logf == nil {
|
||||
h.logf = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
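CreateWinEventHandler is the constructor that the rest of this file hangs off, with logging wired in through the functional Option pattern (WithLogf). The usage sketch below is illustrative only: the winterm import path and the use of os.Stdout as the console handle are assumptions, and the call only does anything useful on a Windows console.

```go
// +build windows

package main

import (
    "log"
    "os"

    "github.com/Azure/go-ansiterm/winterm"
)

func main() {
    // Construct the event handler for the process's stdout console and route
    // its debug output through the standard logger via the WithLogf option.
    handler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout, winterm.WithLogf(log.Printf))
    if handler == nil {
        // CreateWinEventHandler returns nil when the fd is not a console.
        log.Fatal("stdout is not attached to a Windows console")
    }

    // The handler implements ansiterm.AnsiEventHandler; an ansiterm parser
    // would normally drive it. Here we just emit one printable byte.
    if err := handler.Print('A'); err != nil {
        log.Fatal(err)
    }
    _ = handler.Flush()
}
```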
type scrollRegion struct {
|
||||
top int16
|
||||
bottom int16
|
||||
}
|
||||
|
||||
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
|
||||
// current cursor position and scroll region settings, in which case it returns
|
||||
// true. If no special handling is necessary, then it does nothing and returns
|
||||
// false.
|
||||
//
|
||||
// In the false case, the caller should ensure that a carriage return
|
||||
// and line feed are inserted or that the text is otherwise wrapped.
|
||||
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
sr := h.effectiveSr(info.Window)
|
||||
if pos.Y == sr.bottom {
|
||||
// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
|
||||
// is the full window.
|
||||
if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
h.updatePos(pos)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// A custom scroll region is active. Scroll the window manually to simulate
|
||||
// the LF.
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.logf("Simulating LF inside scroll region")
|
||||
if err := h.scrollUp(1); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
|
||||
} else if pos.Y < info.Window.Bottom {
|
||||
// Let Windows handle the LF.
|
||||
pos.Y++
|
||||
if includeCR {
|
||||
pos.X = 0
|
||||
}
|
||||
h.updatePos(pos)
|
||||
return false, nil
|
||||
} else {
|
||||
// The cursor is at the bottom of the screen but outside the scroll
|
||||
// region. Skip the LF.
|
||||
h.logf("Simulating LF outside scroll region")
|
||||
if includeCR {
|
||||
if err := h.Flush(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
pos.X = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// executeLF executes a LF without a CR.
|
||||
func (h *windowsAnsiEventHandler) executeLF() error {
|
||||
handled, err := h.simulateLF(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !handled {
|
||||
// Windows LF will reset the cursor column position. Write the LF
|
||||
// and restore the cursor position.
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||
if pos.X != 0 {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("Resetting cursor position for LF without CR")
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Print(b byte) error {
|
||||
if h.wrapNext {
|
||||
h.buffer.WriteByte(h.marginByte)
|
||||
h.clearWrap()
|
||||
if _, err := h.simulateLF(true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X == info.Size.X-1 {
|
||||
h.wrapNext = true
|
||||
h.marginByte = b
|
||||
} else {
|
||||
pos.X++
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(b)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Execute(b byte) error {
|
||||
switch b {
|
||||
case ansiterm.ANSI_TAB:
|
||||
h.logf("Execute(TAB)")
|
||||
// Move to the next tab stop, but preserve auto-wrap if already set.
|
||||
if !h.wrapNext {
|
||||
pos, info, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pos.X = (pos.X + 8) - pos.X%8
|
||||
if pos.X >= info.Size.X {
|
||||
pos.X = info.Size.X - 1
|
||||
}
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_BEL:
|
||||
h.buffer.WriteByte(ansiterm.ANSI_BEL)
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_BACKSPACE:
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X > 0 {
|
||||
pos.X--
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
|
||||
}
|
||||
return nil
|
||||
|
||||
case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
|
||||
// Treat as true LF.
|
||||
return h.executeLF()
|
||||
|
||||
case ansiterm.ANSI_LINE_FEED:
|
||||
// Simulate a CR and LF for now since there is no way in go-ansiterm
|
||||
// to tell if the LF should include CR (and more things break when it's
|
||||
// missing than when it's incorrectly added).
|
||||
handled, err := h.simulateLF(true)
|
||||
if handled || err != nil {
|
||||
return err
|
||||
}
|
||||
return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
|
||||
|
||||
case ansiterm.ANSI_CARRIAGE_RETURN:
|
||||
if h.wrapNext {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.clearWrap()
|
||||
}
|
||||
pos, _, err := h.getCurrentInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pos.X != 0 {
|
||||
pos.X = 0
|
||||
h.updatePos(pos)
|
||||
h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
|
||||
}
|
||||
return nil
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUU(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorVertical(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUD(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorVertical(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUF(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorHorizontal(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUB(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorHorizontal(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CNL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorLine(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CPL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorLine(-param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CHA(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.moveCursorColumn(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) VPA(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("VPA: [[%d]]", param)
|
||||
h.clearWrap()
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
window := h.getCursorWindow(info)
|
||||
position := info.CursorPosition
|
||||
position.Y = window.Top + int16(param) - 1
|
||||
return h.setCursorPosition(position, window)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("CUP: [[%d %d]]", row, col)
|
||||
h.clearWrap()
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
window := h.getCursorWindow(info)
|
||||
position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
|
||||
return h.setCursorPosition(position, window)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("HVP: [[%d %d]]", row, col)
|
||||
h.clearWrap()
|
||||
return h.CUP(row, col)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
|
||||
h.clearWrap()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
|
||||
h.clearWrap()
|
||||
h.originMode = enable
|
||||
return h.CUP(1, 1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
|
||||
h.clearWrap()
|
||||
if err := h.ED(2); err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
targetWidth := int16(80)
|
||||
if use132 {
|
||||
targetWidth = 132
|
||||
}
|
||||
if info.Size.X < targetWidth {
|
||||
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||
h.logf("set buffer failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
window := info.Window
|
||||
window.Left = 0
|
||||
window.Right = targetWidth - 1
|
||||
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||
h.logf("set window failed: %v", err)
|
||||
return err
|
||||
}
|
||||
if info.Size.X > targetWidth {
|
||||
if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
|
||||
h.logf("set buffer failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return SetConsoleCursorPosition(h.fd, COORD{0, 0})
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) ED(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("ED: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
|
||||
// [J -- Erases from the cursor to the end of the screen, including the cursor position.
|
||||
// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
|
||||
// [2J -- Erases the complete display. The cursor does not move.
|
||||
// Notes:
|
||||
// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var start COORD
|
||||
var end COORD
|
||||
|
||||
switch param {
|
||||
case 0:
|
||||
start = info.CursorPosition
|
||||
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||
|
||||
case 1:
|
||||
start = COORD{0, 0}
|
||||
end = info.CursorPosition
|
||||
|
||||
case 2:
|
||||
start = COORD{0, 0}
|
||||
end = COORD{info.Size.X - 1, info.Size.Y - 1}
|
||||
}
|
||||
|
||||
err = h.clearRange(h.attributes, start, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the whole buffer was cleared, move the window to the top while preserving
|
||||
// the window-relative cursor position.
|
||||
if param == 2 {
|
||||
pos := info.CursorPosition
|
||||
window := info.Window
|
||||
pos.Y -= window.Top
|
||||
window.Bottom -= window.Top
|
||||
window.Top = 0
|
||||
if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) EL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("EL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
|
||||
// [K -- Erases from the cursor to the end of the line, including the cursor position.
|
||||
// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
|
||||
// [2K -- Erases the complete line.
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var start COORD
|
||||
var end COORD
|
||||
|
||||
switch param {
|
||||
case 0:
|
||||
start = info.CursorPosition
|
||||
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||
|
||||
case 1:
|
||||
start = COORD{0, info.CursorPosition.Y}
|
||||
end = info.CursorPosition
|
||||
|
||||
case 2:
|
||||
start = COORD{0, info.CursorPosition.Y}
|
||||
end = COORD{info.Size.X, info.CursorPosition.Y}
|
||||
}
|
||||
|
||||
err = h.clearRange(h.attributes, start, end)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) IL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("IL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.insertLines(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DL(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DL: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.deleteLines(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) ICH(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("ICH: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.insertCharacters(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DCH(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DCH: [%v]", strconv.Itoa(param))
|
||||
h.clearWrap()
|
||||
return h.deleteCharacters(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SGR(params []int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
strings := []string{}
|
||||
for _, v := range params {
|
||||
strings = append(strings, strconv.Itoa(v))
|
||||
}
|
||||
|
||||
h.logf("SGR: [%v]", strings)
|
||||
|
||||
if len(params) <= 0 {
|
||||
h.attributes = h.infoReset.Attributes
|
||||
h.inverted = false
|
||||
} else {
|
||||
for _, attr := range params {
|
||||
|
||||
if attr == ansiterm.ANSI_SGR_RESET {
|
||||
h.attributes = h.infoReset.Attributes
|
||||
h.inverted = false
|
||||
continue
|
||||
}
|
||||
|
||||
h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
|
||||
}
|
||||
}
|
||||
|
||||
attributes := h.attributes
|
||||
if h.inverted {
|
||||
attributes = invertAttributes(attributes)
|
||||
}
|
||||
err := SetConsoleTextAttribute(h.fd, attributes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SU(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("SU: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.scrollUp(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) SD(param int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("SD: [%v]", []string{strconv.Itoa(param)})
|
||||
h.clearWrap()
|
||||
return h.scrollDown(param)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DA(params []string) error {
|
||||
h.logf("DA: [%v]", params)
|
||||
// DA cannot be implemented because it must send data on the VT100 input stream,
|
||||
// which is not available to go-ansiterm.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("DECSTBM: [%d, %d]", top, bottom)
|
||||
|
||||
// Windows is 0 indexed, Linux is 1 indexed
|
||||
h.sr.top = int16(top - 1)
|
||||
h.sr.bottom = int16(bottom - 1)
|
||||
|
||||
// This command also moves the cursor to the origin.
|
||||
h.clearWrap()
|
||||
return h.CUP(1, 1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) RI() error {
|
||||
if err := h.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
h.logf("RI: []")
|
||||
h.clearWrap()
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sr := h.effectiveSr(info.Window)
|
||||
if info.CursorPosition.Y == sr.top {
|
||||
return h.scrollDown(1)
|
||||
}
|
||||
|
||||
return h.moveCursorVertical(-1)
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) IND() error {
|
||||
h.logf("IND: []")
|
||||
return h.executeLF()
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) Flush() error {
|
||||
h.curInfo = nil
|
||||
if h.buffer.Len() > 0 {
|
||||
h.logf("Flush: [%s]", h.buffer.Bytes())
|
||||
if _, err := h.buffer.WriteTo(h.file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if h.wrapNext && !h.drewMarginByte {
|
||||
h.logf("Flush: drawing margin byte '%c'", h.marginByte)
|
||||
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
|
||||
size := COORD{1, 1}
|
||||
position := COORD{0, 0}
|
||||
region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
|
||||
if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil {
|
||||
return err
|
||||
}
|
||||
h.drewMarginByte = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCurrentInfo ensures that the current console screen information has been queried
|
||||
// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
|
||||
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
|
||||
if h.curInfo == nil {
|
||||
info, err := GetConsoleScreenBufferInfo(h.fd)
|
||||
if err != nil {
|
||||
return COORD{}, nil, err
|
||||
}
|
||||
h.curInfo = info
|
||||
h.curPos = info.CursorPosition
|
||||
}
|
||||
return h.curPos, h.curInfo, nil
|
||||
}
|
||||
|
||||
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
|
||||
if h.curInfo == nil {
|
||||
panic("failed to call getCurrentInfo before calling updatePos")
|
||||
}
|
||||
h.curPos = pos
|
||||
}
|
||||
|
||||
// clearWrap clears the state where the cursor is in the margin
|
||||
// waiting for the next character before wrapping the line. This must
|
||||
// be done before most operations that act on the cursor.
|
||||
func (h *windowsAnsiEventHandler) clearWrap() {
|
||||
h.wrapNext = false
|
||||
h.drewMarginByte = false
|
||||
}
|
|
@@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014-2019 TSUYUSATO Kitsune
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@@ -0,0 +1,52 @@
# heredoc

[](https://circleci.com/gh/MakeNowJust/heredoc) [](https://godoc.org/github.com/MakeNowJust/heredoc)

## About

Package heredoc provides here-documents that keep indentation.

## Install

```console
$ go get github.com/MakeNowJust/heredoc
```

## Import

```go
// usual
import "github.com/MakeNowJust/heredoc"
```

## Example

```go
package main

import (
    "fmt"
    "github.com/MakeNowJust/heredoc"
)

func main() {
    fmt.Println(heredoc.Doc(`
        Lorem ipsum dolor sit amet, consectetur adipisicing elit,
        sed do eiusmod tempor incididunt ut labore et dolore magna
        aliqua. Ut enim ad minim veniam, ...
    `))
    // Output:
    // Lorem ipsum dolor sit amet, consectetur adipisicing elit,
    // sed do eiusmod tempor incididunt ut labore et dolore magna
    // aliqua. Ut enim ad minim veniam, ...
    //
}
```

## API Document

- [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc)

## License

This software is released under the MIT License, see LICENSE.
@@ -0,0 +1,3 @@
module github.com/MakeNowJust/heredoc

go 1.12
@@ -0,0 +1,105 @@
// Copyright (c) 2014-2019 TSUYUSATO Kitsune
// This software is released under the MIT License.
// http://opensource.org/licenses/mit-license.php

// Package heredoc provides creation of here-documents from raw strings.
//
// Golang supports raw-string syntax.
//
//     doc := `
//         Foo
//         Bar
//     `
//
// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to
//
//     "\n\tFoo\n\tBar\n"
//
// I don't want this!
//
// However this problem is solved by package heredoc.
//
//     doc := heredoc.Doc(`
//         Foo
//         Bar
//     `)
//
// Is equivalent to
//
//     "Foo\nBar\n"
package heredoc

import (
    "fmt"
    "strings"
    "unicode"
)

const maxInt = int(^uint(0) >> 1)

// Doc returns un-indented string as here-document.
func Doc(raw string) string {
    skipFirstLine := false
    if len(raw) > 0 && raw[0] == '\n' {
        raw = raw[1:]
    } else {
        skipFirstLine = true
    }

    lines := strings.Split(raw, "\n")

    minIndentSize := getMinIndent(lines, skipFirstLine)
    lines = removeIndentation(lines, minIndentSize, skipFirstLine)

    return strings.Join(lines, "\n")
}

// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
func getMinIndent(lines []string, skipFirstLine bool) int {
    minIndentSize := maxInt

    for i, line := range lines {
        if i == 0 && skipFirstLine {
            continue
        }

        indentSize := 0
        for _, r := range []rune(line) {
            if unicode.IsSpace(r) {
                indentSize += 1
            } else {
                break
            }
        }

        if len(line) == indentSize {
            if i == len(lines)-1 && indentSize < minIndentSize {
                lines[i] = ""
            }
        } else if indentSize < minIndentSize {
            minIndentSize = indentSize
        }
    }
    return minIndentSize
}

// removeIndentation removes n characters from the front of each line in lines.
// Skips first line if skipFirstLine is true, skips empty lines.
func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
    for i, line := range lines {
        if i == 0 && skipFirstLine {
            continue
        }

        if len(lines[i]) >= n {
            lines[i] = line[n:]
        }
    }
    return lines
}

// Docf returns unindented and formatted string as here-document.
// Formatting is done as for fmt.Printf().
func Docf(raw string, args ...interface{}) string {
    return fmt.Sprintf(Doc(raw), args...)
}
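Docf, defined at the end of the file above, is simply Doc followed by fmt.Sprintf; the README's example only covers Doc, so here is a short sketch of Docf (the format values are arbitrary):

```go
package main

import (
    "fmt"

    "github.com/MakeNowJust/heredoc"
)

func main() {
    // Docf first strips the common indentation, then applies Printf-style
    // formatting to the de-dented text.
    fmt.Print(heredoc.Docf(`
        Package: %s
        Version: %s
    `, "heredoc", "v1.0.0"))
    // Output:
    // Package: heredoc
    // Version: v1.0.0
}
```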
@@ -0,0 +1,27 @@
|
|||
Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@@ -0,0 +1,39 @@
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gettext

import (
    "regexp"
    "runtime"
)

var (
    reInit    = regexp.MustCompile(`init·\d+$`) // main.init·1
    reClosure = regexp.MustCompile(`func·\d+$`) // main.func·001
)

// caller types:
// runtime.goexit
// runtime.main
// main.init
// main.main
// main.init·1 -> main.init
// main.func·001 -> main.func
// code.google.com/p/gettext-go/gettext.TestCallerName
// ...
func callerName(skip int) string {
    pc, _, _, ok := runtime.Caller(skip)
    if !ok {
        return ""
    }
    name := runtime.FuncForPC(pc).Name()
    if reInit.MatchString(name) {
        return reInit.ReplaceAllString(name, "init")
    }
    if reClosure.MatchString(name) {
        return reClosure.ReplaceAllString(name, "func")
    }
    return name
}
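callerName is unexported, so it can only be exercised from inside the package; the hypothetical helper below is illustration only and is not part of the diff. It shows how the skip argument selects whose name is returned.

```go
package gettext

// exampleCaller is a hypothetical in-package illustration: callerName(1)
// skips callerName's own frame, so it returns the fully qualified name of
// the function that called it, e.g.
// "github.com/chai2010/gettext-go/gettext.exampleCaller".
// Gettext (later in this vendored package) passes 2, so the resulting
// msgctxt names the caller of Gettext itself.
func exampleCaller() string {
    return callerName(1)
}
```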
@@ -0,0 +1,66 @@
// Copyright 2013 <chaishushan{AT}gmail.com>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package gettext implements a basic GNU gettext library.

Example:
    import (
        "github.com/chai2010/gettext-go/gettext"
    )

    func main() {
        gettext.SetLocale("zh_CN")
        gettext.Textdomain("hello")

        // gettext.BindTextdomain("hello", "local", nil)         // from local dir
        // gettext.BindTextdomain("hello", "local.zip", nil)     // from local zip file
        // gettext.BindTextdomain("hello", "local.zip", zipData) // from embedded zip data

        gettext.BindTextdomain("hello", "local", nil)

        // translate source text
        fmt.Println(gettext.Gettext("Hello, world!"))
        // Output: 你好, 世界!

        // translate resource
        fmt.Println(string(gettext.Getdata("poems.txt")))
        // Output: ...
    }

Translate directory struct("../examples/local.zip"):

    Root: "path" or "file.zip/zipBaseName"
     +-default                      # local: $(LC_MESSAGES) or $(LANG) or "default"
     |  +-LC_MESSAGES               # just for `gettext.Gettext`
     |  |  +-hello.mo               # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
     |  |  \-hello.po               # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
     |  |
     |  \-LC_RESOURCE               # just for `gettext.Getdata`
     |     +-hello                  # domain map a dir in resource translate
     |        +-favicon.ico         # $(Root)/$(local)/LC_RESOURCE/$(domain)/$(filename)
     |        \-poems.txt
     |
     \-zh_CN                        # Simplified Chinese translations
        +-LC_MESSAGES
        |  +-hello.mo               # try "$(domain).mo" first
        |  \-hello.po               # try "$(domain).po" second
        |
        \-LC_RESOURCE
           +-hello
              +-favicon.ico         # try "$(local)/$(domain)/file" first
              \-poems.txt           # try "default/$(domain)/file" second

See:
    http://en.wikipedia.org/wiki/Gettext
    http://www.gnu.org/software/gettext/manual/html_node
    http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html
    http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
    http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
    http://www.poedit.net/

Please report bugs to <chaishushan{AT}gmail.com>.
Thanks!
*/
package gettext
@@ -0,0 +1,119 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type domainManager struct {
|
||||
mutex sync.Mutex
|
||||
locale string
|
||||
domain string
|
||||
domainMap map[string]*fileSystem
|
||||
trTextMap map[string]*translator
|
||||
}
|
||||
|
||||
func newDomainManager() *domainManager {
|
||||
return &domainManager{
|
||||
locale: DefaultLocale,
|
||||
domainMap: make(map[string]*fileSystem),
|
||||
trTextMap: make(map[string]*translator),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *domainManager) makeTrMapKey(domain, locale string) string {
|
||||
return domain + "_$$$_" + locale
|
||||
}
|
||||
|
||||
func (p *domainManager) Bind(domain, path string, data []byte) (domains, paths []string) {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
|
||||
switch {
|
||||
case domain != "" && path != "": // bind new domain
|
||||
p.bindDomainTranslators(domain, path, data)
|
||||
case domain != "" && path == "": // delete domain
|
||||
p.deleteDomain(domain)
|
||||
}
|
||||
|
||||
// return all bind domain
|
||||
for k, fs := range p.domainMap {
|
||||
domains = append(domains, k)
|
||||
paths = append(paths, fs.FsName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *domainManager) SetLocale(locale string) string {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
if locale != "" {
|
||||
p.locale = locale
|
||||
}
|
||||
return p.locale
|
||||
}
|
||||
|
||||
func (p *domainManager) SetDomain(domain string) string {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
if domain != "" {
|
||||
p.domain = domain
|
||||
}
|
||||
return p.domain
|
||||
}
|
||||
|
||||
func (p *domainManager) Getdata(name string) []byte {
|
||||
return p.getdata(p.domain, name)
|
||||
}
|
||||
|
||||
func (p *domainManager) DGetdata(domain, name string) []byte {
|
||||
return p.getdata(domain, name)
|
||||
}
|
||||
|
||||
func (p *domainManager) PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
return p.gettext(p.domain, msgctxt, msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
func (p *domainManager) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
||||
p.mutex.Lock()
|
||||
defer p.mutex.Unlock()
|
||||
return p.gettext(domain, msgctxt, msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
func (p *domainManager) gettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
||||
if p.locale == "" || p.domain == "" {
|
||||
return msgid
|
||||
}
|
||||
if _, ok := p.domainMap[domain]; !ok {
|
||||
return msgid
|
||||
}
|
||||
if f, ok := p.trTextMap[p.makeTrMapKey(domain, p.locale)]; ok {
|
||||
return f.PNGettext(msgctxt, msgid, msgidPlural, n)
|
||||
}
|
||||
return msgid
|
||||
}
|
||||
|
||||
func (p *domainManager) getdata(domain, name string) []byte {
|
||||
if p.locale == "" || p.domain == "" {
|
||||
return nil
|
||||
}
|
||||
if _, ok := p.domainMap[domain]; !ok {
|
||||
return nil
|
||||
}
|
||||
if fs, ok := p.domainMap[domain]; ok {
|
||||
if data, err := fs.LoadResourceFile(domain, p.locale, name); err == nil {
|
||||
return data
|
||||
}
|
||||
if p.locale != "default" {
|
||||
if data, err := fs.LoadResourceFile(domain, "default", name); err == nil {
|
||||
return data
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
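getdata above first tries the current locale and then falls back to the "default" locale entry. The sketch below uses the exported wrappers that appear later in this diff; the "local" directory layout is hypothetical.

```go
package main

import (
    "fmt"

    "github.com/chai2010/gettext-go/gettext"
)

func main() {
    // Hypothetical layout:
    //   local/zh_TW/LC_RESOURCE/hello/            (poems.txt missing)
    //   local/default/LC_RESOURCE/hello/poems.txt
    gettext.BindTextdomain("hello", "local", nil)
    gettext.Textdomain("hello")
    gettext.SetLocale("zh_TW")

    // The zh_TW resource is tried first; since it is absent in this
    // hypothetical layout, getdata falls back to the "default" locale entry.
    fmt.Println(string(gettext.Getdata("poems.txt")))
}
```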
@@ -0,0 +1,50 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (p *domainManager) bindDomainTranslators(domain, path string, data []byte) {
|
||||
if _, ok := p.domainMap[domain]; ok {
|
||||
p.deleteDomain(domain) // delete old domain
|
||||
}
|
||||
fs := newFileSystem(path, data)
|
||||
for locale, _ := range fs.LocaleMap {
|
||||
trMapKey := p.makeTrMapKey(domain, locale)
|
||||
if data, err := fs.LoadMessagesFile(domain, locale, ".mo"); err == nil {
|
||||
p.trTextMap[trMapKey], _ = newMoTranslator(
|
||||
fmt.Sprintf("%s_%s.mo", domain, locale),
|
||||
data,
|
||||
)
|
||||
continue
|
||||
}
|
||||
if data, err := fs.LoadMessagesFile(domain, locale, ".po"); err == nil {
|
||||
p.trTextMap[trMapKey], _ = newPoTranslator(
|
||||
fmt.Sprintf("%s_%s.po", domain, locale),
|
||||
data,
|
||||
)
|
||||
continue
|
||||
}
|
||||
p.trTextMap[p.makeTrMapKey(domain, locale)] = nilTranslator
|
||||
}
|
||||
p.domainMap[domain] = fs
|
||||
}
|
||||
|
||||
func (p *domainManager) deleteDomain(domain string) {
|
||||
if _, ok := p.domainMap[domain]; !ok {
|
||||
return
|
||||
}
|
||||
// delete all mo files
|
||||
trMapKeyPrefix := p.makeTrMapKey(domain, "")
|
||||
for k, _ := range p.trTextMap {
|
||||
if strings.HasPrefix(k, trMapKeyPrefix) {
|
||||
delete(p.trTextMap, k)
|
||||
}
|
||||
}
|
||||
delete(p.domainMap, domain)
|
||||
}
|
|
@@ -0,0 +1,187 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type fileSystem struct {
|
||||
FsName string
|
||||
FsRoot string
|
||||
FsZipData []byte
|
||||
LocaleMap map[string]bool
|
||||
}
|
||||
|
||||
func newFileSystem(path string, data []byte) *fileSystem {
|
||||
fs := &fileSystem{
|
||||
FsName: path,
|
||||
FsZipData: data,
|
||||
}
|
||||
if err := fs.init(); err != nil {
|
||||
log.Printf("gettext-go: invalid domain, err = %v", err)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func (p *fileSystem) init() error {
|
||||
zipName := func(name string) string {
|
||||
if x := strings.LastIndexAny(name, `\/`); x != -1 {
|
||||
name = name[x+1:]
|
||||
}
|
||||
name = strings.TrimSuffix(name, ".zip")
|
||||
return name
|
||||
}
|
||||
|
||||
// zip data
|
||||
if len(p.FsZipData) != 0 {
|
||||
p.FsRoot = zipName(p.FsName)
|
||||
p.LocaleMap = p.lsZip(p.FsZipData)
|
||||
return nil
|
||||
}
|
||||
|
||||
// local dir or zip file
|
||||
fi, err := os.Stat(p.FsName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// local dir
|
||||
if fi.IsDir() {
|
||||
p.FsRoot = p.FsName
|
||||
p.LocaleMap = p.lsDir(p.FsName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// local zip file
|
||||
p.FsZipData, err = ioutil.ReadFile(p.FsName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.FsRoot = zipName(p.FsName)
|
||||
p.LocaleMap = p.lsZip(p.FsZipData)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *fileSystem) LoadMessagesFile(domain, local, ext string) ([]byte, error) {
|
||||
if len(p.FsZipData) == 0 {
|
||||
trName := p.makeMessagesFileName(domain, local, ext)
|
||||
rcData, err := ioutil.ReadFile(trName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rcData, nil
|
||||
} else {
|
||||
r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
trName := p.makeMessagesFileName(domain, local, ext)
|
||||
for _, f := range r.File {
|
||||
if f.Name != trName {
|
||||
continue
|
||||
}
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rcData, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
return rcData, err
|
||||
}
|
||||
return nil, fmt.Errorf("not found")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *fileSystem) LoadResourceFile(domain, local, name string) ([]byte, error) {
|
||||
if len(p.FsZipData) == 0 {
|
||||
rcName := p.makeResourceFileName(domain, local, name)
|
||||
rcData, err := ioutil.ReadFile(rcName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rcData, nil
|
||||
} else {
|
||||
r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rcName := p.makeResourceFileName(domain, local, name)
|
||||
for _, f := range r.File {
|
||||
if f.Name != rcName {
|
||||
continue
|
||||
}
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rcData, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
return rcData, err
|
||||
}
|
||||
return nil, fmt.Errorf("not found")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *fileSystem) makeMessagesFileName(domain, local, ext string) string {
|
||||
return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.FsRoot, local, domain, ext)
|
||||
}
|
||||
|
||||
func (p *fileSystem) makeResourceFileName(domain, local, name string) string {
|
||||
return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.FsRoot, local, domain, name)
|
||||
}
|
||||
|
||||
func (p *fileSystem) lsZip(data []byte) map[string]bool {
|
||||
r, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
ssMap := make(map[string]bool)
|
||||
for _, f := range r.File {
|
||||
if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 {
|
||||
s := strings.TrimRight(f.Name[:x], `\/`)
|
||||
if x = strings.LastIndexAny(s, `\/`); x != -1 {
|
||||
s = s[x+1:]
|
||||
}
|
||||
if s != "" {
|
||||
ssMap[s] = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 {
|
||||
s := strings.TrimRight(f.Name[:x], `\/`)
|
||||
if x = strings.LastIndexAny(s, `\/`); x != -1 {
|
||||
s = s[x+1:]
|
||||
}
|
||||
if s != "" {
|
||||
ssMap[s] = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
return ssMap
|
||||
}
|
||||
|
||||
func (p *fileSystem) lsDir(path string) map[string]bool {
|
||||
list, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
ssMap := make(map[string]bool)
|
||||
for _, dir := range list {
|
||||
if dir.IsDir() {
|
||||
ssMap[dir.Name()] = true
|
||||
}
|
||||
}
|
||||
return ssMap
|
||||
}
|
|
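makeMessagesFileName and makeResourceFileName encode the on-disk layout described by the package's doc comment. A quick sanity check of the paths they format (the root, locale, and domain values are illustrative):

```go
package main

import "fmt"

func main() {
    root, locale, domain := "local", "zh_CN", "hello"

    // Mirrors fileSystem.makeMessagesFileName: $(Root)/$(local)/LC_MESSAGES/$(domain)$(ext)
    fmt.Printf("%s/%s/LC_MESSAGES/%s%s\n", root, locale, domain, ".mo")
    // local/zh_CN/LC_MESSAGES/hello.mo

    // Mirrors fileSystem.makeResourceFileName: $(Root)/$(local)/LC_RESOURCE/$(domain)/$(name)
    fmt.Printf("%s/%s/LC_RESOURCE/%s/%s\n", root, locale, domain, "poems.txt")
    // local/zh_CN/LC_RESOURCE/hello/poems.txt
}
```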
@@ -0,0 +1,184 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
var (
|
||||
defaultManager = newDomainManager()
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultLocale = getDefaultLocale() // use $(LC_MESSAGES) or $(LANG) or "default"
|
||||
)
|
||||
|
||||
// SetLocale sets and queries the program's current locale.
|
||||
//
|
||||
// If the locale is not an empty string, set the new locale.
|
||||
//
|
||||
// If the locale is empty string, don't change anything.
|
||||
//
|
||||
// The return value is the current locale.
|
||||
//
|
||||
// Examples:
|
||||
// SetLocale("") // get locale: return DefaultLocale
|
||||
// SetLocale("zh_CN") // set locale: return zh_CN
|
||||
// SetLocale("") // get locale: return zh_CN
|
||||
func SetLocale(locale string) string {
|
||||
return defaultManager.SetLocale(locale)
|
||||
}
|
||||
|
||||
// BindTextdomain sets and queries program's domains.
|
||||
//
|
||||
// If both the domain and the path are non-empty strings, bind the new domain.
|
||||
// If the domain already exists, return error.
|
||||
//
|
||||
// If the domain is not empty string, but the path is the empty string,
|
||||
// delete the domain.
|
||||
// If the domain doesn't exist, return an error.
|
||||
//
|
||||
// If the domain and the path are all empty string, don't change anything.
|
||||
//
|
||||
// The return values are all of the bound domains and their paths.
|
||||
//
|
||||
// Examples:
|
||||
// BindTextdomain("poedit", "local", nil) // bind "poedit" domain
|
||||
// BindTextdomain("", "", nil) // return all domains
|
||||
// BindTextdomain("poedit", "", nil) // delete "poedit" domain
|
||||
// BindTextdomain("", "", nil) // return all domains
|
||||
//
|
||||
// Use zip file:
|
||||
// BindTextdomain("poedit", "local.zip", nil) // bind "poedit" domain
|
||||
// BindTextdomain("poedit", "local.zip", zipData) // bind "poedit" domain
|
||||
//
|
||||
func BindTextdomain(domain, path string, zipData []byte) (domains, paths []string) {
|
||||
return defaultManager.Bind(domain, path, zipData)
|
||||
}
|
||||
|
||||
// Textdomain sets and retrieves the current message domain.
|
||||
//
|
||||
// If the domain is not an empty string, set the new domain.
|
||||
//
|
||||
// If the domain is empty string, don't change anything.
|
||||
//
|
||||
// The return value is the domain currently in use.
|
||||
//
|
||||
// Examples:
|
||||
// Textdomain("poedit") // set domain: poedit
|
||||
// Textdomain("") // get domain: return poedit
|
||||
func Textdomain(domain string) string {
|
||||
return defaultManager.SetDomain(domain)
|
||||
}
|
||||
|
||||
// Gettext attempts to translate a text string into the user's native language,
|
||||
// by looking up the translation in a message catalog.
|
||||
//
|
||||
// It uses the caller's function name as the msgctxt.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.Gettext("Hello") // msgctxt is "some/package/name.Foo"
|
||||
// }
|
||||
func Gettext(msgid string) string {
|
||||
return PGettext(callerName(2), msgid)
|
||||
}
|
||||
|
||||
// Getdata attempts to translate a resource file into the user's native language,
|
||||
// by looking up the translation in a message catalog.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// Textdomain("hello")
|
||||
// BindTextdomain("hello", "local.zip", nilOrZipData)
|
||||
// poems := gettext.Getdata("poems.txt")
|
||||
// }
|
||||
func Getdata(name string) []byte {
|
||||
return defaultManager.Getdata(name)
|
||||
}
|
||||
|
||||
// NGettext attempts to translate a text string into the user's native language,
|
||||
// by looking up the appropriate plural form of the translation in a message
|
||||
// catalog.
|
||||
//
|
||||
// It uses the caller's function name as the msgctxt.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.NGettext("%d people", "%d peoples", 2)
|
||||
// }
|
||||
func NGettext(msgid, msgidPlural string, n int) string {
|
||||
return PNGettext(callerName(2), msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
// PGettext attempts to translate a text string into the user's native language,
|
||||
// by looking up the translation in a message catalog.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example"
|
||||
// }
|
||||
func PGettext(msgctxt, msgid string) string {
|
||||
return PNGettext(msgctxt, msgid, "", 0)
|
||||
}
|
||||
|
||||
// PNGettext attempts to translate a text string into the user's native language,
|
||||
// by looking up the appropriate plural form of the translation in a message
|
||||
// catalog.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2)
|
||||
// }
|
||||
func PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
||||
return defaultManager.PNGettext(msgctxt, msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
// DGettext like Gettext(), but looking up the message in the specified domain.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.DGettext("poedit", "Hello")
|
||||
// }
|
||||
func DGettext(domain, msgid string) string {
|
||||
return DPGettext(domain, callerName(2), msgid)
|
||||
}
|
||||
|
||||
// DNGettext like NGettext(), but looking up the message in the specified domain.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.PNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
|
||||
// }
|
||||
func DNGettext(domain, msgid, msgidPlural string, n int) string {
|
||||
return DPNGettext(domain, callerName(2), msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
// DPGettext like PGettext(), but looking up the message in the specified domain.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello")
|
||||
// }
|
||||
func DPGettext(domain, msgctxt, msgid string) string {
|
||||
return DPNGettext(domain, msgctxt, msgid, "", 0)
|
||||
}
|
||||
|
||||
// DPNGettext is like PNGettext(), but looks up the message in the specified domain.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
|
||||
// }
|
||||
func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
|
||||
return defaultManager.DPNGettext(domain, msgctxt, msgid, msgidPlural, n)
|
||||
}
|
||||
|
||||
// DGetdata is like Getdata(), but looks up the resource in the specified domain.
|
||||
//
|
||||
// Examples:
|
||||
// func Foo() {
|
||||
// msg := gettext.DGetdata("hello", "poems.txt")
|
||||
// }
|
||||
func DGetdata(domain, name string) []byte {
|
||||
return defaultManager.DGetdata(domain, name)
|
||||
}
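
// exampleUsage is an illustrative sketch, not part of the original file: it
// shows how the package-level wrappers above could be combined. The "hello"
// domain and "local.zip" archive are hypothetical, and BindTextdomain is
// assumed to be defined elsewhere in this package (it is referenced in the
// Getdata example above).
func exampleUsage() {
	BindTextdomain("hello", "local.zip", nil) // register the hypothetical "hello" domain
	Textdomain("hello")                       // make it the default domain

	_ = Gettext("Hello")                       // caller's function name is used as msgctxt
	_ = NGettext("%d people", "%d peoples", 2) // plural-aware lookup
	_ = DGettext("hello", "Hello")             // lookup in an explicit domain
}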
|
|
@ -0,0 +1,34 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getDefaultLocale() string {
|
||||
if v := os.Getenv("LC_MESSAGES"); v != "" {
|
||||
return simplifiedLocale(v)
|
||||
}
|
||||
if v := os.Getenv("LANG"); v != "" {
|
||||
return simplifiedLocale(v)
|
||||
}
|
||||
return "default"
|
||||
}
|
||||
|
||||
func simplifiedLocale(lang string) string {
|
||||
// en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/...
|
||||
if idx := strings.Index(lang, ":"); idx != -1 {
|
||||
lang = lang[:idx]
|
||||
}
|
||||
if idx := strings.Index(lang, "@"); idx != -1 {
|
||||
lang = lang[:idx]
|
||||
}
|
||||
if idx := strings.Index(lang, "."); idx != -1 {
|
||||
lang = lang[:idx]
|
||||
}
|
||||
return strings.TrimSpace(lang)
|
||||
}
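
// exampleSimplifiedLocale is an illustrative sketch (not part of the original
// file): it shows what simplifiedLocale does to typical LC_MESSAGES/LANG
// values. The inputs are examples only.
func exampleSimplifiedLocale() {
	_ = simplifiedLocale("zh_CN.UTF-8")       // -> "zh_CN" (".UTF-8" suffix dropped)
	_ = simplifiedLocale("el_GR@euro")        // -> "el_GR" ("@euro" modifier dropped)
	_ = simplifiedLocale("en_US.UTF-8:en_US") // -> "en_US" (everything after ":" dropped first)
}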
|
|
@ -0,0 +1,74 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package mo provides support for reading and writing GNU MO files.
|
||||
|
||||
Examples:
|
||||
import (
|
||||
"github.com/chai2010/gettext-go/gettext/mo"
|
||||
)
|
||||
|
||||
func main() {
|
||||
moFile, err := mo.Load("test.mo")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%v", moFile)
|
||||
}
|
||||
|
||||
GNU MO file struct:
|
||||
|
||||
byte
|
||||
+------------------------------------------+
|
||||
0 | magic number = 0x950412de |
|
||||
| |
|
||||
4 | file format revision = 0 |
|
||||
| |
|
||||
8 | number of strings | == N
|
||||
| |
|
||||
12 | offset of table with original strings | == O
|
||||
| |
|
||||
16 | offset of table with translation strings | == T
|
||||
| |
|
||||
20 | size of hashing table | == S
|
||||
| |
|
||||
24 | offset of hashing table | == H
|
||||
| |
|
||||
. .
|
||||
. (possibly more entries later) .
|
||||
. .
|
||||
| |
|
||||
O | length & offset 0th string ----------------.
|
||||
O + 8 | length & offset 1st string ------------------.
|
||||
... ... | |
|
||||
O + ((N-1)*8)| length & offset (N-1)th string | | |
|
||||
| | | |
|
||||
T | length & offset 0th translation ---------------.
|
||||
T + 8 | length & offset 1st translation -----------------.
|
||||
... ... | | | |
|
||||
T + ((N-1)*8)| length & offset (N-1)th translation | | | | |
|
||||
| | | | | |
|
||||
H | start hash table | | | | |
|
||||
... ... | | | |
|
||||
H + S * 4 | end hash table | | | | |
|
||||
| | | | | |
|
||||
| NUL terminated 0th string <----------------' | | |
|
||||
| | | | |
|
||||
| NUL terminated 1st string <------------------' | |
|
||||
| | | |
|
||||
... ... | |
|
||||
| | | |
|
||||
| NUL terminated 0th translation <---------------' |
|
||||
| | |
|
||||
| NUL terminated 1st translation <-----------------'
|
||||
| |
|
||||
... ...
|
||||
| |
|
||||
+------------------------------------------+
|
||||
|
||||
The GNU MO file specification is at
|
||||
http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html.
|
||||
*/
|
||||
package mo
|
|
@ -0,0 +1,124 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type moHeader struct {
|
||||
MagicNumber uint32
|
||||
MajorVersion uint16
|
||||
MinorVersion uint16
|
||||
MsgIdCount uint32
|
||||
MsgIdOffset uint32
|
||||
MsgStrOffset uint32
|
||||
HashSize uint32
|
||||
HashOffset uint32
|
||||
}
|
||||
|
||||
type moStrPos struct {
|
||||
Size uint32 // must keep fields order
|
||||
Addr uint32
|
||||
}
|
||||
|
||||
func encodeFile(f *File) []byte {
|
||||
hdr := &moHeader{
|
||||
MagicNumber: MoMagicLittleEndian,
|
||||
}
|
||||
data := encodeData(hdr, f)
|
||||
data = append(encodeHeader(hdr), data...)
|
||||
return data
|
||||
}
|
||||
|
||||
// encode data and init moHeader
|
||||
func encodeData(hdr *moHeader, f *File) []byte {
|
||||
msgList := []Message{f.MimeHeader.toMessage()}
|
||||
for _, v := range f.Messages {
|
||||
if len(v.MsgId) == 0 {
|
||||
continue
|
||||
}
|
||||
if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 {
|
||||
continue
|
||||
}
|
||||
msgList = append(msgList, v)
|
||||
}
|
||||
sort.Sort(byMessages(msgList))
|
||||
|
||||
var buf bytes.Buffer
|
||||
var msgIdPosList = make([]moStrPos, len(msgList))
|
||||
var msgStrPosList = make([]moStrPos, len(msgList))
|
||||
for i, v := range msgList {
|
||||
// write msgid
|
||||
msgId := encodeMsgId(v)
|
||||
msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
|
||||
msgIdPosList[i].Size = uint32(len(msgId))
|
||||
buf.WriteString(msgId)
|
||||
// write msgstr
|
||||
msgStr := encodeMsgStr(v)
|
||||
msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
|
||||
msgStrPosList[i].Size = uint32(len(msgStr))
|
||||
buf.WriteString(msgStr)
|
||||
}
|
||||
|
||||
hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize)
|
||||
binary.Write(&buf, binary.LittleEndian, msgIdPosList)
|
||||
hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize)
|
||||
binary.Write(&buf, binary.LittleEndian, msgStrPosList)
|
||||
|
||||
hdr.MsgIdCount = uint32(len(msgList))
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// must be called after encodeData
|
||||
func encodeHeader(hdr *moHeader) []byte {
|
||||
var buf bytes.Buffer
|
||||
binary.Write(&buf, binary.LittleEndian, hdr)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func encodeMsgId(v Message) string {
|
||||
if v.MsgContext != "" && v.MsgIdPlural != "" {
|
||||
return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural
|
||||
}
|
||||
if v.MsgContext != "" && v.MsgIdPlural == "" {
|
||||
return v.MsgContext + EotSeparator + v.MsgId
|
||||
}
|
||||
if v.MsgContext == "" && v.MsgIdPlural != "" {
|
||||
return v.MsgId + NulSeparator + v.MsgIdPlural
|
||||
}
|
||||
return v.MsgId
|
||||
}
|
||||
|
||||
func encodeMsgStr(v Message) string {
|
||||
if v.MsgIdPlural != "" {
|
||||
return strings.Join(v.MsgStrPlural, NulSeparator)
|
||||
}
|
||||
return v.MsgStr
|
||||
}
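
// exampleEncodeEntry is an illustrative sketch (not part of the original
// file): it shows how a context + plural entry is packed for the MO binary
// format by the helpers above. The field values are made up.
func exampleEncodeEntry() {
	msg := Message{
		MsgContext:   "menu",
		MsgId:        "%d file",
		MsgIdPlural:  "%d files",
		MsgStrPlural: []string{"%d fichier", "%d fichiers"},
	}
	_ = encodeMsgId(msg)  // "menu\x04%d file\x00%d files" (EOT joins the context, NUL joins the plural)
	_ = encodeMsgStr(msg) // "%d fichier\x00%d fichiers"   (plural cases joined by NUL)
}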
|
||||
|
||||
type byMessages []Message
|
||||
|
||||
func (d byMessages) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
func (d byMessages) Less(i, j int) bool {
|
||||
if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
|
||||
return a < b
|
||||
}
|
||||
if a, b := d[i].MsgId, d[j].MsgId; a != b {
|
||||
return a < b
|
||||
}
|
||||
if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
|
||||
return a < b
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (d byMessages) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
|
@ -0,0 +1,193 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
MoHeaderSize = 28
|
||||
MoMagicLittleEndian = 0x950412de
|
||||
MoMagicBigEndian = 0xde120495
|
||||
|
||||
EotSeparator = "\x04" // msgctxt and msgid separator
|
||||
NulSeparator = "\x00" // msgid and msgstr separator
|
||||
)
|
||||
|
||||
// File represents an MO file.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
|
||||
type File struct {
|
||||
MagicNumber uint32
|
||||
MajorVersion uint16
|
||||
MinorVersion uint16
|
||||
MsgIdCount uint32
|
||||
MsgIdOffset uint32
|
||||
MsgStrOffset uint32
|
||||
HashSize uint32
|
||||
HashOffset uint32
|
||||
MimeHeader Header
|
||||
Messages []Message
|
||||
}
|
||||
|
||||
// Load loads a named mo file.
|
||||
func Load(name string) (*File, error) {
|
||||
data, err := ioutil.ReadFile(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return LoadData(data)
|
||||
}
|
||||
|
||||
// LoadData loads mo file format data.
|
||||
func LoadData(data []byte) (*File, error) {
|
||||
r := bytes.NewReader(data)
|
||||
|
||||
var magicNumber uint32
|
||||
if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
var bo binary.ByteOrder
|
||||
switch magicNumber {
|
||||
case MoMagicLittleEndian:
|
||||
bo = binary.LittleEndian
|
||||
case MoMagicBigEndian:
|
||||
bo = binary.BigEndian
|
||||
default:
|
||||
return nil, fmt.Errorf("gettext: %v", "invalid magic number")
|
||||
}
|
||||
|
||||
var header struct {
|
||||
MajorVersion uint16
|
||||
MinorVersion uint16
|
||||
MsgIdCount uint32
|
||||
MsgIdOffset uint32
|
||||
MsgStrOffset uint32
|
||||
HashSize uint32
|
||||
HashOffset uint32
|
||||
}
|
||||
if err := binary.Read(r, bo, &header); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
if v := header.MajorVersion; v != 0 && v != 1 {
|
||||
return nil, fmt.Errorf("gettext: %v", "invalid version number")
|
||||
}
|
||||
if v := header.MinorVersion; v != 0 && v != 1 {
|
||||
return nil, fmt.Errorf("gettext: %v", "invalid version number")
|
||||
}
|
||||
|
||||
msgIdStart := make([]uint32, header.MsgIdCount)
|
||||
msgIdLen := make([]uint32, header.MsgIdCount)
|
||||
if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
||||
if err := binary.Read(r, bo, &msgIdLen[i]); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
if err := binary.Read(r, bo, &msgIdStart[i]); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
msgStrStart := make([]int32, header.MsgIdCount)
|
||||
msgStrLen := make([]int32, header.MsgIdCount)
|
||||
if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
||||
if err := binary.Read(r, bo, &msgStrLen[i]); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
if err := binary.Read(r, bo, &msgStrStart[i]); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
file := &File{
|
||||
MagicNumber: magicNumber,
|
||||
MajorVersion: header.MajorVersion,
|
||||
MinorVersion: header.MinorVersion,
|
||||
MsgIdCount: header.MsgIdCount,
|
||||
MsgIdOffset: header.MsgIdOffset,
|
||||
MsgStrOffset: header.MsgStrOffset,
|
||||
HashSize: header.HashSize,
|
||||
HashOffset: header.HashOffset,
|
||||
}
|
||||
for i := 0; i < int(header.MsgIdCount); i++ {
|
||||
if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
msgIdData := make([]byte, msgIdLen[i])
|
||||
if _, err := r.Read(msgIdData); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
msgStrData := make([]byte, msgStrLen[i])
|
||||
if _, err := r.Read(msgStrData); err != nil {
|
||||
return nil, fmt.Errorf("gettext: %v", err)
|
||||
}
|
||||
|
||||
if len(msgIdData) == 0 {
|
||||
var msg = Message{
|
||||
MsgId: string(msgIdData),
|
||||
MsgStr: string(msgStrData),
|
||||
}
|
||||
file.MimeHeader.fromMessage(&msg)
|
||||
} else {
|
||||
var msg = Message{
|
||||
MsgId: string(msgIdData),
|
||||
MsgStr: string(msgStrData),
|
||||
}
|
||||
// Is this a context message?
|
||||
if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 {
|
||||
msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:]
|
||||
}
|
||||
// Is this a plural message?
|
||||
if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 {
|
||||
msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:]
|
||||
msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator)
|
||||
msg.MsgStr = ""
|
||||
}
|
||||
file.Messages = append(file.Messages, msg)
|
||||
}
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Save saves a mo file.
|
||||
func (f *File) Save(name string) error {
|
||||
return ioutil.WriteFile(name, f.Data(), 0666)
|
||||
}
|
||||
|
||||
// Data returns the mo file format data.
|
||||
func (f *File) Data() []byte {
|
||||
return encodeFile(f)
|
||||
}
|
||||
|
||||
// String returns the po format file string.
|
||||
func (f *File) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion)
|
||||
fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
|
||||
fmt.Fprintf(&buf, "\n")
|
||||
|
||||
for k, v := range f.Messages {
|
||||
fmt.Fprintf(&buf, `msgid "%v"`+"\n", k)
|
||||
fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr)
|
||||
fmt.Fprintf(&buf, "\n")
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
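
// exampleRoundTrip is an illustrative sketch (not part of the original file):
// it builds a File in memory and serializes it with the Data/Save methods
// defined above. The message contents and the "hello.mo" path are made up.
func exampleRoundTrip() error {
	f := &File{
		MimeHeader: Header{
			ProjectIdVersion: "example 1.0",
			Language:         "zh_CN",
			ContentType:      "text/plain; charset=UTF-8",
		},
		Messages: []Message{
			{MsgId: "Hello", MsgStr: "你好"},
		},
	}
	_ = f.Data()              // little-endian MO image built by encodeFile
	return f.Save("hello.mo") // the same bytes written to disk
}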
|
|
@ -0,0 +1,109 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Header is the MO file's MIME header entry; the initial comments "SOME DESCRIPTIVE TITLE", "YEAR"
|
||||
// and "FIRST AUTHOR <EMAIL@ADDRESS>, YEAR" ought to be replaced by sensible information.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
|
||||
type Header struct {
|
||||
ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION
|
||||
ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR <EMAIL@ADDRESS>
|
||||
POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
|
||||
PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
|
||||
LastTranslator string // Last-Translator: FIRST AUTHOR <EMAIL@ADDRESS>
|
||||
LanguageTeam string // Language-Team: golang-china
|
||||
Language string // Language: zh_CN
|
||||
MimeVersion string // MIME-Version: 1.0
|
||||
ContentType string // Content-Type: text/plain; charset=UTF-8
|
||||
ContentTransferEncoding string // Content-Transfer-Encoding: 8bit
|
||||
PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;
|
||||
XGenerator string // X-Generator: Poedit 1.5.5
|
||||
UnknowFields map[string]string
|
||||
}
|
||||
|
||||
func (p *Header) fromMessage(msg *Message) {
|
||||
if msg.MsgId != "" || msg.MsgStr == "" {
|
||||
return
|
||||
}
|
||||
lines := strings.Split(msg.MsgStr, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
idx := strings.Index(lines[i], ":")
|
||||
if idx < 0 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(lines[i][:idx])
|
||||
val := strings.TrimSpace(lines[i][idx+1:])
|
||||
switch strings.ToUpper(key) {
|
||||
case strings.ToUpper("Project-Id-Version"):
|
||||
p.ProjectIdVersion = val
|
||||
case strings.ToUpper("Report-Msgid-Bugs-To"):
|
||||
p.ReportMsgidBugsTo = val
|
||||
case strings.ToUpper("POT-Creation-Date"):
|
||||
p.POTCreationDate = val
|
||||
case strings.ToUpper("PO-Revision-Date"):
|
||||
p.PORevisionDate = val
|
||||
case strings.ToUpper("Last-Translator"):
|
||||
p.LastTranslator = val
|
||||
case strings.ToUpper("Language-Team"):
|
||||
p.LanguageTeam = val
|
||||
case strings.ToUpper("Language"):
|
||||
p.Language = val
|
||||
case strings.ToUpper("MIME-Version"):
|
||||
p.MimeVersion = val
|
||||
case strings.ToUpper("Content-Type"):
|
||||
p.ContentType = val
|
||||
case strings.ToUpper("Content-Transfer-Encoding"):
|
||||
p.ContentTransferEncoding = val
|
||||
case strings.ToUpper("Plural-Forms"):
|
||||
p.PluralForms = val
|
||||
case strings.ToUpper("X-Generator"):
|
||||
p.XGenerator = val
|
||||
default:
|
||||
if p.UnknowFields == nil {
|
||||
p.UnknowFields = make(map[string]string)
|
||||
}
|
||||
p.UnknowFields[key] = val
|
||||
}
|
||||
}
|
||||
}
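
// exampleHeaderParse is an illustrative sketch (not part of the original
// file): the MIME header travels as the translation of the empty msgid, and
// fromMessage above splits its "Key: Value" lines. The values are made up.
func exampleHeaderParse() {
	var hdr Header
	hdr.fromMessage(&Message{
		MsgId:  "",
		MsgStr: "Project-Id-Version: example 1.0\nLanguage: zh_CN\nX-Custom: anything\n",
	})
	_ = hdr.ProjectIdVersion         // "example 1.0"
	_ = hdr.Language                 // "zh_CN"
	_ = hdr.UnknowFields["X-Custom"] // unrecognized keys land here
}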
|
||||
|
||||
func (p *Header) toMessage() Message {
|
||||
return Message{
|
||||
MsgStr: p.String(),
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the po format header string.
|
||||
func (p Header) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, `msgid ""`+"\n")
|
||||
fmt.Fprintf(&buf, `msgstr ""`+"\n")
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language)
|
||||
if p.MimeVersion != "" {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion)
|
||||
}
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding)
|
||||
if p.XGenerator != "" {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator)
|
||||
}
|
||||
for k, v := range p.UnknowFields {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// An MO file is made up of many entries,
|
||||
// each entry holding the relation between an original untranslated string
|
||||
// and its corresponding translation.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
|
||||
type Message struct {
|
||||
MsgContext string // msgctxt context
|
||||
MsgId string // msgid untranslated-string
|
||||
MsgIdPlural string // msgid_plural untranslated-string-plural
|
||||
MsgStr string // msgstr translated-string
|
||||
MsgStrPlural []string // msgstr[0] translated-string-case-0
|
||||
}
|
||||
|
||||
// String returns the po format entry string.
|
||||
func (p Message) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
|
||||
if p.MsgIdPlural != "" {
|
||||
fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
|
||||
}
|
||||
if p.MsgStr != "" {
|
||||
fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
|
||||
}
|
||||
for i := 0; i < len(p.MsgStrPlural); i++ {
|
||||
fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package mo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func decodePoString(text string) string {
|
||||
lines := strings.Split(text, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
left := strings.Index(lines[i], `"`)
|
||||
right := strings.LastIndex(lines[i], `"`)
|
||||
if left < 0 || right < 0 || left == right {
|
||||
lines[i] = ""
|
||||
continue
|
||||
}
|
||||
line := lines[i][left+1 : right]
|
||||
data := make([]byte, 0, len(line))
|
||||
for i := 0; i < len(line); i++ {
|
||||
if line[i] != '\\' {
|
||||
data = append(data, line[i])
|
||||
continue
|
||||
}
|
||||
if i+1 >= len(line) {
|
||||
break
|
||||
}
|
||||
switch line[i+1] {
|
||||
case 'n': // \\n -> \n
|
||||
data = append(data, '\n')
|
||||
i++
|
||||
case 't': // \\t -> \t
|
||||
data = append(data, '\t')
|
||||
i++
|
||||
case '\\': // \\\\ -> \\
|
||||
data = append(data, '\\')
|
||||
i++
|
||||
}
|
||||
}
|
||||
lines[i] = string(data)
|
||||
}
|
||||
return strings.Join(lines, "")
|
||||
}
|
||||
|
||||
func encodePoString(text string) string {
|
||||
var buf bytes.Buffer
|
||||
lines := strings.Split(text, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
if lines[i] == "" {
|
||||
if i != len(lines)-1 {
|
||||
buf.WriteString(`"\n"` + "\n")
|
||||
}
|
||||
continue
|
||||
}
|
||||
buf.WriteRune('"')
|
||||
for _, r := range lines[i] {
|
||||
switch r {
|
||||
case '\\':
|
||||
buf.WriteString(`\\`)
|
||||
case '"':
|
||||
buf.WriteString(`\"`)
|
||||
case '\n':
|
||||
buf.WriteString(`\n`)
|
||||
case '\t':
|
||||
buf.WriteString(`\t`)
|
||||
default:
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
buf.WriteString(`\n"` + "\n")
|
||||
}
|
||||
return buf.String()
|
||||
}
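
// examplePoStringEscape is an illustrative sketch (not part of the original
// file): encodePoString above wraps each line in quotes and escapes it, and
// decodePoString reverses that. The sample text is made up.
func examplePoStringEscape() {
	raw := "say \"hi\"\tnow"
	quoted := encodePoString(raw) // one quoted, escaped line ending in \n, plus a real newline
	_ = decodePoString(quoted)    // unescapes it back to the raw text (with a trailing \n appended)
}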
|
||||
|
||||
func encodeCommentPoString(text string) string {
|
||||
var buf bytes.Buffer
|
||||
lines := strings.Split(text, "\n")
|
||||
if len(lines) > 1 {
|
||||
buf.WriteString(`""` + "\n")
|
||||
}
|
||||
for i := 0; i < len(lines); i++ {
|
||||
if len(lines) > 0 {
|
||||
buf.WriteString("#| ")
|
||||
}
|
||||
buf.WriteRune('"')
|
||||
for _, r := range lines[i] {
|
||||
switch r {
|
||||
case '\\':
|
||||
buf.WriteString(`\\`)
|
||||
case '"':
|
||||
buf.WriteString(`\"`)
|
||||
case '\n':
|
||||
buf.WriteString(`\n`)
|
||||
case '\t':
|
||||
buf.WriteString(`\t`)
|
||||
default:
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
if i < len(lines)-1 {
|
||||
buf.WriteString(`\n"` + "\n")
|
||||
} else {
|
||||
buf.WriteString(`"`)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package plural provides standard plural formulas.
|
||||
|
||||
Examples:
|
||||
import (
|
||||
"code.google.com/p/gettext-go/gettext/plural"
|
||||
)
|
||||
|
||||
func main() {
|
||||
enFormula := plural.Formula("en_US")
|
||||
xxFormula := plural.Formula("zh_CN")
|
||||
|
||||
fmt.Printf("%s: %d\n", "en", enFormula(0))
|
||||
fmt.Printf("%s: %d\n", "en", enFormula(1))
|
||||
fmt.Printf("%s: %d\n", "en", enFormula(2))
|
||||
fmt.Printf("%s: %d\n", "??", xxFormula(0))
|
||||
fmt.Printf("%s: %d\n", "??", xxFormula(1))
|
||||
fmt.Printf("%s: %d\n", "??", xxFormula(2))
|
||||
fmt.Printf("%s: %d\n", "??", xxFormula(9))
|
||||
// Output:
|
||||
// en: 0
|
||||
// en: 0
|
||||
// en: 1
|
||||
// ??: 0
|
||||
// ??: 0
|
||||
// ??: 1
|
||||
// ??: 8
|
||||
}
|
||||
|
||||
See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html
|
||||
*/
|
||||
package plural
|
|
@ -0,0 +1,181 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package plural
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Formula provides the language's standard plural formula.
|
||||
func Formula(lang string) func(n int) int {
|
||||
if idx := index(lang); idx != -1 {
|
||||
return formulaTable[fmtForms(FormsTable[idx].Value)]
|
||||
}
|
||||
if idx := index("??"); idx != -1 {
|
||||
return formulaTable[fmtForms(FormsTable[idx].Value)]
|
||||
}
|
||||
return func(n int) int {
|
||||
return n
|
||||
}
|
||||
}
|
||||
|
||||
func index(lang string) int {
|
||||
for i := 0; i < len(FormsTable); i++ {
|
||||
if strings.HasPrefix(lang, FormsTable[i].Lang) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func fmtForms(forms string) string {
|
||||
forms = strings.TrimSpace(forms)
|
||||
forms = strings.Replace(forms, " ", "", -1)
|
||||
return forms
|
||||
}
|
||||
|
||||
var formulaTable = map[string]func(n int) int{
|
||||
fmtForms("nplurals=n; plural=n-1;"): func(n int) int {
|
||||
if n > 0 {
|
||||
return n - 1
|
||||
}
|
||||
return 0
|
||||
},
|
||||
fmtForms("nplurals=1; plural=0;"): func(n int) int {
|
||||
return 0
|
||||
},
|
||||
fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int {
|
||||
if n <= 1 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
},
|
||||
fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int {
|
||||
if n <= 1 {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n != 0 {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int {
|
||||
if n == 1 {
|
||||
return 0
|
||||
}
|
||||
if n == 2 {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int {
|
||||
if n == 1 {
|
||||
return 0
|
||||
}
|
||||
if n == 0 || (n%100 > 0 && n%100 < 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n%10 == 1 && n%100 != 11 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int {
|
||||
if n == 1 {
|
||||
return 0
|
||||
}
|
||||
if n >= 2 && n <= 4 {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int {
|
||||
if n == 1 {
|
||||
return 0
|
||||
}
|
||||
if n >= 2 && n <= 4 {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int {
|
||||
if n == 1 {
|
||||
return 0
|
||||
}
|
||||
if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
},
|
||||
fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int {
|
||||
if n%100 == 1 {
|
||||
return 0
|
||||
}
|
||||
if n%100 == 2 {
|
||||
return 1
|
||||
}
|
||||
if n%100 == 3 || n%100 == 4 {
|
||||
return 2
|
||||
}
|
||||
return 3
|
||||
},
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package plural
|
||||
|
||||
// FormsTable lists the standard hard-coded plural rules.
|
||||
// The application developers and the translators need to understand them.
|
||||
//
|
||||
// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c
|
||||
var FormsTable = []struct {
|
||||
Lang string
|
||||
Language string
|
||||
Value string
|
||||
}{
|
||||
{"??", "Unknown", "nplurals=1; plural=0;"},
|
||||
{"ja", "Japanese", "nplurals=1; plural=0;"},
|
||||
{"vi", "Vietnamese", "nplurals=1; plural=0;"},
|
||||
{"ko", "Korean", "nplurals=1; plural=0;"},
|
||||
{"en", "English", "nplurals=2; plural=(n != 1);"},
|
||||
{"de", "German", "nplurals=2; plural=(n != 1);"},
|
||||
{"nl", "Dutch", "nplurals=2; plural=(n != 1);"},
|
||||
{"sv", "Swedish", "nplurals=2; plural=(n != 1);"},
|
||||
{"da", "Danish", "nplurals=2; plural=(n != 1);"},
|
||||
{"no", "Norwegian", "nplurals=2; plural=(n != 1);"},
|
||||
{"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"},
|
||||
{"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"},
|
||||
{"fo", "Faroese", "nplurals=2; plural=(n != 1);"},
|
||||
{"es", "Spanish", "nplurals=2; plural=(n != 1);"},
|
||||
{"pt", "Portuguese", "nplurals=2; plural=(n != 1);"},
|
||||
{"it", "Italian", "nplurals=2; plural=(n != 1);"},
|
||||
{"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"},
|
||||
{"el", "Greek", "nplurals=2; plural=(n != 1);"},
|
||||
{"fi", "Finnish", "nplurals=2; plural=(n != 1);"},
|
||||
{"et", "Estonian", "nplurals=2; plural=(n != 1);"},
|
||||
{"he", "Hebrew", "nplurals=2; plural=(n != 1);"},
|
||||
{"eo", "Esperanto", "nplurals=2; plural=(n != 1);"},
|
||||
{"hu", "Hungarian", "nplurals=2; plural=(n != 1);"},
|
||||
{"tr", "Turkish", "nplurals=2; plural=(n != 1);"},
|
||||
{"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"},
|
||||
{"fr", "French", "nplurals=2; plural=(n > 1);"},
|
||||
{"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"},
|
||||
{"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"},
|
||||
{"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"},
|
||||
{"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
|
||||
{"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
|
||||
{"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
|
||||
{"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"},
|
||||
}
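
// exampleRussianPlural is an illustrative sketch (not part of the original
// file): Formula, defined in this package, picks the entry above whose Lang
// is a prefix of the locale and returns its plural-index function.
func exampleRussianPlural() {
	ru := Formula("ru_RU") // matches the "ru" row: nplurals=3
	_ = ru(1)              // 0 (singular form: n%10==1 && n%100!=11)
	_ = ru(3)              // 1 (few form: n%10 in 2..4 outside 10..19)
	_ = ru(5)              // 2 (many form)
	_ = ru(21)             // 0 (21%10==1 && 21%100!=11)
}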
|
|
@ -0,0 +1,270 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Comment represents the comments attached to a message.
|
||||
type Comment struct {
|
||||
StartLine int // comment start line
|
||||
TranslatorComment string // # translator-comments // TrimSpace
|
||||
ExtractedComment string // #. extracted-comments
|
||||
ReferenceFile []string // #: src/msgcmp.c:338 src/po-lex.c:699
|
||||
ReferenceLine []int // #: src/msgcmp.c:338 src/po-lex.c:699
|
||||
Flags []string // #, fuzzy,c-format,range:0..10
|
||||
PrevMsgContext string // #| msgctxt previous-context
|
||||
PrevMsgId string // #| msgid previous-untranslated-string
|
||||
}
|
||||
|
||||
func (p *Comment) less(q *Comment) bool {
|
||||
if p.StartLine != 0 || q.StartLine != 0 {
|
||||
return p.StartLine < q.StartLine
|
||||
}
|
||||
if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b {
|
||||
return a < b
|
||||
}
|
||||
for i := 0; i < len(p.ReferenceFile); i++ {
|
||||
if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b {
|
||||
return a < b
|
||||
}
|
||||
if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b {
|
||||
return a < b
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *Comment) readPoComment(r *lineReader) (err error) {
|
||||
*p = Comment{}
|
||||
if err = r.skipBlankLine(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func(oldPos int) {
|
||||
newPos := r.currentPos()
|
||||
if newPos != oldPos && err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
}(r.currentPos())
|
||||
|
||||
p.StartLine = r.currentPos() + 1
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if len(s) == 0 || s[0] != '#' {
|
||||
return
|
||||
}
|
||||
|
||||
if err = p.readTranslatorComment(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readExtractedComment(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readReferenceComment(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readFlagsComment(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readPrevMsgContext(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readPrevMsgId(r); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Comment) readTranslatorComment(r *lineReader) (err error) {
|
||||
const prefix = "# " // .,:|
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) < 1 || s[0] != '#' {
|
||||
r.unreadLine()
|
||||
return nil
|
||||
}
|
||||
if len(s) >= 2 {
|
||||
switch s[1] {
|
||||
case '.', ',', ':', '|':
|
||||
r.unreadLine()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if p.TranslatorComment != "" {
|
||||
p.TranslatorComment += "\n"
|
||||
}
|
||||
p.TranslatorComment += strings.TrimSpace(s[1:])
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Comment) readExtractedComment(r *lineReader) (err error) {
|
||||
const prefix = "#."
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) < len(prefix) || s[:len(prefix)] != prefix {
|
||||
r.unreadLine()
|
||||
return nil
|
||||
}
|
||||
if p.ExtractedComment != "" {
|
||||
p.ExtractedComment += "\n"
|
||||
}
|
||||
p.ExtractedComment += strings.TrimSpace(s[len(prefix):])
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Comment) readReferenceComment(r *lineReader) (err error) {
|
||||
const prefix = "#:"
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) < len(prefix) || s[:len(prefix)] != prefix {
|
||||
r.unreadLine()
|
||||
return nil
|
||||
}
|
||||
ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ")
|
||||
for i := 0; i < len(ss); i++ {
|
||||
idx := strings.Index(ss[i], ":")
|
||||
if idx <= 0 {
|
||||
continue
|
||||
}
|
||||
name := strings.TrimSpace(ss[i][:idx])
|
||||
line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:]))
|
||||
p.ReferenceFile = append(p.ReferenceFile, name)
|
||||
p.ReferenceLine = append(p.ReferenceLine, line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Comment) readFlagsComment(r *lineReader) (err error) {
|
||||
const prefix = "#,"
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) < len(prefix) || s[:len(prefix)] != prefix {
|
||||
r.unreadLine()
|
||||
return nil
|
||||
}
|
||||
ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",")
|
||||
for i := 0; i < len(ss); i++ {
|
||||
p.Flags = append(p.Flags, strings.TrimSpace(ss[i]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Comment) readPrevMsgContext(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !rePrevMsgContextComments.MatchString(s) {
|
||||
return
|
||||
}
|
||||
p.PrevMsgContext, err = p.readString(r)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Comment) readPrevMsgId(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !rePrevMsgIdComments.MatchString(s) {
|
||||
return
|
||||
}
|
||||
p.PrevMsgId, err = p.readString(r)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Comment) readString(r *lineReader) (msg string, err error) {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return
|
||||
}
|
||||
msg += decodePoString(s)
|
||||
for {
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reStringLineComments.MatchString(s) {
|
||||
r.unreadLine()
|
||||
break
|
||||
}
|
||||
msg += decodePoString(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetFuzzy gets the fuzzy flag.
|
||||
func (p *Comment) GetFuzzy() bool {
|
||||
for _, s := range p.Flags {
|
||||
if s == "fuzzy" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetFuzzy sets the fuzzy flag.
|
||||
func (p *Comment) SetFuzzy(fuzzy bool) {
|
||||
//
|
||||
}
|
||||
|
||||
// String returns the po format comment string.
|
||||
func (p Comment) String() string {
|
||||
var buf bytes.Buffer
|
||||
if p.TranslatorComment != "" {
|
||||
ss := strings.Split(p.TranslatorComment, "\n")
|
||||
for i := 0; i < len(ss); i++ {
|
||||
fmt.Fprintf(&buf, "# %s\n", ss[i])
|
||||
}
|
||||
}
|
||||
if p.ExtractedComment != "" {
|
||||
ss := strings.Split(p.ExtractedComment, "\n")
|
||||
for i := 0; i < len(ss); i++ {
|
||||
fmt.Fprintf(&buf, "#. %s\n", ss[i])
|
||||
}
|
||||
}
|
||||
if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b {
|
||||
fmt.Fprintf(&buf, "#:")
|
||||
for i := 0; i < len(p.ReferenceFile); i++ {
|
||||
fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i])
|
||||
}
|
||||
fmt.Fprintf(&buf, "\n")
|
||||
}
|
||||
if len(p.Flags) != 0 {
|
||||
fmt.Fprintf(&buf, "#, %s", p.Flags[0])
|
||||
for i := 1; i < len(p.Flags); i++ {
|
||||
fmt.Fprintf(&buf, ", %s", p.Flags[i])
|
||||
}
|
||||
fmt.Fprintf(&buf, "\n")
|
||||
}
|
||||
if p.PrevMsgContext != "" {
|
||||
s := encodeCommentPoString(p.PrevMsgContext)
|
||||
fmt.Fprintf(&buf, "#| msgctxt %s\n", s)
|
||||
}
|
||||
if p.PrevMsgId != "" {
|
||||
s := encodeCommentPoString(p.PrevMsgId)
|
||||
fmt.Fprintf(&buf, "#| msgid %s\n", s)
|
||||
}
|
||||
return buf.String()
|
||||
}
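
// exampleCommentString is an illustrative sketch (not part of the original
// file): it shows which "#"-prefixed lines the String method above emits for
// a populated Comment. The file name and flag values are made up.
func exampleCommentString() {
	c := Comment{
		TranslatorComment: "reviewed by the l10n team",
		ReferenceFile:     []string{"src/main.go"},
		ReferenceLine:     []int{42},
		Flags:             []string{"fuzzy", "c-format"},
	}
	_ = c.String()
	// # reviewed by the l10n team
	// #: src/main.go:42
	// #, fuzzy, c-format
}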
|
|
@ -0,0 +1,24 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package po provides support for reading and writing GNU PO files.
|
||||
|
||||
Examples:
|
||||
import (
|
||||
"github.com/chai2010/gettext-go/gettext/po"
|
||||
)
|
||||
|
||||
func main() {
|
||||
poFile, err := po.Load("test.po")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%v", poFile)
|
||||
}
|
||||
|
||||
The GNU PO file specification is at
|
||||
http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html.
|
||||
*/
|
||||
package po
|
|
@ -0,0 +1,75 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// File represents a PO file.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
|
||||
type File struct {
|
||||
MimeHeader Header
|
||||
Messages []Message
|
||||
}
|
||||
|
||||
// Load loads a named po file.
|
||||
func Load(name string) (*File, error) {
|
||||
data, err := ioutil.ReadFile(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return LoadData(data)
|
||||
}
|
||||
|
||||
// LoadData loads po file format data.
|
||||
func LoadData(data []byte) (*File, error) {
|
||||
r := newLineReader(string(data))
|
||||
var file File
|
||||
for {
|
||||
var msg Message
|
||||
if err := msg.readPoEntry(r); err != nil {
|
||||
if err == io.EOF {
|
||||
return &file, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if msg.MsgId == "" {
|
||||
file.MimeHeader.parseHeader(&msg)
|
||||
continue
|
||||
}
|
||||
file.Messages = append(file.Messages, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Save saves a po file.
|
||||
func (f *File) Save(name string) error {
|
||||
return ioutil.WriteFile(name, []byte(f.String()), 0666)
|
||||
}
|
||||
|
||||
// Data returns the po file format data.
|
||||
func (f *File) Data() []byte {
|
||||
// sort the messages by their ReferenceFile/ReferenceLine fields
|
||||
var messages []Message
|
||||
messages = append(messages, f.Messages...)
|
||||
sort.Sort(byMessages(messages))
|
||||
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
|
||||
for i := 0; i < len(messages); i++ {
|
||||
fmt.Fprintf(&buf, "%s\n", messages[i].String())
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// String returns the po format file string.
|
||||
func (f *File) String() string {
|
||||
return string(f.Data())
|
||||
}
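
// exampleEditPoFile is an illustrative sketch (not part of the original
// file): load a catalog with Load above, append an entry, and write it back.
// The file names and message contents are made up.
func exampleEditPoFile() error {
	f, err := Load("zh_CN.po")
	if err != nil {
		return err
	}
	f.Messages = append(f.Messages, Message{
		MsgId:  "Hello",
		MsgStr: "你好",
	})
	return f.Save("zh_CN.new.po") // entries are re-sorted by comment/msgid on output
}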
|
|
@ -0,0 +1,106 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Header is the PO file's MIME header entry; the initial comments "SOME DESCRIPTIVE TITLE", "YEAR"
|
||||
// and "FIRST AUTHOR <EMAIL@ADDRESS>, YEAR" ought to be replaced by sensible information.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
|
||||
type Header struct {
|
||||
Comment // Header Comments
|
||||
ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION
|
||||
ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR <EMAIL@ADDRESS>
|
||||
POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
|
||||
PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
|
||||
LastTranslator string // Last-Translator: FIRST AUTHOR <EMAIL@ADDRESS>
|
||||
LanguageTeam string // Language-Team: golang-china
|
||||
Language string // Language: zh_CN
|
||||
MimeVersion string // MIME-Version: 1.0
|
||||
ContentType string // Content-Type: text/plain; charset=UTF-8
|
||||
ContentTransferEncoding string // Content-Transfer-Encoding: 8bit
|
||||
PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;
|
||||
XGenerator string // X-Generator: Poedit 1.5.5
|
||||
UnknowFields map[string]string
|
||||
}
|
||||
|
||||
func (p *Header) parseHeader(msg *Message) {
|
||||
if msg.MsgId != "" || msg.MsgStr == "" {
|
||||
return
|
||||
}
|
||||
lines := strings.Split(msg.MsgStr, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
idx := strings.Index(lines[i], ":")
|
||||
if idx < 0 {
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(lines[i][:idx])
|
||||
val := strings.TrimSpace(lines[i][idx+1:])
|
||||
switch strings.ToUpper(key) {
|
||||
case strings.ToUpper("Project-Id-Version"):
|
||||
p.ProjectIdVersion = val
|
||||
case strings.ToUpper("Report-Msgid-Bugs-To"):
|
||||
p.ReportMsgidBugsTo = val
|
||||
case strings.ToUpper("POT-Creation-Date"):
|
||||
p.POTCreationDate = val
|
||||
case strings.ToUpper("PO-Revision-Date"):
|
||||
p.PORevisionDate = val
|
||||
case strings.ToUpper("Last-Translator"):
|
||||
p.LastTranslator = val
|
||||
case strings.ToUpper("Language-Team"):
|
||||
p.LanguageTeam = val
|
||||
case strings.ToUpper("Language"):
|
||||
p.Language = val
|
||||
case strings.ToUpper("MIME-Version"):
|
||||
p.MimeVersion = val
|
||||
case strings.ToUpper("Content-Type"):
|
||||
p.ContentType = val
|
||||
case strings.ToUpper("Content-Transfer-Encoding"):
|
||||
p.ContentTransferEncoding = val
|
||||
case strings.ToUpper("Plural-Forms"):
|
||||
p.PluralForms = val
|
||||
case strings.ToUpper("X-Generator"):
|
||||
p.XGenerator = val
|
||||
default:
|
||||
if p.UnknowFields == nil {
|
||||
p.UnknowFields = make(map[string]string)
|
||||
}
|
||||
p.UnknowFields[key] = val
|
||||
}
|
||||
}
|
||||
p.Comment = msg.Comment
|
||||
}
|
||||
|
||||
// String returns the po format header string.
|
||||
func (p Header) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "%s", p.Comment.String())
|
||||
fmt.Fprintf(&buf, `msgid ""`+"\n")
|
||||
fmt.Fprintf(&buf, `msgstr ""`+"\n")
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language)
|
||||
if p.MimeVersion != "" {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion)
|
||||
}
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType)
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding)
|
||||
if p.XGenerator != "" {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator)
|
||||
}
|
||||
for k, v := range p.UnknowFields {
|
||||
fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type lineReader struct {
|
||||
lines []string
|
||||
pos int
|
||||
}
|
||||
|
||||
func newLineReader(data string) *lineReader {
|
||||
data = strings.Replace(data, "\r", "", -1)
|
||||
lines := strings.Split(data, "\n")
|
||||
return &lineReader{lines: lines}
|
||||
}
|
||||
|
||||
func (r *lineReader) skipBlankLine() error {
|
||||
for ; r.pos < len(r.lines); r.pos++ {
|
||||
if strings.TrimSpace(r.lines[r.pos]) != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
if r.pos >= len(r.lines) {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *lineReader) currentPos() int {
|
||||
return r.pos
|
||||
}
|
||||
|
||||
func (r *lineReader) currentLine() (s string, pos int, err error) {
|
||||
if r.pos >= len(r.lines) {
|
||||
err = io.EOF
|
||||
return
|
||||
}
|
||||
s, pos = r.lines[r.pos], r.pos
|
||||
return
|
||||
}
|
||||
|
||||
func (r *lineReader) readLine() (s string, pos int, err error) {
|
||||
if r.pos >= len(r.lines) {
|
||||
err = io.EOF
|
||||
return
|
||||
}
|
||||
s, pos = r.lines[r.pos], r.pos
|
||||
r.pos++
|
||||
return
|
||||
}
|
||||
|
||||
func (r *lineReader) unreadLine() {
|
||||
if r.pos >= 0 {
|
||||
r.pos--
|
||||
}
|
||||
}
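
// exampleLineReader is an illustrative sketch (not part of the original
// file): the parser walks the catalog with readLine and backs up with
// unreadLine when it reaches a line that belongs to the next clause.
func exampleLineReader() {
	r := newLineReader("# a comment\n\nmsgid \"Hello\"\n")
	_ = r.skipBlankLine()     // leaves r positioned on "# a comment"
	s, pos, _ := r.readLine() // s == "# a comment", pos == 0; the reader advances to the blank line
	r.unreadLine()            // step back: the next readLine returns the comment again
	_, _ = s, pos
}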
|
|
@ -0,0 +1,189 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A PO file is made up of many entries,
|
||||
// each entry holding the relation between an original untranslated string
|
||||
// and its corresponding translation.
|
||||
//
|
||||
// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
|
||||
type Message struct {
|
||||
Comment // Comments
|
||||
MsgContext string // msgctxt context
|
||||
MsgId string // msgid untranslated-string
|
||||
MsgIdPlural string // msgid_plural untranslated-string-plural
|
||||
MsgStr string // msgstr translated-string
|
||||
MsgStrPlural []string // msgstr[0] translated-string-case-0
|
||||
}
|
||||
|
||||
type byMessages []Message
|
||||
|
||||
func (d byMessages) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
func (d byMessages) Less(i, j int) bool {
|
||||
if d[i].Comment.less(&d[j].Comment) {
|
||||
return true
|
||||
}
|
||||
if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
|
||||
return a < b
|
||||
}
|
||||
if a, b := d[i].MsgId, d[j].MsgId; a != b {
|
||||
return a < b
|
||||
}
|
||||
if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
|
||||
return a < b
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (d byMessages) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
||||
|
||||
func (p *Message) readPoEntry(r *lineReader) (err error) {
|
||||
*p = Message{}
|
||||
if err = r.skipBlankLine(); err != nil {
|
||||
return
|
||||
}
|
||||
defer func(oldPos int) {
|
||||
newPos := r.currentPos()
|
||||
if newPos != oldPos && err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
}(r.currentPos())
|
||||
|
||||
if err = p.Comment.readPoComment(r); err != nil {
|
||||
return
|
||||
}
|
||||
for {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if p.isInvalidLine(s) {
|
||||
err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line")
|
||||
return
|
||||
}
|
||||
if reComment.MatchString(s) || reBlankLine.MatchString(s) {
|
||||
return
|
||||
}
|
||||
|
||||
if err = p.readMsgContext(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readMsgId(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readMsgIdPlural(r); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.readMsgStrOrPlural(r); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Message) readMsgContext(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reMsgContext.MatchString(s) {
|
||||
return
|
||||
}
|
||||
p.MsgContext, err = p.readString(r)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Message) readMsgId(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reMsgId.MatchString(s) {
|
||||
return
|
||||
}
|
||||
p.MsgId, err = p.readString(r)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Message) readMsgIdPlural(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reMsgIdPlural.MatchString(s) {
|
||||
return
|
||||
}
|
||||
p.MsgIdPlural, err = p.readString(r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) {
|
||||
var s string
|
||||
if s, _, err = r.currentLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) {
|
||||
return
|
||||
}
|
||||
if reMsgStrPlural.MatchString(s) {
|
||||
left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`)
|
||||
idx, _ := strconv.Atoi(s[left+1 : right])
|
||||
s, err = p.readString(r)
|
||||
if n := len(p.MsgStrPlural); (idx + 1) > n {
|
||||
p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...)
|
||||
}
|
||||
p.MsgStrPlural[idx] = s
|
||||
} else {
|
||||
p.MsgStr, err = p.readString(r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Message) readString(r *lineReader) (msg string, err error) {
|
||||
var s string
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return
|
||||
}
|
||||
msg += decodePoString(s)
|
||||
for {
|
||||
if s, _, err = r.readLine(); err != nil {
|
||||
return
|
||||
}
|
||||
if !reStringLine.MatchString(s) {
|
||||
r.unreadLine()
|
||||
break
|
||||
}
|
||||
msg += decodePoString(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String returns the po format entry string.
|
||||
func (p Message) String() string {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprintf(&buf, "%s", p.Comment.String())
|
||||
fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
|
||||
if p.MsgIdPlural != "" {
|
||||
fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
|
||||
}
|
||||
if p.MsgStr != "" {
|
||||
fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
|
||||
}
|
||||
for i := 0; i < len(p.MsgStrPlural); i++ {
|
||||
fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var (
|
||||
reComment = regexp.MustCompile(`^#`) // #
|
||||
reExtractedComments = regexp.MustCompile(`^#\.`) // #.
|
||||
reReferenceComments = regexp.MustCompile(`^#:`) // #:
|
||||
reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format
|
||||
rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt
|
||||
rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid
|
||||
reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message"
|
||||
|
||||
reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt
|
||||
reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid
|
||||
reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural
|
||||
reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr
|
||||
reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0]
|
||||
reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message"
|
||||
reBlankLine = regexp.MustCompile(`^\s*$`) //
|
||||
)
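
// exampleLineKinds is an illustrative sketch (not part of the original file):
// it shows which catalog lines the patterns above accept. The sample strings
// are made up.
func exampleLineKinds() {
	_ = reMsgContext.MatchString(`msgctxt "menu"`)      // true
	_ = reMsgId.MatchString(`msgid "Hello"`)            // true
	_ = reMsgStrPlural.MatchString(`msgstr[1] "Files"`) // true
	_ = reStringLine.MatchString(`  "continued line"`)  // true: bare string continuation line
	_ = reMsgId.MatchString(`msgid Hello`)              // false: the string must be quoted
}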
|
||||
|
||||
func (p *Message) isInvalidLine(s string) bool {
|
||||
if reComment.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
if reBlankLine.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
|
||||
if reMsgContext.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
if reMsgId.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
if reMsgIdPlural.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
if reMsgStr.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
if reMsgStrPlural.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
|
||||
if reStringLine.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package po
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func decodePoString(text string) string {
|
||||
lines := strings.Split(text, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
left := strings.Index(lines[i], `"`)
|
||||
right := strings.LastIndex(lines[i], `"`)
|
||||
if left < 0 || right < 0 || left == right {
|
||||
lines[i] = ""
|
||||
continue
|
||||
}
|
||||
line := lines[i][left+1 : right]
|
||||
data := make([]byte, 0, len(line))
|
||||
for i := 0; i < len(line); i++ {
|
||||
if line[i] != '\\' {
|
||||
data = append(data, line[i])
|
||||
continue
|
||||
}
|
||||
if i+1 >= len(line) {
|
||||
break
|
||||
}
|
||||
switch line[i+1] {
|
||||
case 'n': // \\n -> \n
|
||||
data = append(data, '\n')
|
||||
i++
|
||||
case 't': // \\t -> \t
|
||||
data = append(data, '\t')
|
||||
i++
|
||||
case '\\': // \\ -> \
|
||||
data = append(data, '\\')
|
||||
i++
|
||||
}
|
||||
}
|
||||
lines[i] = string(data)
|
||||
}
|
||||
return strings.Join(lines, "")
|
||||
}
|
||||
|
||||
func encodePoString(text string) string {
|
||||
var buf bytes.Buffer
|
||||
lines := strings.Split(text, "\n")
|
||||
for i := 0; i < len(lines); i++ {
|
||||
if lines[i] == "" {
|
||||
if i != len(lines)-1 {
|
||||
buf.WriteString(`"\n"` + "\n")
|
||||
}
|
||||
continue
|
||||
}
|
||||
buf.WriteRune('"')
|
||||
for _, r := range lines[i] {
|
||||
switch r {
|
||||
case '\\':
|
||||
buf.WriteString(`\\`)
|
||||
case '"':
|
||||
buf.WriteString(`\"`)
|
||||
case '\n':
|
||||
buf.WriteString(`\n`)
|
||||
case '\t':
|
||||
buf.WriteString(`\t`)
|
||||
default:
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
buf.WriteString(`\n"` + "\n")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func encodeCommentPoString(text string) string {
|
||||
var buf bytes.Buffer
|
||||
lines := strings.Split(text, "\n")
|
||||
if len(lines) > 1 {
|
||||
buf.WriteString(`""` + "\n")
|
||||
}
|
||||
for i := 0; i < len(lines); i++ {
|
||||
if len(lines) > 0 {
|
||||
buf.WriteString("#| ")
|
||||
}
|
||||
buf.WriteRune('"')
|
||||
for _, r := range lines[i] {
|
||||
switch r {
|
||||
case '\\':
|
||||
buf.WriteString(`\\`)
|
||||
case '"':
|
||||
buf.WriteString(`\"`)
|
||||
case '\n':
|
||||
buf.WriteString(`\n`)
|
||||
case '\t':
|
||||
buf.WriteString(`\t`)
|
||||
default:
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
if i < len(lines)-1 {
|
||||
buf.WriteString(`\n"` + "\n")
|
||||
} else {
|
||||
buf.WriteString(`"`)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@@ -0,0 +1,128 @@
|
|||
// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gettext
|
||||
|
||||
import (
|
||||
"github.com/chai2010/gettext-go/gettext/mo"
|
||||
"github.com/chai2010/gettext-go/gettext/plural"
|
||||
"github.com/chai2010/gettext-go/gettext/po"
|
||||
)
|
||||
|
||||
var nilTranslator = &translator{
|
||||
MessageMap: make(map[string]mo.Message),
|
||||
PluralFormula: plural.Formula("??"),
|
||||
}
|
||||
|
||||
type translator struct {
|
||||
MessageMap map[string]mo.Message
|
||||
PluralFormula func(n int) int
|
||||
}
|
||||
|
||||
func newMoTranslator(name string, data []byte) (*translator, error) {
|
||||
var (
|
||||
f *mo.File
|
||||
err error
|
||||
)
|
||||
if len(data) != 0 {
|
||||
f, err = mo.LoadData(data)
|
||||
} else {
|
||||
f, err = mo.Load(name)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tr = &translator{
|
||||
MessageMap: make(map[string]mo.Message),
|
||||
}
|
||||
for _, v := range f.Messages {
|
||||
tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v
|
||||
}
|
||||
if lang := f.MimeHeader.Language; lang != "" {
|
||||
tr.PluralFormula = plural.Formula(lang)
|
||||
} else {
|
||||
tr.PluralFormula = plural.Formula("??")
|
||||
}
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
func newPoTranslator(name string, data []byte) (*translator, error) {
|
||||
var (
|
||||
f *po.File
|
||||
err error
|
||||
)
|
||||
if len(data) != 0 {
|
||||
f, err = po.LoadData(data)
|
||||
} else {
|
||||
f, err = po.Load(name)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tr = &translator{
|
||||
MessageMap: make(map[string]mo.Message),
|
||||
}
|
||||
for _, v := range f.Messages {
|
||||
tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{
|
||||
MsgContext: v.MsgContext,
|
||||
MsgId: v.MsgId,
|
||||
MsgIdPlural: v.MsgIdPlural,
|
||||
MsgStr: v.MsgStr,
|
||||
MsgStrPlural: v.MsgStrPlural,
|
||||
}
|
||||
}
|
||||
if lang := f.MimeHeader.Language; lang != "" {
|
||||
tr.PluralFormula = plural.Formula(lang)
|
||||
} else {
|
||||
tr.PluralFormula = plural.Formula("??")
|
||||
}
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
func (p *translator) PGettext(msgctxt, msgid string) string {
|
||||
return p.PNGettext(msgctxt, msgid, "", 0)
|
||||
}
|
||||
|
||||
func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
|
||||
n = p.PluralFormula(n)
|
||||
if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 {
|
||||
if n >= len(ss) {
|
||||
n = len(ss) - 1
|
||||
}
|
||||
if ss[n] != "" {
|
||||
return ss[n]
|
||||
}
|
||||
}
|
||||
if msgidPlural != "" && n > 0 {
|
||||
return msgidPlural
|
||||
}
|
||||
return msgid
|
||||
}
|
||||
|
||||
func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string {
|
||||
key := p.makeMapKey(msgctxt, msgid)
|
||||
if v, ok := p.MessageMap[key]; ok {
|
||||
if len(v.MsgIdPlural) != 0 {
|
||||
if len(v.MsgStrPlural) != 0 {
|
||||
return v.MsgStrPlural
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if len(v.MsgStr) != 0 {
|
||||
return []string{v.MsgStr}
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *translator) makeMapKey(msgctxt, msgid string) string {
|
||||
if msgctxt != "" {
|
||||
return msgctxt + mo.EotSeparator + msgid
|
||||
}
|
||||
return msgid
|
||||
}
|
|
@@ -0,0 +1,24 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
|
@@ -0,0 +1,5 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.5
|
||||
- tip
|
|
@@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Exponent Labs LLC
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@@ -0,0 +1,66 @@
|
|||
[GoDoc](https://godoc.org/github.com/exponent-io/jsonpath)
|
||||
[Build Status](https://travis-ci.org/exponent-io/jsonpath)
|
||||
|
||||
# jsonpath
|
||||
|
||||
This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
|
||||
|
||||
This Decoder has the following enhancements...
|
||||
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
|
||||
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
|
||||
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
|
||||
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
|
||||
|
||||
## Installation
|
||||
|
||||
go get -u github.com/exponent-io/jsonpath
|
||||
|
||||
## Example Usage
|
||||
|
||||
#### SeekTo
|
||||
|
||||
```go
|
||||
import "github.com/exponent-io/jsonpath"
|
||||
|
||||
var j = []byte(`[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
|
||||
]`)
|
||||
|
||||
w := jsonpath.NewDecoder(bytes.NewReader(j))
|
||||
var v interface{}
|
||||
|
||||
w.SeekTo(1, "Point", "G")
|
||||
w.Decode(&v) // v is 218
|
||||
```
|
||||
|
||||
#### Scan with PathActions
|
||||
|
||||
```go
|
||||
var j = []byte(`{"colors":[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
|
||||
]}`)
|
||||
|
||||
var actions PathActions
|
||||
|
||||
// Extract the value at Point.A
|
||||
actions.Add(func(d *Decoder) error {
|
||||
var alpha int
|
||||
err := d.Decode(&alpha)
|
||||
fmt.Printf("Alpha: %v\n", alpha)
|
||||
return err
|
||||
}, "Point", "A")
|
||||
|
||||
w := NewDecoder(bytes.NewReader(j))
|
||||
w.SeekTo("colors", 0)
|
||||
|
||||
var ok = true
|
||||
var err error
|
||||
for ok {
|
||||
ok, err = w.Scan(&actions)
|
||||
if err != nil && err != io.EOF {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
|
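#### Token and Path

A minimal sketch of using `Token` and `Path` directly (the JSON literal here is only an illustration); object keys come back as `KeyString`, so they can be told apart from string values:

```go
import (
	"bytes"
	"fmt"

	"github.com/exponent-io/jsonpath"
)

var j = []byte(`{"Space": "RGB", "Point": {"R": 98, "G": 218}}`)

d := jsonpath.NewDecoder(bytes.NewReader(j))
for {
	t, err := d.Token()
	if err != nil {
		break // io.EOF once the stream is exhausted
	}
	// Object keys arrive as jsonpath.KeyString; other tokens keep their native types.
	if k, ok := t.(jsonpath.KeyString); ok {
		fmt.Printf("key %q at path %v\n", k, d.Path())
	}
}
```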
@@ -0,0 +1,210 @@
|
|||
package jsonpath
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
|
||||
type KeyString string
|
||||
|
||||
// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
|
||||
type Decoder struct {
|
||||
json.Decoder
|
||||
|
||||
path JsonPath
|
||||
context jsonContext
|
||||
}
|
||||
|
||||
// NewDecoder creates a new instance of the extended JSON Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{Decoder: *json.NewDecoder(r)}
|
||||
}
|
||||
|
||||
// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
|
||||
//
|
||||
// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
//
|
||||
// Consider the JSON structure
|
||||
//
|
||||
// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
|
||||
//
|
||||
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
|
||||
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
|
||||
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
|
||||
//
|
||||
// SeekTo returns a boolean value indicating whether a match was found.
|
||||
//
|
||||
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
|
||||
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
|
||||
|
||||
if len(path) == 0 {
|
||||
return len(d.path) == 0, nil
|
||||
}
|
||||
last := len(path) - 1
|
||||
if i, ok := path[last].(int); ok {
|
||||
path[last] = i - 1
|
||||
}
|
||||
|
||||
for {
|
||||
if d.path.Equal(path) {
|
||||
return true, nil
|
||||
}
|
||||
_, err := d.Token()
|
||||
if err == io.EOF {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
|
||||
// equivalent to encoding/json's Decoder.Decode().
|
||||
func (d *Decoder) Decode(v interface{}) error {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return d.Decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
|
||||
// position of the most-recently parsed token.
|
||||
func (d *Decoder) Path() JsonPath {
|
||||
p := make(JsonPath, len(d.path))
|
||||
copy(p, d.path)
|
||||
return p
|
||||
}
|
||||
|
||||
// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
|
||||
// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
|
||||
// KeyString rather than as a native string.
|
||||
func (d *Decoder) Token() (json.Token, error) {
|
||||
t, err := d.Decoder.Token()
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return t, err
|
||||
}
|
||||
|
||||
switch t := t.(type) {
|
||||
case json.Delim:
|
||||
switch t {
|
||||
case json.Delim('{'):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push("")
|
||||
d.context = objKey
|
||||
break
|
||||
case json.Delim('}'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
case json.Delim('['):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push(-1)
|
||||
d.context = arrValue
|
||||
break
|
||||
case json.Delim(']'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
}
|
||||
case float64, json.Number, bool:
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
break
|
||||
case string:
|
||||
switch d.context {
|
||||
case objKey:
|
||||
d.path.nameTop(t)
|
||||
d.context = objValue
|
||||
return KeyString(t), err
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return t, err
|
||||
}
|
||||
|
||||
// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
|
||||
// invoking each matching PathAction along the way.
|
||||
//
|
||||
// Scan returns true if there are more contiguous values to scan (for example in an array).
|
||||
func (d *Decoder) Scan(ext *PathActions) (bool, error) {
|
||||
|
||||
rootPath := d.Path()
|
||||
|
||||
// If this is an array path, increment the root path in our local copy.
|
||||
if rootPath.inferContext() == arrValue {
|
||||
rootPath.incTop()
|
||||
}
|
||||
|
||||
for {
|
||||
// advance the token position
|
||||
_, err := d.Token()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match:
|
||||
var relPath JsonPath
|
||||
|
||||
// capture the new JSON path
|
||||
path := d.Path()
|
||||
|
||||
if len(path) > len(rootPath) {
|
||||
// capture the path relative to where the scan started
|
||||
relPath = path[len(rootPath):]
|
||||
} else {
|
||||
// if the path is not longer than the root, then we are done with this scan
|
||||
// return boolean flag indicating if there are more items to scan at the same level
|
||||
return d.Decoder.More(), nil
|
||||
}
|
||||
|
||||
// match the relative path against the path actions
|
||||
if node := ext.node.match(relPath); node != nil {
|
||||
if node.action != nil {
|
||||
// we have a match so execute the action
|
||||
err = node.action(d)
|
||||
if err != nil {
|
||||
return d.Decoder.More(), err
|
||||
}
|
||||
// The action may have advanced the decoder. If we are in an array, advancing it further would
|
||||
// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
|
||||
if d.path.inferContext() == arrValue && d.Decoder.More() {
|
||||
goto match
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,67 @@
|
|||
// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
|
||||
package jsonpath
|
||||
|
||||
import "fmt"
|
||||
|
||||
type jsonContext int
|
||||
|
||||
const (
|
||||
none jsonContext = iota
|
||||
objKey
|
||||
objValue
|
||||
arrValue
|
||||
)
|
||||
|
||||
// AnyIndex can be used in a pattern to match any array index.
|
||||
const AnyIndex = -2
|
||||
|
||||
// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
type JsonPath []interface{}
|
||||
|
||||
func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
|
||||
func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
|
||||
|
||||
// increment the index at the top of the stack (must be an array index)
|
||||
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
|
||||
|
||||
// name the key at the top of the stack (must be an object key)
|
||||
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
|
||||
|
||||
// infer the context from the item at the top of the stack
|
||||
func (p *JsonPath) inferContext() jsonContext {
|
||||
if len(*p) == 0 {
|
||||
return none
|
||||
}
|
||||
t := (*p)[len(*p)-1]
|
||||
switch t.(type) {
|
||||
case string:
|
||||
return objKey
|
||||
case int:
|
||||
return arrValue
|
||||
default:
|
||||
panic(fmt.Sprintf("Invalid stack type %T", t))
|
||||
}
|
||||
}
|
||||
|
||||
// Equal tests for equality between two JsonPath types.
|
||||
func (p *JsonPath) Equal(o JsonPath) bool {
|
||||
if len(*p) != len(o) {
|
||||
return false
|
||||
}
|
||||
for i, v := range *p {
|
||||
if v != o[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *JsonPath) HasPrefix(o JsonPath) bool {
|
||||
for i, v := range o {
|
||||
if v != (*p)[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@@ -0,0 +1,61 @@
|
|||
package jsonpath
|
||||
|
||||
// pathNode is used to construct a trie of paths to be matched
|
||||
type pathNode struct {
|
||||
matchOn interface{} // string, or integer
|
||||
childNodes []pathNode
|
||||
action DecodeAction
|
||||
}
|
||||
|
||||
// match climbs the trie to find a node that matches the given JSON path.
|
||||
func (n *pathNode) match(path JsonPath) *pathNode {
|
||||
var node *pathNode = n
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
|
||||
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
|
||||
type PathActions struct {
|
||||
node pathNode
|
||||
}
|
||||
|
||||
// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
|
||||
type DecodeAction func(d *Decoder) error
|
||||
|
||||
// Add specifies an action to call on the Decoder when the specified path is encountered.
|
||||
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
|
||||
|
||||
var node *pathNode = &je.node
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
|
||||
node = &node.childNodes[len(node.childNodes)-1]
|
||||
}
|
||||
}
|
||||
node.action = action
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
_testmain.go
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
|
@@ -0,0 +1,17 @@
|
|||
The MIT License (MIT)
|
||||
Copyright (c) 2015 Frits van Bommel
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@@ -0,0 +1,5 @@
|
|||
# sortorder [GoDoc](https://pkg.go.dev/github.com/fvbommel/sortorder)
|
||||
|
||||
import "github.com/fvbommel/sortorder"
|
||||
|
||||
Sort orders and comparison functions.
|
|
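A minimal usage sketch (the file names are only an illustration):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/fvbommel/sortorder"
)

func main() {
	names := []string{"img12.png", "img2.png", "img1.png"}

	// Natural implements sort.Interface, so embedded integers compare by value.
	sort.Sort(sortorder.Natural(names))
	fmt.Println(names) // [img1.png img2.png img12.png]

	// NaturalLess is the underlying comparison function.
	fmt.Println(sortorder.NaturalLess("abc2", "abc12")) // true
}
```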
@@ -0,0 +1,5 @@
|
|||
// Package sortorder implements sort orders and comparison functions.
|
||||
//
|
||||
// Currently, it only implements so-called "natural order", where integers
|
||||
// embedded in strings are compared by value.
|
||||
package sortorder
|
|
@@ -0,0 +1,3 @@
|
|||
module github.com/fvbommel/sortorder
|
||||
|
||||
go 1.13
|
|
@@ -0,0 +1,76 @@
|
|||
package sortorder
|
||||
|
||||
// Natural implements sort.Interface to sort strings in natural order. This
|
||||
// means that e.g. "abc2" < "abc12".
|
||||
//
|
||||
// Non-digit sequences and numbers are compared separately. The former are
|
||||
// compared bytewise, while the latter are compared numerically (except that
|
||||
// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02")
|
||||
//
|
||||
// Limitation: only ASCII digits (0-9) are considered.
|
||||
type Natural []string
|
||||
|
||||
func (n Natural) Len() int { return len(n) }
|
||||
func (n Natural) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
|
||||
func (n Natural) Less(i, j int) bool { return NaturalLess(n[i], n[j]) }
|
||||
|
||||
func isdigit(b byte) bool { return '0' <= b && b <= '9' }
|
||||
|
||||
// NaturalLess compares two strings using natural ordering. This means that e.g.
|
||||
// "abc2" < "abc12".
|
||||
//
|
||||
// Non-digit sequences and numbers are compared separately. The former are
|
||||
// compared bytewise, while the latter are compared numerically (except that
|
||||
// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02")
|
||||
//
|
||||
// Limitation: only ASCII digits (0-9) are considered.
|
||||
func NaturalLess(str1, str2 string) bool {
|
||||
idx1, idx2 := 0, 0
|
||||
for idx1 < len(str1) && idx2 < len(str2) {
|
||||
c1, c2 := str1[idx1], str2[idx2]
|
||||
dig1, dig2 := isdigit(c1), isdigit(c2)
|
||||
switch {
|
||||
case dig1 != dig2: // Digits before other characters.
|
||||
return dig1 // True if LHS is a digit, false if the RHS is one.
|
||||
case !dig1: // && !dig2, because dig1 == dig2
|
||||
// UTF-8 compares bytewise-lexicographically, no need to decode
|
||||
// codepoints.
|
||||
if c1 != c2 {
|
||||
return c1 < c2
|
||||
}
|
||||
idx1++
|
||||
idx2++
|
||||
default: // Digits
|
||||
// Eat zeros.
|
||||
for ; idx1 < len(str1) && str1[idx1] == '0'; idx1++ {
|
||||
}
|
||||
for ; idx2 < len(str2) && str2[idx2] == '0'; idx2++ {
|
||||
}
|
||||
// Eat all digits.
|
||||
nonZero1, nonZero2 := idx1, idx2
|
||||
for ; idx1 < len(str1) && isdigit(str1[idx1]); idx1++ {
|
||||
}
|
||||
for ; idx2 < len(str2) && isdigit(str2[idx2]); idx2++ {
|
||||
}
|
||||
// If lengths of numbers with non-zero prefix differ, the shorter
|
||||
// one is less.
|
||||
if len1, len2 := idx1-nonZero1, idx2-nonZero2; len1 != len2 {
|
||||
return len1 < len2
|
||||
}
|
||||
// If they're equal, string comparison is correct.
|
||||
if nr1, nr2 := str1[nonZero1:idx1], str2[nonZero2:idx2]; nr1 != nr2 {
|
||||
return nr1 < nr2
|
||||
}
|
||||
// Otherwise, the one with fewer zeros is less.
|
||||
// Because everything up to the number is equal, comparing the index
|
||||
// after the zeros is sufficient.
|
||||
if nonZero1 != nonZero2 {
|
||||
return nonZero1 < nonZero2
|
||||
}
|
||||
}
|
||||
// They're identical so far, so continue comparing.
|
||||
}
|
||||
// So far they are identical. At least one is ended. If the other continues,
|
||||
// it sorts last.
|
||||
return len(str1) < len(str2)
|
||||
}
|
|
@@ -0,0 +1,5 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- "1.8.x"
|
||||
- "1.10.x"
|
|
@@ -0,0 +1,7 @@
|
|||
Copyright (c) 2015 Conrad Irwin <conrad@bugsnag.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@@ -0,0 +1,66 @@
|
|||
go-errors/errors
|
||||
================
|
||||
|
||||
[Build Status](https://travis-ci.org/go-errors/errors)
|
||||
|
||||
Package errors adds stacktrace support to errors in go.
|
||||
|
||||
This is particularly useful when you want to understand the state of execution
|
||||
when an error was returned unexpectedly.
|
||||
|
||||
It provides the type \*Error which implements the standard golang error
|
||||
interface, so you can use this library interchangeably with code that is
|
||||
expecting a normal error return.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Full documentation is available on
|
||||
[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple
|
||||
example:
|
||||
|
||||
```go
|
||||
package crashy
|
||||
|
||||
import "github.com/go-errors/errors"
|
||||
|
||||
var Crashed = errors.Errorf("oh dear")
|
||||
|
||||
func Crash() error {
|
||||
return errors.New(Crashed)
|
||||
}
|
||||
```
|
||||
|
||||
This can be called as follows:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"crashy"
|
||||
"fmt"
|
||||
"github.com/go-errors/errors"
|
||||
)
|
||||
|
||||
func main() {
|
||||
err := crashy.Crash()
|
||||
if err != nil {
|
||||
if errors.Is(err, crashy.Crashed) {
|
||||
fmt.Println(err.(*errors.Error).ErrorStack())
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
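When the error comes from somewhere else, `Wrap` attaches a stacktrace at the point where you first see it. A minimal sketch (the `readConfig` helper is illustrative, not part of this package):

```go
package config

import (
	"io/ioutil"

	"github.com/go-errors/errors"
)

// readConfig is a hypothetical helper; Wrap records the stacktrace here,
// where the underlying error is first observed.
func readConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		// skip == 0 starts the trace at this call to Wrap.
		return nil, errors.Wrap(err, 0)
	}
	return data, nil
}
```

The returned `*errors.Error` still satisfies the standard error interface, and calling `ErrorStack()` on it prints both the message and the captured trace.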
|
||||
Meta-fu
|
||||
-------
|
||||
|
||||
This package was originally written to allow reporting to
|
||||
[Bugsnag](https://bugsnag.com/) from
|
||||
[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar
|
||||
packages by Facebook and Dropbox, it was moved to one canonical location so
|
||||
everyone can benefit.
|
||||
|
||||
This package is licensed under the MIT license, see LICENSE.MIT for details.
|
|
@@ -0,0 +1,89 @@
|
|||
mode: set
|
||||
github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1
|
||||
github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1
|
||||
github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0
|
||||
github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1
|
||||
github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1
|
||||
github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1
|
||||
github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1
|
||||
github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1
|
||||
github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0
|
||||
github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1
|
||||
github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1
|
||||
github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1
|
||||
github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0
|
||||
github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0
|
||||
github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1
|
||||
github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1
|
||||
github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1
|
||||
github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1
|
||||
github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1
|
||||
github.com/go-errors/errors/error.go:70.32,73.23 2 1
|
||||
github.com/go-errors/errors/error.go:80.2,85.3 3 1
|
||||
github.com/go-errors/errors/error.go:74.2,75.10 1 1
|
||||
github.com/go-errors/errors/error.go:76.2,77.28 1 1
|
||||
github.com/go-errors/errors/error.go:92.43,95.23 2 1
|
||||
github.com/go-errors/errors/error.go:104.2,109.3 3 1
|
||||
github.com/go-errors/errors/error.go:96.2,97.11 1 1
|
||||
github.com/go-errors/errors/error.go:98.2,99.10 1 1
|
||||
github.com/go-errors/errors/error.go:100.2,101.28 1 1
|
||||
github.com/go-errors/errors/error.go:115.39,117.19 1 1
|
||||
github.com/go-errors/errors/error.go:121.2,121.29 1 1
|
||||
github.com/go-errors/errors/error.go:125.2,125.43 1 1
|
||||
github.com/go-errors/errors/error.go:129.2,129.14 1 1
|
||||
github.com/go-errors/errors/error.go:117.19,119.3 1 1
|
||||
github.com/go-errors/errors/error.go:121.29,123.3 1 1
|
||||
github.com/go-errors/errors/error.go:125.43,127.3 1 1
|
||||
github.com/go-errors/errors/error.go:135.53,137.2 1 1
|
||||
github.com/go-errors/errors/error.go:140.34,142.2 1 1
|
||||
github.com/go-errors/errors/error.go:146.34,149.42 2 1
|
||||
github.com/go-errors/errors/error.go:153.2,153.20 1 1
|
||||
github.com/go-errors/errors/error.go:149.42,151.3 1 1
|
||||
github.com/go-errors/errors/error.go:158.39,160.2 1 1
|
||||
github.com/go-errors/errors/error.go:164.46,165.23 1 1
|
||||
github.com/go-errors/errors/error.go:173.2,173.19 1 1
|
||||
github.com/go-errors/errors/error.go:165.23,168.32 2 1
|
||||
github.com/go-errors/errors/error.go:168.32,170.4 1 1
|
||||
github.com/go-errors/errors/error.go:177.37,178.42 1 1
|
||||
github.com/go-errors/errors/error.go:181.2,181.41 1 1
|
||||
github.com/go-errors/errors/error.go:178.42,180.3 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1
|
||||
github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1
|
||||
github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1
|
||||
github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0
|
||||
github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1
|
||||
github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0
|
|
@@ -0,0 +1,217 @@
|
|||
// Package errors provides errors that have stack-traces.
|
||||
//
|
||||
// This is particularly useful when you want to understand the
|
||||
// state of execution when an error was returned unexpectedly.
|
||||
//
|
||||
// It provides the type *Error which implements the standard
|
||||
// golang error interface, so you can use this library interchangeably
|
||||
// with code that is expecting a normal error return.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// package crashy
|
||||
//
|
||||
// import "github.com/go-errors/errors"
|
||||
//
|
||||
// var Crashed = errors.Errorf("oh dear")
|
||||
//
|
||||
// func Crash() error {
|
||||
// return errors.New(Crashed)
|
||||
// }
|
||||
//
|
||||
// This can be called as follows:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "crashy"
|
||||
// "fmt"
|
||||
// "github.com/go-errors/errors"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// err := crashy.Crash()
|
||||
// if err != nil {
|
||||
// if errors.Is(err, crashy.Crashed) {
|
||||
// fmt.Println(err.(*errors.Error).ErrorStack())
|
||||
// } else {
|
||||
// panic(err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// This package was originally written to allow reporting to Bugsnag,
|
||||
// but after I found similar packages by Facebook and Dropbox, it
|
||||
// was moved to one canonical location so everyone can benefit.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// The maximum number of stackframes on any error.
|
||||
var MaxStackDepth = 50
|
||||
|
||||
// Error is an error with an attached stacktrace. It can be used
|
||||
// wherever the builtin error interface is expected.
|
||||
type Error struct {
|
||||
Err error
|
||||
stack []uintptr
|
||||
frames []StackFrame
|
||||
prefix string
|
||||
}
|
||||
|
||||
// New makes an Error from the given value. If that value is already an
|
||||
// error then it will be used directly, if not, it will be passed to
|
||||
// fmt.Errorf("%v"). The stacktrace will point to the line of code that
|
||||
// called New.
|
||||
func New(e interface{}) *Error {
|
||||
var err error
|
||||
|
||||
switch e := e.(type) {
|
||||
case error:
|
||||
err = e
|
||||
default:
|
||||
err = fmt.Errorf("%v", e)
|
||||
}
|
||||
|
||||
stack := make([]uintptr, MaxStackDepth)
|
||||
length := runtime.Callers(2, stack[:])
|
||||
return &Error{
|
||||
Err: err,
|
||||
stack: stack[:length],
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap makes an Error from the given value. If that value is already an
|
||||
// error then it will be used directly, if not, it will be passed to
|
||||
// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
|
||||
// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
|
||||
func Wrap(e interface{}, skip int) *Error {
|
||||
var err error
|
||||
|
||||
switch e := e.(type) {
|
||||
case *Error:
|
||||
return e
|
||||
case error:
|
||||
err = e
|
||||
default:
|
||||
err = fmt.Errorf("%v", e)
|
||||
}
|
||||
|
||||
stack := make([]uintptr, MaxStackDepth)
|
||||
length := runtime.Callers(2+skip, stack[:])
|
||||
return &Error{
|
||||
Err: err,
|
||||
stack: stack[:length],
|
||||
}
|
||||
}
|
||||
|
||||
// WrapPrefix makes an Error from the given value. If that value is already an
|
||||
// error then it will be used directly, if not, it will be passed to
|
||||
// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the
|
||||
// error message when calling Error(). The skip parameter indicates how far
|
||||
// up the stack to start the stacktrace. 0 is from the current call,
|
||||
// 1 from its caller, etc.
|
||||
func WrapPrefix(e interface{}, prefix string, skip int) *Error {
|
||||
|
||||
err := Wrap(e, 1+skip)
|
||||
|
||||
if err.prefix != "" {
|
||||
prefix = fmt.Sprintf("%s: %s", prefix, err.prefix)
|
||||
}
|
||||
|
||||
return &Error{
|
||||
Err: err.Err,
|
||||
stack: err.stack,
|
||||
prefix: prefix,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Is detects whether the error is equal to a given error. Errors
|
||||
// are considered equal by this function if they are the same object,
|
||||
// or if they both contain the same error inside an errors.Error.
|
||||
func Is(e error, original error) bool {
|
||||
|
||||
if e == original {
|
||||
return true
|
||||
}
|
||||
|
||||
if e, ok := e.(*Error); ok {
|
||||
return Is(e.Err, original)
|
||||
}
|
||||
|
||||
if original, ok := original.(*Error); ok {
|
||||
return Is(e, original.Err)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Errorf creates a new error with the given message. You can use it
|
||||
// as a drop-in replacement for fmt.Errorf() to provide descriptive
|
||||
// errors in return values.
|
||||
func Errorf(format string, a ...interface{}) *Error {
|
||||
return Wrap(fmt.Errorf(format, a...), 1)
|
||||
}
|
||||
|
||||
// Error returns the underlying error's message.
|
||||
func (err *Error) Error() string {
|
||||
|
||||
msg := err.Err.Error()
|
||||
if err.prefix != "" {
|
||||
msg = fmt.Sprintf("%s: %s", err.prefix, msg)
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
// Stack returns the callstack formatted the same way that go does
|
||||
// in runtime/debug.Stack()
|
||||
func (err *Error) Stack() []byte {
|
||||
buf := bytes.Buffer{}
|
||||
|
||||
for _, frame := range err.StackFrames() {
|
||||
buf.WriteString(frame.String())
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Callers satisfies the bugsnag ErrorWithCallerS() interface
|
||||
// so that the stack can be read out.
|
||||
func (err *Error) Callers() []uintptr {
|
||||
return err.stack
|
||||
}
|
||||
|
||||
// ErrorStack returns a string that contains both the
|
||||
// error message and the callstack.
|
||||
func (err *Error) ErrorStack() string {
|
||||
return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack())
|
||||
}
|
||||
|
||||
// StackFrames returns an array of frames containing information about the
|
||||
// stack.
|
||||
func (err *Error) StackFrames() []StackFrame {
|
||||
if err.frames == nil {
|
||||
err.frames = make([]StackFrame, len(err.stack))
|
||||
|
||||
for i, pc := range err.stack {
|
||||
err.frames[i] = NewStackFrame(pc)
|
||||
}
|
||||
}
|
||||
|
||||
return err.frames
|
||||
}
|
||||
|
||||
// TypeName returns the type of this error, e.g. *errors.errorString.
|
||||
func (err *Error) TypeName() string {
|
||||
if _, ok := err.Err.(uncaughtPanic); ok {
|
||||
return "panic"
|
||||
}
|
||||
return reflect.TypeOf(err.Err).String()
|
||||
}
|
|
@@ -0,0 +1,127 @@
|
|||
package errors
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type uncaughtPanic struct{ message string }
|
||||
|
||||
func (p uncaughtPanic) Error() string {
|
||||
return p.message
|
||||
}
|
||||
|
||||
// ParsePanic allows you to get an error object from the output of a go program
|
||||
// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap.
|
||||
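//
// A minimal illustrative sketch (panicOutput is an assumed variable holding
// the captured stderr of the crashed program):
//
//	if e, err := ParsePanic(panicOutput); err == nil {
//		fmt.Println(e.ErrorStack())
//	}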
func ParsePanic(text string) (*Error, error) {
|
||||
lines := strings.Split(text, "\n")
|
||||
|
||||
state := "start"
|
||||
|
||||
var message string
|
||||
var stack []StackFrame
|
||||
|
||||
for i := 0; i < len(lines); i++ {
|
||||
line := lines[i]
|
||||
|
||||
if state == "start" {
|
||||
if strings.HasPrefix(line, "panic: ") {
|
||||
message = strings.TrimPrefix(line, "panic: ")
|
||||
state = "seek"
|
||||
} else {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line)
|
||||
}
|
||||
|
||||
} else if state == "seek" {
|
||||
if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") {
|
||||
state = "parsing"
|
||||
}
|
||||
|
||||
} else if state == "parsing" {
|
||||
if line == "" {
|
||||
state = "done"
|
||||
break
|
||||
}
|
||||
createdBy := false
|
||||
if strings.HasPrefix(line, "created by ") {
|
||||
line = strings.TrimPrefix(line, "created by ")
|
||||
createdBy = true
|
||||
}
|
||||
|
||||
i++
|
||||
|
||||
if i >= len(lines) {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line)
|
||||
}
|
||||
|
||||
frame, err := parsePanicFrame(line, lines[i], createdBy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stack = append(stack, *frame)
|
||||
if createdBy {
|
||||
state = "done"
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if state == "done" || state == "parsing" {
|
||||
return &Error{Err: uncaughtPanic{message}, frames: stack}, nil
|
||||
}
|
||||
return nil, Errorf("could not parse panic: %v", text)
|
||||
}
|
||||
|
||||
// The lines we're passing look like this:
|
||||
//
|
||||
// main.(*foo).destruct(0xc208067e98)
|
||||
// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
|
||||
func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
|
||||
idx := strings.LastIndex(name, "(")
|
||||
if idx == -1 && !createdBy {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name)
|
||||
}
|
||||
if idx != -1 {
|
||||
name = name[:idx]
|
||||
}
|
||||
pkg := ""
|
||||
|
||||
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
|
||||
pkg += name[:lastslash] + "/"
|
||||
name = name[lastslash+1:]
|
||||
}
|
||||
if period := strings.Index(name, "."); period >= 0 {
|
||||
pkg += name[:period]
|
||||
name = name[period+1:]
|
||||
}
|
||||
|
||||
name = strings.Replace(name, "·", ".", -1)
|
||||
|
||||
if !strings.HasPrefix(line, "\t") {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line)
|
||||
}
|
||||
|
||||
idx = strings.LastIndex(line, ":")
|
||||
if idx == -1 {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line)
|
||||
}
|
||||
file := line[1:idx]
|
||||
|
||||
number := line[idx+1:]
|
||||
if idx = strings.Index(number, " +"); idx > -1 {
|
||||
number = number[:idx]
|
||||
}
|
||||
|
||||
lno, err := strconv.ParseInt(number, 10, 32)
|
||||
if err != nil {
|
||||
return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line)
|
||||
}
|
||||
|
||||
return &StackFrame{
|
||||
File: file,
|
||||
LineNumber: int(lno),
|
||||
Package: pkg,
|
||||
Name: name,
|
||||
}, nil
|
||||
}
|
|
@@ -0,0 +1,102 @@
|
|||
package errors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A StackFrame contains all necessary information to generate a line
|
||||
// in a callstack.
|
||||
type StackFrame struct {
|
||||
// The path to the file containing this ProgramCounter
|
||||
File string
|
||||
// The LineNumber in that file
|
||||
LineNumber int
|
||||
// The Name of the function that contains this ProgramCounter
|
||||
Name string
|
||||
// The Package that contains this function
|
||||
Package string
|
||||
// The underlying ProgramCounter
|
||||
ProgramCounter uintptr
|
||||
}
|
||||
|
||||
// NewStackFrame populates a stack frame object from the program counter.
|
||||
func NewStackFrame(pc uintptr) (frame StackFrame) {
|
||||
|
||||
frame = StackFrame{ProgramCounter: pc}
|
||||
if frame.Func() == nil {
|
||||
return
|
||||
}
|
||||
frame.Package, frame.Name = packageAndName(frame.Func())
|
||||
|
||||
// pc -1 because the program counters we use are usually return addresses,
|
||||
// and we want to show the line that corresponds to the function call
|
||||
frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1)
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// Func returns the function that contained this frame.
|
||||
func (frame *StackFrame) Func() *runtime.Func {
|
||||
if frame.ProgramCounter == 0 {
|
||||
return nil
|
||||
}
|
||||
return runtime.FuncForPC(frame.ProgramCounter)
|
||||
}
|
||||
|
||||
// String returns the stackframe formatted in the same way as go does
|
||||
// in runtime/debug.Stack()
|
||||
func (frame *StackFrame) String() string {
|
||||
str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter)
|
||||
|
||||
source, err := frame.SourceLine()
|
||||
if err != nil {
|
||||
return str
|
||||
}
|
||||
|
||||
return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source)
|
||||
}
|
||||
|
||||
// SourceLine gets the line of code (from File and Line) of the original source if possible.
|
||||
func (frame *StackFrame) SourceLine() (string, error) {
|
||||
data, err := ioutil.ReadFile(frame.File)
|
||||
|
||||
if err != nil {
|
||||
return "", New(err)
|
||||
}
|
||||
|
||||
lines := bytes.Split(data, []byte{'\n'})
|
||||
if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) {
|
||||
return "???", nil
|
||||
}
|
||||
// -1 because line-numbers are 1 based, but our array is 0 based
|
||||
return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil
|
||||
}
|
||||
|
||||
func packageAndName(fn *runtime.Func) (string, string) {
|
||||
name := fn.Name()
|
||||
pkg := ""
|
||||
|
||||
// The name includes the path name to the package, which is unnecessary
|
||||
// since the file name is already included. Plus, it has center dots.
|
||||
// That is, we see
|
||||
// runtime/debug.*T·ptrmethod
|
||||
// and want
|
||||
// *T.ptrmethod
|
||||
// Since the package path might contain dots (e.g. code.google.com/...),
|
||||
// we first remove the path prefix if there is one.
|
||||
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
|
||||
pkg += name[:lastslash] + "/"
|
||||
name = name[lastslash+1:]
|
||||
}
|
||||
if period := strings.Index(name, "."); period >= 0 {
|
||||
pkg += name[:period]
|
||||
name = name[period+1:]
|
||||
}
|
||||
|
||||
name = strings.Replace(name, "·", ".", -1)
|
||||
return pkg, name
|
||||
}
|
|
@@ -0,0 +1 @@
|
|||
language: go
|
|
@@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,12 @@
# BTree implementation for Go

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.

The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.

See http://godoc.org/github.com/google/btree for documentation.
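A minimal usage sketch (not part of the vendored README or of this diff; it only exercises the API defined in the btree.go hunk that follows: New, Int, ReplaceOrInsert, Has, Len, Ascend):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2: each node holds 1-3 items and 2-4 children
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())             // 10
	fmt.Println(tr.Has(btree.Int(3))) // true
	tr.Ascend(func(i btree.Item) bool {
		fmt.Print(i, " ") // 0 1 2 ... 9, in order
		return true
	})
	fmt.Println()
}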
@ -0,0 +1,890 @@
|
|||
// Copyright 2014 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package btree implements in-memory B-Trees of arbitrary degree.
|
||||
//
|
||||
// btree implements an in-memory B-Tree for use as an ordered data structure.
|
||||
// It is not meant for persistent storage solutions.
|
||||
//
|
||||
// It has a flatter structure than an equivalent red-black or other binary tree,
|
||||
// which in some cases yields better memory usage and/or performance.
|
||||
// See some discussion on the matter here:
|
||||
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
|
||||
// Note, though, that this project is in no way related to the C++ B-Tree
|
||||
// implementation written about there.
|
||||
//
|
||||
// Within this tree, each node contains a slice of items and a (possibly nil)
|
||||
// slice of children. For basic numeric values or raw structs, this can cause
|
||||
// efficiency differences when compared to equivalent C++ template code that
|
||||
// stores values in arrays within the node:
|
||||
// * Due to the overhead of storing values as interfaces (each
|
||||
// value needs to be stored as the value itself, then 2 words for the
|
||||
// interface pointing to that value and its type), resulting in higher
|
||||
// memory use.
|
||||
// * Since interfaces can point to values anywhere in memory, values are
|
||||
// most likely not stored in contiguous blocks, resulting in a higher
|
||||
// number of cache misses.
|
||||
// These issues don't tend to matter, though, when working with strings or other
|
||||
// heap-allocated structures, since C++-equivalent structures also must store
|
||||
// pointers and also distribute their values across the heap.
|
||||
//
|
||||
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
|
||||
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
|
||||
// widely used ordered tree implementation in the Go ecosystem currently.
|
||||
// Its functions, therefore, exactly mirror those of
|
||||
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
|
||||
// support storing multiple equivalent values.
|
||||
package btree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Item represents a single object in the tree.
|
||||
type Item interface {
|
||||
// Less tests whether the current item is less than the given argument.
|
||||
//
|
||||
// This must provide a strict weak ordering.
|
||||
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
|
||||
// hold one of either a or b in the tree).
|
||||
Less(than Item) bool
|
||||
}
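// Illustrative sketch (hypothetical type, not part of this diff): a custom Item
// keyed by a string, satisfying the strict-weak-ordering contract described
// above. Two byKey values with equal Key fields are treated as the same item,
// so ReplaceOrInsert on a duplicate key replaces the stored value.
type byKey struct {
	Key   string
	Value int
}

func (a byKey) Less(than Item) bool {
	return a.Key < than.(byKey).Key
}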
|
||||
|
||||
const (
|
||||
DefaultFreeListSize = 32
|
||||
)
|
||||
|
||||
var (
|
||||
nilItems = make(items, 16)
|
||||
nilChildren = make(children, 16)
|
||||
)
|
||||
|
||||
// FreeList represents a free list of btree nodes. By default each
|
||||
// BTree has its own FreeList, but multiple BTrees can share the same
|
||||
// FreeList.
|
||||
// Two Btrees using the same freelist are safe for concurrent write access.
|
||||
type FreeList struct {
|
||||
mu sync.Mutex
|
||||
freelist []*node
|
||||
}
|
||||
|
||||
// NewFreeList creates a new free list.
|
||||
// size is the maximum size of the returned free list.
|
||||
func NewFreeList(size int) *FreeList {
|
||||
return &FreeList{freelist: make([]*node, 0, size)}
|
||||
}
|
||||
|
||||
func (f *FreeList) newNode() (n *node) {
|
||||
f.mu.Lock()
|
||||
index := len(f.freelist) - 1
|
||||
if index < 0 {
|
||||
f.mu.Unlock()
|
||||
return new(node)
|
||||
}
|
||||
n = f.freelist[index]
|
||||
f.freelist[index] = nil
|
||||
f.freelist = f.freelist[:index]
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// freeNode adds the given node to the list, returning true if it was added
|
||||
// and false if it was discarded.
|
||||
func (f *FreeList) freeNode(n *node) (out bool) {
|
||||
f.mu.Lock()
|
||||
if len(f.freelist) < cap(f.freelist) {
|
||||
f.freelist = append(f.freelist, n)
|
||||
out = true
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
|
||||
// the tree. When this function returns false, iteration will stop and the
|
||||
// associated Ascend* function will immediately return.
|
||||
type ItemIterator func(i Item) bool
|
||||
|
||||
// New creates a new B-Tree with the given degree.
|
||||
//
|
||||
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
|
||||
// and 2-4 children).
|
||||
func New(degree int) *BTree {
|
||||
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
|
||||
}
|
||||
|
||||
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
||||
func NewWithFreeList(degree int, f *FreeList) *BTree {
|
||||
if degree <= 1 {
|
||||
panic("bad degree")
|
||||
}
|
||||
return &BTree{
|
||||
degree: degree,
|
||||
cow: &copyOnWriteContext{freelist: f},
|
||||
}
|
||||
}
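// Hypothetical helper (not part of this diff): two trees sharing one FreeList,
// which the FreeList comment above says is safe for concurrent writes across
// the two trees; nodes released by one tree's deletes can then be reused by
// the other's inserts.
func newSharedPair(degree int) (*BTree, *BTree) {
	fl := NewFreeList(DefaultFreeListSize)
	return NewWithFreeList(degree, fl), NewWithFreeList(degree, fl)
}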
|
||||
|
||||
// items stores items in a node.
|
||||
type items []Item
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *items) insertAt(index int, item Item) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = item
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *items) removeAt(index int) Item {
|
||||
item := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return item
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *items) pop() (out Item) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
|
||||
// first index items. index must be less than or equal to length.
|
||||
func (s *items) truncate(index int) {
|
||||
var toClear items
|
||||
*s, toClear = (*s)[:index], (*s)[index:]
|
||||
for len(toClear) > 0 {
|
||||
toClear = toClear[copy(toClear, nilItems):]
|
||||
}
|
||||
}
|
||||
|
||||
// find returns the index where the given item should be inserted into this
|
||||
// list. 'found' is true if the item already exists in the list at the given
|
||||
// index.
|
||||
func (s items) find(item Item) (index int, found bool) {
|
||||
i := sort.Search(len(s), func(i int) bool {
|
||||
return item.Less(s[i])
|
||||
})
|
||||
if i > 0 && !s[i-1].Less(item) {
|
||||
return i - 1, true
|
||||
}
|
||||
return i, false
|
||||
}
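// Worked example (hypothetical helper, not part of this diff) of find's
// contract: an existing item is reported at its index with found=true, while a
// missing item is reported at its would-be insertion index with found=false.
func exampleFind() {
	s := items{Int(2), Int(4), Int(6)}
	i, found := s.find(Int(4)) // i == 1, found == true
	j, miss := s.find(Int(5))  // j == 2, miss == false: 5 would be inserted before 6
	_, _, _, _ = i, found, j, miss
}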
|
||||
|
||||
// children stores child nodes in a node.
|
||||
type children []*node
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *children) insertAt(index int, n *node) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = n
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *children) removeAt(index int) *node {
|
||||
n := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return n
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *children) pop() (out *node) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
|
||||
// first index children. index must be less than or equal to length.
|
||||
func (s *children) truncate(index int) {
|
||||
var toClear children
|
||||
*s, toClear = (*s)[:index], (*s)[index:]
|
||||
for len(toClear) > 0 {
|
||||
toClear = toClear[copy(toClear, nilChildren):]
|
||||
}
|
||||
}
|
||||
|
||||
// node is an internal node in a tree.
|
||||
//
|
||||
// It must at all times maintain the invariant that either
|
||||
// * len(children) == 0, len(items) unconstrained
|
||||
// * len(children) == len(items) + 1
|
||||
type node struct {
|
||||
items items
|
||||
children children
|
||||
cow *copyOnWriteContext
|
||||
}
|
||||
|
||||
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
|
||||
if n.cow == cow {
|
||||
return n
|
||||
}
|
||||
out := cow.newNode()
|
||||
if cap(out.items) >= len(n.items) {
|
||||
out.items = out.items[:len(n.items)]
|
||||
} else {
|
||||
out.items = make(items, len(n.items), cap(n.items))
|
||||
}
|
||||
copy(out.items, n.items)
|
||||
// Copy children
|
||||
if cap(out.children) >= len(n.children) {
|
||||
out.children = out.children[:len(n.children)]
|
||||
} else {
|
||||
out.children = make(children, len(n.children), cap(n.children))
|
||||
}
|
||||
copy(out.children, n.children)
|
||||
return out
|
||||
}
|
||||
|
||||
func (n *node) mutableChild(i int) *node {
|
||||
c := n.children[i].mutableFor(n.cow)
|
||||
n.children[i] = c
|
||||
return c
|
||||
}
|
||||
|
||||
// split splits the given node at the given index. The current node shrinks,
|
||||
// and this function returns the item that existed at that index and a new node
|
||||
// containing all items/children after it.
|
||||
func (n *node) split(i int) (Item, *node) {
|
||||
item := n.items[i]
|
||||
next := n.cow.newNode()
|
||||
next.items = append(next.items, n.items[i+1:]...)
|
||||
n.items.truncate(i)
|
||||
if len(n.children) > 0 {
|
||||
next.children = append(next.children, n.children[i+1:]...)
|
||||
n.children.truncate(i + 1)
|
||||
}
|
||||
return item, next
|
||||
}
|
||||
|
||||
// maybeSplitChild checks if a child should be split, and if so splits it.
|
||||
// Returns whether or not a split occurred.
|
||||
func (n *node) maybeSplitChild(i, maxItems int) bool {
|
||||
if len(n.children[i].items) < maxItems {
|
||||
return false
|
||||
}
|
||||
first := n.mutableChild(i)
|
||||
item, second := first.split(maxItems / 2)
|
||||
n.items.insertAt(i, item)
|
||||
n.children.insertAt(i+1, second)
|
||||
return true
|
||||
}
|
||||
|
||||
// insert inserts an item into the subtree rooted at this node, making sure
|
||||
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
|
||||
func (n *node) insert(item Item, maxItems int) Item {
|
||||
i, found := n.items.find(item)
|
||||
if found {
|
||||
out := n.items[i]
|
||||
n.items[i] = item
|
||||
return out
|
||||
}
|
||||
if len(n.children) == 0 {
|
||||
n.items.insertAt(i, item)
|
||||
return nil
|
||||
}
|
||||
if n.maybeSplitChild(i, maxItems) {
|
||||
inTree := n.items[i]
|
||||
switch {
|
||||
case item.Less(inTree):
|
||||
// no change, we want first split node
|
||||
case inTree.Less(item):
|
||||
i++ // we want second split node
|
||||
default:
|
||||
out := n.items[i]
|
||||
n.items[i] = item
|
||||
return out
|
||||
}
|
||||
}
|
||||
return n.mutableChild(i).insert(item, maxItems)
|
||||
}
|
||||
|
||||
// get finds the given key in the subtree and returns it.
|
||||
func (n *node) get(key Item) Item {
|
||||
i, found := n.items.find(key)
|
||||
if found {
|
||||
return n.items[i]
|
||||
} else if len(n.children) > 0 {
|
||||
return n.children[i].get(key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// min returns the first item in the subtree.
|
||||
func min(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[0]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[0]
|
||||
}
|
||||
|
||||
// max returns the last item in the subtree.
|
||||
func max(n *node) Item {
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
for len(n.children) > 0 {
|
||||
n = n.children[len(n.children)-1]
|
||||
}
|
||||
if len(n.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.items[len(n.items)-1]
|
||||
}
|
||||
|
||||
// toRemove details what item to remove in a node.remove call.
|
||||
type toRemove int
|
||||
|
||||
const (
|
||||
removeItem toRemove = iota // removes the given item
|
||||
removeMin // removes smallest item in the subtree
|
||||
removeMax // removes largest item in the subtree
|
||||
)
|
||||
|
||||
// remove removes an item from the subtree rooted at this node.
|
||||
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
|
||||
var i int
|
||||
var found bool
|
||||
switch typ {
|
||||
case removeMax:
|
||||
if len(n.children) == 0 {
|
||||
return n.items.pop()
|
||||
}
|
||||
i = len(n.items)
|
||||
case removeMin:
|
||||
if len(n.children) == 0 {
|
||||
return n.items.removeAt(0)
|
||||
}
|
||||
i = 0
|
||||
case removeItem:
|
||||
i, found = n.items.find(item)
|
||||
if len(n.children) == 0 {
|
||||
if found {
|
||||
return n.items.removeAt(i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
panic("invalid type")
|
||||
}
|
||||
// If we get to here, we have children.
|
||||
if len(n.children[i].items) <= minItems {
|
||||
return n.growChildAndRemove(i, item, minItems, typ)
|
||||
}
|
||||
child := n.mutableChild(i)
|
||||
// Either we had enough items to begin with, or we've done some
|
||||
// merging/stealing, because we've got enough now and we're ready to return
|
||||
// stuff.
|
||||
if found {
|
||||
// The item exists at index 'i', and the child we've selected can give us a
|
||||
// predecessor, since if we've gotten here it's got > minItems items in it.
|
||||
out := n.items[i]
|
||||
// We use our special-case 'remove' call with typ=removeMax to pull the
|
||||
// predecessor of item i (the rightmost leaf of our immediate left child)
|
||||
// and set it into where we pulled the item from.
|
||||
n.items[i] = child.remove(nil, minItems, removeMax)
|
||||
return out
|
||||
}
|
||||
// Final recursive call. Once we're here, we know that the item isn't in this
|
||||
// node and that the child is big enough to remove from.
|
||||
return child.remove(item, minItems, typ)
|
||||
}
|
||||
|
||||
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
|
||||
// item from it while keeping it at minItems, then calls remove to actually
|
||||
// remove it.
|
||||
//
|
||||
// Most documentation says we have to do two sets of special casing:
|
||||
// 1) item is in this node
|
||||
// 2) item is in child
|
||||
// In both cases, we need to handle the two subcases:
|
||||
// A) node has enough values that it can spare one
|
||||
// B) node doesn't have enough values
|
||||
// For the latter, we have to check:
|
||||
// a) left sibling has node to spare
|
||||
// b) right sibling has node to spare
|
||||
// c) we must merge
|
||||
// To simplify our code here, we handle cases #1 and #2 the same:
|
||||
// If a node doesn't have enough items, we make sure it does (using a,b,c).
|
||||
// We then simply redo our remove call, and the second time (regardless of
|
||||
// whether we're in case 1 or 2), we'll have enough items and can guarantee
|
||||
// that we hit case A.
|
||||
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
|
||||
if i > 0 && len(n.children[i-1].items) > minItems {
|
||||
// Steal from left child
|
||||
child := n.mutableChild(i)
|
||||
stealFrom := n.mutableChild(i - 1)
|
||||
stolenItem := stealFrom.items.pop()
|
||||
child.items.insertAt(0, n.items[i-1])
|
||||
n.items[i-1] = stolenItem
|
||||
if len(stealFrom.children) > 0 {
|
||||
child.children.insertAt(0, stealFrom.children.pop())
|
||||
}
|
||||
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
|
||||
// steal from right child
|
||||
child := n.mutableChild(i)
|
||||
stealFrom := n.mutableChild(i + 1)
|
||||
stolenItem := stealFrom.items.removeAt(0)
|
||||
child.items = append(child.items, n.items[i])
|
||||
n.items[i] = stolenItem
|
||||
if len(stealFrom.children) > 0 {
|
||||
child.children = append(child.children, stealFrom.children.removeAt(0))
|
||||
}
|
||||
} else {
|
||||
if i >= len(n.items) {
|
||||
i--
|
||||
}
|
||||
child := n.mutableChild(i)
|
||||
// merge with right child
|
||||
mergeItem := n.items.removeAt(i)
|
||||
mergeChild := n.children.removeAt(i + 1)
|
||||
child.items = append(child.items, mergeItem)
|
||||
child.items = append(child.items, mergeChild.items...)
|
||||
child.children = append(child.children, mergeChild.children...)
|
||||
n.cow.freeNode(mergeChild)
|
||||
}
|
||||
return n.remove(item, minItems, typ)
|
||||
}
|
||||
|
||||
type direction int
|
||||
|
||||
const (
|
||||
descend = direction(-1)
|
||||
ascend = direction(+1)
|
||||
)
|
||||
|
||||
// iterate provides a simple method for iterating over elements in the tree.
|
||||
//
|
||||
// When ascending, the 'start' should be less than 'stop' and when descending,
|
||||
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
|
||||
// will force the iterator to include the first item when it equals 'start',
|
||||
// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
// "greaterThan" or "lessThan" query.
|
||||
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
|
||||
var ok, found bool
|
||||
var index int
|
||||
switch dir {
|
||||
case ascend:
|
||||
if start != nil {
|
||||
index, _ = n.items.find(start)
|
||||
}
|
||||
for i := index; i < len(n.items); i++ {
|
||||
if len(n.children) > 0 {
|
||||
if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
|
||||
hit = true
|
||||
continue
|
||||
}
|
||||
hit = true
|
||||
if stop != nil && !n.items[i].Less(stop) {
|
||||
return hit, false
|
||||
}
|
||||
if !iter(n.items[i]) {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
if len(n.children) > 0 {
|
||||
if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
case descend:
|
||||
if start != nil {
|
||||
index, found = n.items.find(start)
|
||||
if !found {
|
||||
index = index - 1
|
||||
}
|
||||
} else {
|
||||
index = len(n.items) - 1
|
||||
}
|
||||
for i := index; i >= 0; i-- {
|
||||
if start != nil && !n.items[i].Less(start) {
|
||||
if !includeStart || hit || start.Less(n.items[i]) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if len(n.children) > 0 {
|
||||
if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
if stop != nil && !stop.Less(n.items[i]) {
|
||||
return hit, false // continue
|
||||
}
|
||||
hit = true
|
||||
if !iter(n.items[i]) {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
if len(n.children) > 0 {
|
||||
if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
|
||||
return hit, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return hit, true
|
||||
}
|
||||
|
||||
// Used for testing/debugging purposes.
|
||||
func (n *node) print(w io.Writer, level int) {
|
||||
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
|
||||
for _, c := range n.children {
|
||||
c.print(w, level+1)
|
||||
}
|
||||
}
|
||||
|
||||
// BTree is an implementation of a B-Tree.
|
||||
//
|
||||
// BTree stores Item instances in an ordered structure, allowing easy insertion,
|
||||
// removal, and iteration.
|
||||
//
|
||||
// Write operations are not safe for concurrent mutation by multiple
|
||||
// goroutines, but Read operations are.
|
||||
type BTree struct {
|
||||
degree int
|
||||
length int
|
||||
root *node
|
||||
cow *copyOnWriteContext
|
||||
}
|
||||
|
||||
// copyOnWriteContext pointers determine node ownership... a tree with a write
|
||||
// context equivalent to a node's write context is allowed to modify that node.
|
||||
// A tree whose write context does not match a node's is not allowed to modify
|
||||
// it, and must create a new, writable copy (i.e. it's a Clone).
|
||||
//
|
||||
// When doing any write operation, we maintain the invariant that the current
|
||||
// node's context is equal to the context of the tree that requested the write.
|
||||
// We do this by, before we descend into any node, creating a copy with the
|
||||
// correct context if the contexts don't match.
|
||||
//
|
||||
// Since the node we're currently visiting on any write has the requesting
|
||||
// tree's context, that node is modifiable in place. Children of that node may
|
||||
// not share context, but before we descend into them, we'll make a mutable
|
||||
// copy.
|
||||
type copyOnWriteContext struct {
|
||||
freelist *FreeList
|
||||
}
|
||||
|
||||
// Clone clones the btree, lazily. Clone should not be called concurrently,
|
||||
// but the original tree (t) and the new tree (t2) can be used concurrently
|
||||
// once the Clone call completes.
|
||||
//
|
||||
// The internal tree structure of t is marked read-only and shared between t and
|
||||
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
|
||||
// whenever one of b's original nodes would have been modified. Read operations
|
||||
// should have no performance degradation. Write operations for both t and t2
|
||||
// will initially experience minor slow-downs caused by additional allocs and
|
||||
// copies due to the aforementioned copy-on-write logic, but should converge to
|
||||
// the original performance characteristics of the original tree.
|
||||
func (t *BTree) Clone() (t2 *BTree) {
|
||||
// Create two entirely new copy-on-write contexts.
|
||||
// This operation effectively creates three trees:
|
||||
// the original, shared nodes (old t.cow)
// the new t.cow nodes
|
||||
// the new out.cow nodes
|
||||
cow1, cow2 := *t.cow, *t.cow
|
||||
out := *t
|
||||
t.cow = &cow1
|
||||
out.cow = &cow2
|
||||
return &out
|
||||
}
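// Hypothetical usage sketch (not part of this diff) of the copy-on-write Clone
// above: after cloning, writes to either tree stop affecting the other, while
// untouched nodes remain shared.
func exampleClone() (int, int) {
	t := New(8)
	t.ReplaceOrInsert(Int(1))
	t2 := t.Clone()
	t2.ReplaceOrInsert(Int(2))
	return t.Len(), t2.Len() // returns 1 and 2: the clone diverged without modifying t
}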
|
||||
|
||||
// maxItems returns the max number of items to allow per node.
|
||||
func (t *BTree) maxItems() int {
|
||||
return t.degree*2 - 1
|
||||
}
|
||||
|
||||
// minItems returns the min number of items to allow per node (ignored for the
|
||||
// root node).
|
||||
func (t *BTree) minItems() int {
|
||||
return t.degree - 1
|
||||
}
|
||||
|
||||
func (c *copyOnWriteContext) newNode() (n *node) {
|
||||
n = c.freelist.newNode()
|
||||
n.cow = c
|
||||
return
|
||||
}
|
||||
|
||||
type freeType int
|
||||
|
||||
const (
|
||||
ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
|
||||
ftStored // node was stored in the freelist for later use
|
||||
ftNotOwned // node was ignored by COW, since it's owned by another one
|
||||
)
|
||||
|
||||
// freeNode frees a node within a given COW context, if it's owned by that
|
||||
// context. It returns what happened to the node (see freeType const
|
||||
// documentation).
|
||||
func (c *copyOnWriteContext) freeNode(n *node) freeType {
|
||||
if n.cow == c {
|
||||
// clear to allow GC
|
||||
n.items.truncate(0)
|
||||
n.children.truncate(0)
|
||||
n.cow = nil
|
||||
if c.freelist.freeNode(n) {
|
||||
return ftStored
|
||||
} else {
|
||||
return ftFreelistFull
|
||||
}
|
||||
} else {
|
||||
return ftNotOwned
|
||||
}
|
||||
}
|
||||
|
||||
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
|
||||
// already equals the given one, it is removed from the tree and returned.
|
||||
// Otherwise, nil is returned.
|
||||
//
|
||||
// nil cannot be added to the tree (will panic).
|
||||
func (t *BTree) ReplaceOrInsert(item Item) Item {
|
||||
if item == nil {
|
||||
panic("nil item being added to BTree")
|
||||
}
|
||||
if t.root == nil {
|
||||
t.root = t.cow.newNode()
|
||||
t.root.items = append(t.root.items, item)
|
||||
t.length++
|
||||
return nil
|
||||
} else {
|
||||
t.root = t.root.mutableFor(t.cow)
|
||||
if len(t.root.items) >= t.maxItems() {
|
||||
item2, second := t.root.split(t.maxItems() / 2)
|
||||
oldroot := t.root
|
||||
t.root = t.cow.newNode()
|
||||
t.root.items = append(t.root.items, item2)
|
||||
t.root.children = append(t.root.children, oldroot, second)
|
||||
}
|
||||
}
|
||||
out := t.root.insert(item, t.maxItems())
|
||||
if out == nil {
|
||||
t.length++
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Delete removes an item equal to the passed in item from the tree, returning
|
||||
// it. If no such item exists, returns nil.
|
||||
func (t *BTree) Delete(item Item) Item {
|
||||
return t.deleteItem(item, removeItem)
|
||||
}
|
||||
|
||||
// DeleteMin removes the smallest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMin() Item {
|
||||
return t.deleteItem(nil, removeMin)
|
||||
}
|
||||
|
||||
// DeleteMax removes the largest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMax() Item {
|
||||
return t.deleteItem(nil, removeMax)
|
||||
}
|
||||
|
||||
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
|
||||
if t.root == nil || len(t.root.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
t.root = t.root.mutableFor(t.cow)
|
||||
out := t.root.remove(item, t.minItems(), typ)
|
||||
if len(t.root.items) == 0 && len(t.root.children) > 0 {
|
||||
oldroot := t.root
|
||||
t.root = t.root.children[0]
|
||||
t.cow.freeNode(oldroot)
|
||||
}
|
||||
if out != nil {
|
||||
t.length--
|
||||
}
|
||||
return out
|
||||
}
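// Hypothetical sketch (not part of this diff): the three public delete entry
// points above all funnel into deleteItem with a different toRemove mode.
func exampleDelete() {
	t := New(4)
	for i := 1; i <= 5; i++ {
		t.ReplaceOrInsert(Int(i))
	}
	_ = t.Delete(Int(3)) // returns Int(3)
	_ = t.DeleteMin()    // returns Int(1)
	_ = t.DeleteMax()    // returns Int(5)
	// t now holds Int(2) and Int(4), so t.Len() == 2
}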
|
||||
|
||||
// AscendRange calls the iterator for every value in the tree within the range
|
||||
// [greaterOrEqual, lessThan), until iterator returns false.
|
||||
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
|
||||
}
|
||||
|
||||
// AscendLessThan calls the iterator for every value in the tree within the range
|
||||
// [first, pivot), until iterator returns false.
|
||||
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, nil, pivot, false, false, iterator)
|
||||
}
|
||||
|
||||
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
||||
// the range [pivot, last], until iterator returns false.
|
||||
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, pivot, nil, true, false, iterator)
|
||||
}
|
||||
|
||||
// Ascend calls the iterator for every value in the tree within the range
|
||||
// [first, last], until iterator returns false.
|
||||
func (t *BTree) Ascend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(ascend, nil, nil, false, false, iterator)
|
||||
}
|
||||
|
||||
// DescendRange calls the iterator for every value in the tree within the range
|
||||
// [lessOrEqual, greaterThan), until iterator returns false.
|
||||
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
|
||||
}
|
||||
|
||||
// DescendLessOrEqual calls the iterator for every value in the tree within the range
|
||||
// [pivot, first], until iterator returns false.
|
||||
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, pivot, nil, true, false, iterator)
|
||||
}
|
||||
|
||||
// DescendGreaterThan calls the iterator for every value in the tree within
|
||||
// the range (pivot, last], until iterator returns false.
|
||||
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, nil, pivot, false, false, iterator)
|
||||
}
|
||||
|
||||
// Descend calls the iterator for every value in the tree within the range
|
||||
// [last, first], until iterator returns false.
|
||||
func (t *BTree) Descend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(descend, nil, nil, false, false, iterator)
|
||||
}
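// Hypothetical sketch (not part of this diff) of the iterators above: the
// Ascend* range bounds are half-open [greaterOrEqual, lessThan), and Descend
// walks from the largest item down.
func exampleIterate() {
	t := New(4)
	for i := 1; i <= 5; i++ {
		t.ReplaceOrInsert(Int(i))
	}
	t.AscendRange(Int(2), Int(4), func(item Item) bool {
		// visits Int(2) then Int(3); the upper bound is exclusive
		return true
	})
	t.Descend(func(item Item) bool {
		// visits Int(5), Int(4), ..., Int(1)
		return true
	})
}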
|
||||
|
||||
// Get looks for the key item in the tree, returning it. It returns nil if
|
||||
// unable to find that item.
|
||||
func (t *BTree) Get(key Item) Item {
|
||||
if t.root == nil {
|
||||
return nil
|
||||
}
|
||||
return t.root.get(key)
|
||||
}
|
||||
|
||||
// Min returns the smallest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Min() Item {
|
||||
return min(t.root)
|
||||
}
|
||||
|
||||
// Max returns the largest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Max() Item {
|
||||
return max(t.root)
|
||||
}
|
||||
|
||||
// Has returns true if the given key is in the tree.
|
||||
func (t *BTree) Has(key Item) bool {
|
||||
return t.Get(key) != nil
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the tree.
|
||||
func (t *BTree) Len() int {
|
||||
return t.length
|
||||
}
|
||||
|
||||
// Clear removes all items from the btree. If addNodesToFreelist is true,
|
||||
// t's nodes are added to its freelist as part of this call, until the freelist
|
||||
// is full. Otherwise, the root node is simply dereferenced and the subtree
|
||||
// left to Go's normal GC processes.
|
||||
//
|
||||
// This can be much faster
|
||||
// than calling Delete on all elements, because that requires finding/removing
|
||||
// each element in the tree and updating the tree accordingly. It also is
|
||||
// somewhat faster than creating a new tree to replace the old one, because
|
||||
// nodes from the old tree are reclaimed into the freelist for use by the new
|
||||
// one, instead of being lost to the garbage collector.
|
||||
//
|
||||
// This call takes:
|
||||
// O(1): when addNodesToFreelist is false, this is a single operation.
|
||||
// O(1): when the freelist is already full, it breaks out immediately
|
||||
// O(freelist size): when the freelist is empty and the nodes are all owned
|
||||
// by this tree, nodes are added to the freelist until full.
|
||||
// O(tree size): when all nodes are owned by another tree, all nodes are
|
||||
// iterated over looking for nodes to add to the freelist, and due to
|
||||
// ownership, none are.
|
||||
func (t *BTree) Clear(addNodesToFreelist bool) {
|
||||
if t.root != nil && addNodesToFreelist {
|
||||
t.root.reset(t.cow)
|
||||
}
|
||||
t.root, t.length = nil, 0
|
||||
}
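// Hypothetical sketch (not part of this diff): Clear(true) recycles the tree's
// nodes into its freelist, so later inserts can reuse them instead of
// allocating fresh nodes.
func exampleClear(t *BTree) {
	t.Clear(true)
	for i := 0; i < 100; i++ {
		t.ReplaceOrInsert(Int(i)) // may be served from the refilled freelist
	}
}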
|
||||
|
||||
// reset returns a subtree to the freelist. It breaks out immediately if the
|
||||
// freelist is full, since the only benefit of iterating is to fill that
|
||||
// freelist up. Returns true if parent reset call should continue.
|
||||
func (n *node) reset(c *copyOnWriteContext) bool {
|
||||
for _, child := range n.children {
|
||||
if !child.reset(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return c.freeNode(n) != ftFreelistFull
|
||||
}
|
||||
|
||||
// Int implements the Item interface for integers.
|
||||
type Int int
|
||||
|
||||
// Less returns true if int(a) < int(b).
|
||||
func (a Int) Less(b Item) bool {
|
||||
return a < b.(Int)
|
||||
}
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,2 @@
|
|||
go-shlex is a simple lexer for go that supports shell-style quoting,
|
||||
commenting, and escaping.
|
|
@ -0,0 +1,3 @@
|
|||
module github.com/google/shlex
|
||||
|
||||
go 1.13
|
|
@ -0,0 +1,416 @@
|
|||
/*
|
||||
Copyright 2012 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package shlex implements a simple lexer which splits input in to tokens using
|
||||
shell-style rules for quoting and commenting.
|
||||
|
||||
The basic use case uses the default ASCII lexer to split a string into sub-strings:
|
||||
|
||||
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
|
||||
|
||||
To process a stream of strings:
|
||||
|
||||
l := NewLexer(os.Stdin)
for {
	token, err := l.Next()
	if err != nil {
		break // err is io.EOF once the input is exhausted
	}
	// process token
}
|
||||
|
||||
To access the raw token stream (which includes tokens for comments):
|
||||
|
||||
t := NewTokenizer(os.Stdin)
for {
	token, err := t.Next()
	if err != nil {
		break
	}
	// process token
}
|
||||
|
||||
*/
|
||||
package shlex
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TokenType is a top-level token classification: A word, space, comment, unknown.
|
||||
type TokenType int
|
||||
|
||||
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
|
||||
type runeTokenClass int
|
||||
|
||||
// the internal state used by the lexer state machine
|
||||
type lexerState int
|
||||
|
||||
// Token is a (type, value) pair representing a lexical token.
|
||||
type Token struct {
|
||||
tokenType TokenType
|
||||
value string
|
||||
}
|
||||
|
||||
// Equal reports whether tokens a, and b, are equal.
|
||||
// Two tokens are equal if both their types and values are equal. A nil token can
|
||||
// never be equal to another token.
|
||||
func (a *Token) Equal(b *Token) bool {
|
||||
if a == nil || b == nil {
|
||||
return false
|
||||
}
|
||||
if a.tokenType != b.tokenType {
|
||||
return false
|
||||
}
|
||||
return a.value == b.value
|
||||
}
|
||||
|
||||
// Named classes of UTF-8 runes
|
||||
const (
|
||||
spaceRunes = " \t\r\n"
|
||||
escapingQuoteRunes = `"`
|
||||
nonEscapingQuoteRunes = "'"
|
||||
escapeRunes = `\`
|
||||
commentRunes = "#"
|
||||
)
|
||||
|
||||
// Classes of rune token
|
||||
const (
|
||||
unknownRuneClass runeTokenClass = iota
|
||||
spaceRuneClass
|
||||
escapingQuoteRuneClass
|
||||
nonEscapingQuoteRuneClass
|
||||
escapeRuneClass
|
||||
commentRuneClass
|
||||
eofRuneClass
|
||||
)
|
||||
|
||||
// Classes of lexical token
|
||||
const (
|
||||
UnknownToken TokenType = iota
|
||||
WordToken
|
||||
SpaceToken
|
||||
CommentToken
|
||||
)
|
||||
|
||||
// Lexer state machine states
|
||||
const (
|
||||
startState lexerState = iota // no runes have been seen
|
||||
inWordState // processing regular runes in a word
|
||||
escapingState // we have just consumed an escape rune; the next rune is literal
|
||||
escapingQuotedState // we have just consumed an escape rune within a quoted string
|
||||
quotingEscapingState // we are within a quoted string that supports escaping ("...")
|
||||
quotingState // we are within a string that does not support escaping ('...')
|
||||
commentState // we are within a comment (everything following an unquoted or unescaped #)
|
||||
)
|
||||
|
||||
// tokenClassifier is used for classifying rune characters.
|
||||
type tokenClassifier map[rune]runeTokenClass
|
||||
|
||||
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
|
||||
for _, runeChar := range runes {
|
||||
typeMap[runeChar] = tokenType
|
||||
}
|
||||
}
|
||||
|
||||
// newDefaultClassifier creates a new classifier for ASCII characters.
|
||||
func newDefaultClassifier() tokenClassifier {
|
||||
t := tokenClassifier{}
|
||||
t.addRuneClass(spaceRunes, spaceRuneClass)
|
||||
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
|
||||
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
|
||||
t.addRuneClass(escapeRunes, escapeRuneClass)
|
||||
t.addRuneClass(commentRunes, commentRuneClass)
|
||||
return t
|
||||
}
|
||||
|
||||
// ClassifyRune classifies a rune.
|
||||
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
|
||||
return t[runeVal]
|
||||
}
|
||||
|
||||
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
|
||||
type Lexer Tokenizer
|
||||
|
||||
// NewLexer creates a new lexer from an input stream.
|
||||
func NewLexer(r io.Reader) *Lexer {
|
||||
|
||||
return (*Lexer)(NewTokenizer(r))
|
||||
}
|
||||
|
||||
// Next returns the next word, or an error. If there are no more words,
|
||||
// the error will be io.EOF.
|
||||
func (l *Lexer) Next() (string, error) {
|
||||
for {
|
||||
token, err := (*Tokenizer)(l).Next()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
switch token.tokenType {
|
||||
case WordToken:
|
||||
return token.value, nil
|
||||
case CommentToken:
|
||||
// skip comments
|
||||
default:
|
||||
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tokenizer turns an input stream into a sequence of typed tokens
|
||||
type Tokenizer struct {
|
||||
input bufio.Reader
|
||||
classifier tokenClassifier
|
||||
}
|
||||
|
||||
// NewTokenizer creates a new tokenizer from an input stream.
|
||||
func NewTokenizer(r io.Reader) *Tokenizer {
|
||||
input := bufio.NewReader(r)
|
||||
classifier := newDefaultClassifier()
|
||||
return &Tokenizer{
|
||||
input: *input,
|
||||
classifier: classifier}
|
||||
}
|
||||
|
||||
// scanStream scans the stream for the next token using the internal state machine.
|
||||
// It will panic if it encounters a rune which it does not know how to handle.
|
||||
func (t *Tokenizer) scanStream() (*Token, error) {
|
||||
state := startState
|
||||
var tokenType TokenType
|
||||
var value []rune
|
||||
var nextRune rune
|
||||
var nextRuneType runeTokenClass
|
||||
var err error
|
||||
|
||||
for {
|
||||
nextRune, _, err = t.input.ReadRune()
|
||||
nextRuneType = t.classifier.ClassifyRune(nextRune)
|
||||
|
||||
if err == io.EOF {
|
||||
nextRuneType = eofRuneClass
|
||||
err = nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch state {
|
||||
case startState: // no runes read yet
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
return nil, io.EOF
|
||||
}
|
||||
case spaceRuneClass:
|
||||
{
|
||||
}
|
||||
case escapingQuoteRuneClass:
|
||||
{
|
||||
tokenType = WordToken
|
||||
state = quotingEscapingState
|
||||
}
|
||||
case nonEscapingQuoteRuneClass:
|
||||
{
|
||||
tokenType = WordToken
|
||||
state = quotingState
|
||||
}
|
||||
case escapeRuneClass:
|
||||
{
|
||||
tokenType = WordToken
|
||||
state = escapingState
|
||||
}
|
||||
case commentRuneClass:
|
||||
{
|
||||
tokenType = CommentToken
|
||||
state = commentState
|
||||
}
|
||||
default:
|
||||
{
|
||||
tokenType = WordToken
|
||||
value = append(value, nextRune)
|
||||
state = inWordState
|
||||
}
|
||||
}
|
||||
}
|
||||
case inWordState: // in a regular word
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
case spaceRuneClass:
|
||||
{
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
case escapingQuoteRuneClass:
|
||||
{
|
||||
state = quotingEscapingState
|
||||
}
|
||||
case nonEscapingQuoteRuneClass:
|
||||
{
|
||||
state = quotingState
|
||||
}
|
||||
case escapeRuneClass:
|
||||
{
|
||||
state = escapingState
|
||||
}
|
||||
default:
|
||||
{
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
case escapingState: // the rune after an escape character
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
err = fmt.Errorf("EOF found after escape character")
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
default:
|
||||
{
|
||||
state = inWordState
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
case escapingQuotedState: // the next rune after an escape character, in double quotes
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
err = fmt.Errorf("EOF found after escape character")
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
default:
|
||||
{
|
||||
state = quotingEscapingState
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
case quotingEscapingState: // in escaping double quotes
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
err = fmt.Errorf("EOF found when expecting closing quote")
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
case escapingQuoteRuneClass:
|
||||
{
|
||||
state = inWordState
|
||||
}
|
||||
case escapeRuneClass:
|
||||
{
|
||||
state = escapingQuotedState
|
||||
}
|
||||
default:
|
||||
{
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
case quotingState: // in non-escaping single quotes
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
err = fmt.Errorf("EOF found when expecting closing quote")
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
case nonEscapingQuoteRuneClass:
|
||||
{
|
||||
state = inWordState
|
||||
}
|
||||
default:
|
||||
{
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
case commentState: // in a comment
|
||||
{
|
||||
switch nextRuneType {
|
||||
case eofRuneClass:
|
||||
{
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
}
|
||||
case spaceRuneClass:
|
||||
{
|
||||
if nextRune == '\n' {
|
||||
state = startState
|
||||
token := &Token{
|
||||
tokenType: tokenType,
|
||||
value: string(value)}
|
||||
return token, err
|
||||
} else {
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
default:
|
||||
{
|
||||
value = append(value, nextRune)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
{
|
||||
return nil, fmt.Errorf("Unexpected state: %v", state)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next returns the next token in the stream.
|
||||
func (t *Tokenizer) Next() (*Token, error) {
|
||||
return t.scanStream()
|
||||
}
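For readers skimming this vendored file, here is a minimal sketch of driving the tokenizer via Next until io.EOF. The import path github.com/google/shlex and the package name shlex are assumptions inferred from the identifiers above, not something this diff states.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/google/shlex" // assumed import path for the tokenizer above
)

func main() {
	// The classifier and the state machine in scanStream decide whether each
	// rune starts a word, a quoted section, an escape, or a comment.
	tok := shlex.NewTokenizer(strings.NewReader(`cp "my file.txt" dest # comment`))
	for {
		t, err := tok.Next()
		if err == io.EOF {
			break // stream exhausted
		}
		if err != nil {
			fmt.Println("tokenize error:", err)
			return
		}
		fmt.Printf("%v\n", t) // WordToken and CommentToken values
	}
}
```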
|
||||
|
||||
// Split partitions a string into a slice of strings.
|
||||
func Split(s string) ([]string, error) {
|
||||
l := NewLexer(strings.NewReader(s))
|
||||
subStrings := make([]string, 0)
|
||||
for {
|
||||
word, err := l.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return subStrings, nil
|
||||
}
|
||||
return subStrings, err
|
||||
}
|
||||
subStrings = append(subStrings, word)
|
||||
}
|
||||
}
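A similarly hedged sketch of Split, the high-level entry point most callers use: it collects words from the lexer, honours quoting, and drops comments (same assumed import path as above).

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/shlex" // assumed import path, as above
)

func main() {
	// Quoted whitespace stays inside a single word; the trailing
	// comment is discarded by the lexer.
	words, err := shlex.Split(`one "two three" four # ignored`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", words) // ["one" "two three" "four"]
}
```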
|
|
@@ -0,0 +1,19 @@
|
|||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- master
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
install:
|
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
|
@@ -0,0 +1,7 @@
|
|||
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@@ -0,0 +1,25 @@
|
|||
httpcache
|
||||
=========
|
||||
|
||||
[Build Status](https://travis-ci.org/gregjones/httpcache) [GoDoc](https://godoc.org/github.com/gregjones/httpcache)
|
||||
|
||||
Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses.
|
||||
|
||||
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
|
||||
|
||||
Cache Backends
|
||||
--------------
|
||||
|
||||
- The built-in 'memory' cache stores responses in an in-memory map.
|
||||
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
|
||||
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
|
||||
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
|
||||
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
|
||||
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
|
||||
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
|
||||
- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork).
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
- [MIT License](LICENSE.txt)
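To make the description above concrete, here is a small, hedged sketch of wiring the package into an http.Client using the built-in memory cache (NewMemoryCacheTransport, defined later in this diff). The import path is inferred from the repository name and is an assumption.

```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache" // import path inferred from the README above
)

func main() {
	// The transport serves repeat GETs from the in-memory cache where the
	// RFC 7234 rules allow it, and revalidates with ETag/Last-Modified.
	client := httpcache.NewMemoryCacheTransport().Client()

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// NewTransport enables MarkCachedResponses, so cache hits carry the
	// X-From-Cache header.
	fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
}
```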
|
|
@@ -0,0 +1,61 @@
|
|||
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
|
||||
// to supplement an in-memory map with persistent storage
|
||||
//
|
||||
package diskcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"github.com/peterbourgon/diskv"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
|
||||
type Cache struct {
|
||||
d *diskv.Diskv
|
||||
}
|
||||
|
||||
// Get returns the response corresponding to key if present
|
||||
func (c *Cache) Get(key string) (resp []byte, ok bool) {
|
||||
key = keyToFilename(key)
|
||||
resp, err := c.d.Read(key)
|
||||
if err != nil {
|
||||
return []byte{}, false
|
||||
}
|
||||
return resp, true
|
||||
}
|
||||
|
||||
// Set saves a response to the cache as key
|
||||
func (c *Cache) Set(key string, resp []byte) {
|
||||
key = keyToFilename(key)
|
||||
c.d.WriteStream(key, bytes.NewReader(resp), true)
|
||||
}
|
||||
|
||||
// Delete removes the response with key from the cache
|
||||
func (c *Cache) Delete(key string) {
|
||||
key = keyToFilename(key)
|
||||
c.d.Erase(key)
|
||||
}
|
||||
|
||||
func keyToFilename(key string) string {
|
||||
h := md5.New()
|
||||
io.WriteString(h, key)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// New returns a new Cache that will store files in basePath
|
||||
func New(basePath string) *Cache {
|
||||
return &Cache{
|
||||
d: diskv.New(diskv.Options{
|
||||
BasePath: basePath,
|
||||
CacheSizeMax: 100 * 1024 * 1024, // 100MB
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
|
||||
// storage.
|
||||
func NewWithDiskv(d *diskv.Diskv) *Cache {
|
||||
return &Cache{d}
|
||||
}
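A hedged sketch of combining this disk-backed cache with the httpcache Transport added in the next file; the base path and both import paths are illustrative assumptions.

```go
package main

import (
	"io"
	"net/http"

	"github.com/gregjones/httpcache"           // assumed import path
	"github.com/gregjones/httpcache/diskcache" // assumed import path for this package
)

func main() {
	// New wraps diskv with a 100MB cache rooted at the given directory;
	// keys are hashed to file names by keyToFilename.
	cache := diskcache.New("/tmp/httpcache-demo")

	client := &http.Client{Transport: httpcache.NewTransport(cache)}
	resp, err := client.Get("https://example.com/")
	if err == nil {
		// GET responses are written to the cache once the body reaches EOF.
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}
}
```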
|
|
@@ -0,0 +1,551 @@
|
|||
// Package httpcache provides a http.RoundTripper implementation that works as a
|
||||
// mostly RFC-compliant cache for http responses.
|
||||
//
|
||||
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
|
||||
// and not for a shared proxy).
|
||||
//
|
||||
package httpcache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
stale = iota
|
||||
fresh
|
||||
transparent
|
||||
// XFromCache is the header added to responses that are returned from the cache
|
||||
XFromCache = "X-From-Cache"
|
||||
)
|
||||
|
||||
// A Cache interface is used by the Transport to store and retrieve responses.
|
||||
type Cache interface {
|
||||
// Get returns the []byte representation of a cached response and a bool
|
||||
// set to true if the value isn't empty
|
||||
Get(key string) (responseBytes []byte, ok bool)
|
||||
// Set stores the []byte representation of a response against a key
|
||||
Set(key string, responseBytes []byte)
|
||||
// Delete removes the value associated with the key
|
||||
Delete(key string)
|
||||
}
|
||||
|
||||
// cacheKey returns the cache key for req.
|
||||
func cacheKey(req *http.Request) string {
|
||||
if req.Method == http.MethodGet {
|
||||
return req.URL.String()
|
||||
} else {
|
||||
return req.Method + " " + req.URL.String()
|
||||
}
|
||||
}
|
||||
|
||||
// CachedResponse returns the cached http.Response for req if present, and nil
|
||||
// otherwise.
|
||||
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
|
||||
cachedVal, ok := c.Get(cacheKey(req))
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
b := bytes.NewBuffer(cachedVal)
|
||||
return http.ReadResponse(bufio.NewReader(b), req)
|
||||
}
|
||||
|
||||
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
|
||||
type MemoryCache struct {
|
||||
mu sync.RWMutex
|
||||
items map[string][]byte
|
||||
}
|
||||
|
||||
// Get returns the []byte representation of the response and true if present, false if not
|
||||
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
|
||||
c.mu.RLock()
|
||||
resp, ok = c.items[key]
|
||||
c.mu.RUnlock()
|
||||
return resp, ok
|
||||
}
|
||||
|
||||
// Set saves response resp to the cache with key
|
||||
func (c *MemoryCache) Set(key string, resp []byte) {
|
||||
c.mu.Lock()
|
||||
c.items[key] = resp
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Delete removes key from the cache
|
||||
func (c *MemoryCache) Delete(key string) {
|
||||
c.mu.Lock()
|
||||
delete(c.items, key)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// NewMemoryCache returns a new Cache that will store items in an in-memory map
|
||||
func NewMemoryCache() *MemoryCache {
|
||||
c := &MemoryCache{items: map[string][]byte{}}
|
||||
return c
|
||||
}
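MemoryCache can also be used directly wherever the Cache interface is expected; a tiny sketch (assumed import path as before):

```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache" // assumed import path
)

func main() {
	// Get/Set/Delete satisfy the Cache interface; the RWMutex makes the
	// map safe for concurrent use.
	c := httpcache.NewMemoryCache()
	c.Set("https://example.com/", []byte("HTTP/1.1 200 OK\r\n\r\nhello"))

	if raw, ok := c.Get("https://example.com/"); ok {
		fmt.Printf("cached %d bytes\n", len(raw))
	}

	c.Delete("https://example.com/")
	_, ok := c.Get("https://example.com/")
	fmt.Println("still cached:", ok) // false
}
```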
|
||||
|
||||
// Transport is an implementation of http.RoundTripper that will return values from a cache
|
||||
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
|
||||
// to repeated requests allowing servers to return 304 / Not Modified
|
||||
type Transport struct {
|
||||
// The RoundTripper interface actually used to make requests
|
||||
// If nil, http.DefaultTransport is used
|
||||
Transport http.RoundTripper
|
||||
Cache Cache
|
||||
// If true, responses returned from the cache will be given an extra header, X-From-Cache
|
||||
MarkCachedResponses bool
|
||||
}
|
||||
|
||||
// NewTransport returns a new Transport with the
|
||||
// provided Cache implementation and MarkCachedResponses set to true
|
||||
func NewTransport(c Cache) *Transport {
|
||||
return &Transport{Cache: c, MarkCachedResponses: true}
|
||||
}
|
||||
|
||||
// Client returns an *http.Client that caches responses.
|
||||
func (t *Transport) Client() *http.Client {
|
||||
return &http.Client{Transport: t}
|
||||
}
|
||||
|
||||
// varyMatches will return false unless all of the cached values for the headers listed in Vary
|
||||
// match the new request
|
||||
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
|
||||
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
|
||||
header = http.CanonicalHeaderKey(header)
|
||||
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// RoundTrip takes a Request and returns a Response
|
||||
//
|
||||
// If there is a fresh Response already in cache, then it will be returned without connecting to
|
||||
// the server.
|
||||
//
|
||||
// If there is a stale Response, then any validators it contains will be set on the new request
|
||||
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
|
||||
// will be returned.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
cacheKey := cacheKey(req)
|
||||
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
|
||||
var cachedResp *http.Response
|
||||
if cacheable {
|
||||
cachedResp, err = CachedResponse(t.Cache, req)
|
||||
} else {
|
||||
// Need to invalidate an existing value
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
|
||||
transport := t.Transport
|
||||
if transport == nil {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
if cacheable && cachedResp != nil && err == nil {
|
||||
if t.MarkCachedResponses {
|
||||
cachedResp.Header.Set(XFromCache, "1")
|
||||
}
|
||||
|
||||
if varyMatches(cachedResp, req) {
|
||||
// Can only use cached value if the new request doesn't Vary significantly
|
||||
freshness := getFreshness(cachedResp.Header, req.Header)
|
||||
if freshness == fresh {
|
||||
return cachedResp, nil
|
||||
}
|
||||
|
||||
if freshness == stale {
|
||||
var req2 *http.Request
|
||||
// Add validators if caller hasn't already done so
|
||||
etag := cachedResp.Header.Get("etag")
|
||||
if etag != "" && req.Header.Get("etag") == "" {
|
||||
req2 = cloneRequest(req)
|
||||
req2.Header.Set("if-none-match", etag)
|
||||
}
|
||||
lastModified := cachedResp.Header.Get("last-modified")
|
||||
if lastModified != "" && req.Header.Get("last-modified") == "" {
|
||||
if req2 == nil {
|
||||
req2 = cloneRequest(req)
|
||||
}
|
||||
req2.Header.Set("if-modified-since", lastModified)
|
||||
}
|
||||
if req2 != nil {
|
||||
req = req2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp, err = transport.RoundTrip(req)
|
||||
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
|
||||
// Replace the 304 response with the one from cache, but update with some new headers
|
||||
endToEndHeaders := getEndToEndHeaders(resp.Header)
|
||||
for _, header := range endToEndHeaders {
|
||||
cachedResp.Header[header] = resp.Header[header]
|
||||
}
|
||||
resp = cachedResp
|
||||
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
|
||||
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
|
||||
// In case of transport failure and stale-if-error activated, returns cached content
|
||||
// when available
|
||||
return cachedResp, nil
|
||||
} else {
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
reqCacheControl := parseCacheControl(req.Header)
|
||||
if _, ok := reqCacheControl["only-if-cached"]; ok {
|
||||
resp = newGatewayTimeoutResponse(req)
|
||||
} else {
|
||||
resp, err = transport.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
|
||||
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
|
||||
varyKey = http.CanonicalHeaderKey(varyKey)
|
||||
fakeHeader := "X-Varied-" + varyKey
|
||||
reqValue := req.Header.Get(varyKey)
|
||||
if reqValue != "" {
|
||||
resp.Header.Set(fakeHeader, reqValue)
|
||||
}
|
||||
}
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
// Delay caching until EOF is reached.
|
||||
resp.Body = &cachingReadCloser{
|
||||
R: resp.Body,
|
||||
OnEOF: func(r io.Reader) {
|
||||
resp := *resp
|
||||
resp.Body = ioutil.NopCloser(r)
|
||||
respBytes, err := httputil.DumpResponse(&resp, true)
|
||||
if err == nil {
|
||||
t.Cache.Set(cacheKey, respBytes)
|
||||
}
|
||||
},
|
||||
}
|
||||
default:
|
||||
respBytes, err := httputil.DumpResponse(resp, true)
|
||||
if err == nil {
|
||||
t.Cache.Set(cacheKey, respBytes)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
t.Cache.Delete(cacheKey)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
|
||||
var ErrNoDateHeader = errors.New("no Date header")
|
||||
|
||||
// Date parses and returns the value of the Date header.
|
||||
func Date(respHeaders http.Header) (date time.Time, err error) {
|
||||
dateHeader := respHeaders.Get("date")
|
||||
if dateHeader == "" {
|
||||
err = ErrNoDateHeader
|
||||
return
|
||||
}
|
||||
|
||||
return time.Parse(time.RFC1123, dateHeader)
|
||||
}
|
||||
|
||||
type realClock struct{}
|
||||
|
||||
func (c *realClock) since(d time.Time) time.Duration {
|
||||
return time.Since(d)
|
||||
}
|
||||
|
||||
type timer interface {
|
||||
since(d time.Time) time.Duration
|
||||
}
|
||||
|
||||
var clock timer = &realClock{}
|
||||
|
||||
// getFreshness will return one of fresh/stale/transparent based on the cache-control
|
||||
// values of the request and the response
|
||||
//
|
||||
// fresh indicates the response can be returned
|
||||
// stale indicates that the response needs validating before it is returned
|
||||
// transparent indicates the response should not be used to fulfil the request
|
||||
//
|
||||
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
|
||||
// significant. Similarly, s-maxage isn't used.
|
||||
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
|
||||
respCacheControl := parseCacheControl(respHeaders)
|
||||
reqCacheControl := parseCacheControl(reqHeaders)
|
||||
if _, ok := reqCacheControl["no-cache"]; ok {
|
||||
return transparent
|
||||
}
|
||||
if _, ok := respCacheControl["no-cache"]; ok {
|
||||
return stale
|
||||
}
|
||||
if _, ok := reqCacheControl["only-if-cached"]; ok {
|
||||
return fresh
|
||||
}
|
||||
|
||||
date, err := Date(respHeaders)
|
||||
if err != nil {
|
||||
return stale
|
||||
}
|
||||
currentAge := clock.since(date)
|
||||
|
||||
var lifetime time.Duration
|
||||
var zeroDuration time.Duration
|
||||
|
||||
// If a response includes both an Expires header and a max-age directive,
|
||||
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
|
||||
if maxAge, ok := respCacheControl["max-age"]; ok {
|
||||
lifetime, err = time.ParseDuration(maxAge + "s")
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
}
|
||||
} else {
|
||||
expiresHeader := respHeaders.Get("Expires")
|
||||
if expiresHeader != "" {
|
||||
expires, err := time.Parse(time.RFC1123, expiresHeader)
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
} else {
|
||||
lifetime = expires.Sub(date)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if maxAge, ok := reqCacheControl["max-age"]; ok {
|
||||
// the client is willing to accept a response whose age is no greater than the specified time in seconds
|
||||
lifetime, err = time.ParseDuration(maxAge + "s")
|
||||
if err != nil {
|
||||
lifetime = zeroDuration
|
||||
}
|
||||
}
|
||||
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
|
||||
// the client wants a response that will still be fresh for at least the specified number of seconds.
|
||||
minfreshDuration, err := time.ParseDuration(minfresh + "s")
|
||||
if err == nil {
|
||||
currentAge = time.Duration(currentAge + minfreshDuration)
|
||||
}
|
||||
}
|
||||
|
||||
if maxstale, ok := reqCacheControl["max-stale"]; ok {
|
||||
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
|
||||
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
|
||||
// its expiration time by no more than the specified number of seconds.
|
||||
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
|
||||
//
|
||||
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
|
||||
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
|
||||
// return-value available here.
|
||||
if maxstale == "" {
|
||||
return fresh
|
||||
}
|
||||
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
|
||||
if err == nil {
|
||||
currentAge = time.Duration(currentAge - maxstaleDuration)
|
||||
}
|
||||
}
|
||||
|
||||
if lifetime > currentAge {
|
||||
return fresh
|
||||
}
|
||||
|
||||
return stale
|
||||
}
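A test-style sketch of the freshness rules above. getFreshness and the fresh/stale constants are unexported, so this would have to sit inside the httpcache package itself; the header values are made up purely for illustration.

```go
package httpcache

import (
	"net/http"
	"testing"
	"time"
)

// TestMaxAgeFreshness sketches the max-age branch: a response dated now
// with max-age=60 is fresh, while one dated two minutes ago is stale.
func TestMaxAgeFreshness(t *testing.T) {
	respHeaders := func(date time.Time) http.Header {
		h := http.Header{}
		h.Set("Date", date.UTC().Format(time.RFC1123))
		h.Set("Cache-Control", "max-age=60")
		return h
	}

	if got := getFreshness(respHeaders(time.Now()), http.Header{}); got != fresh {
		t.Fatalf("expected fresh, got %v", got)
	}
	if got := getFreshness(respHeaders(time.Now().Add(-2*time.Minute)), http.Header{}); got != stale {
		t.Fatalf("expected stale, got %v", got)
	}
}
```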
|
||||
|
||||
// Returns true if either the request or the response includes the stale-if-error
|
||||
// cache control extension: https://tools.ietf.org/html/rfc5861
|
||||
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
|
||||
respCacheControl := parseCacheControl(respHeaders)
|
||||
reqCacheControl := parseCacheControl(reqHeaders)
|
||||
|
||||
var err error
|
||||
lifetime := time.Duration(-1)
|
||||
|
||||
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
|
||||
if staleMaxAge != "" {
|
||||
lifetime, err = time.ParseDuration(staleMaxAge + "s")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
|
||||
if staleMaxAge != "" {
|
||||
lifetime, err = time.ParseDuration(staleMaxAge + "s")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if lifetime >= 0 {
|
||||
date, err := Date(respHeaders)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
currentAge := clock.since(date)
|
||||
if lifetime > currentAge {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getEndToEndHeaders(respHeaders http.Header) []string {
|
||||
// These headers are always hop-by-hop
|
||||
hopByHopHeaders := map[string]struct{}{
|
||||
"Connection": struct{}{},
|
||||
"Keep-Alive": struct{}{},
|
||||
"Proxy-Authenticate": struct{}{},
|
||||
"Proxy-Authorization": struct{}{},
|
||||
"Te": struct{}{},
|
||||
"Trailers": struct{}{},
|
||||
"Transfer-Encoding": struct{}{},
|
||||
"Upgrade": struct{}{},
|
||||
}
|
||||
|
||||
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
|
||||
// any header listed in connection, if present, is also considered hop-by-hop
|
||||
if strings.Trim(extra, " ") != "" {
|
||||
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
|
||||
}
|
||||
}
|
||||
endToEndHeaders := []string{}
|
||||
for respHeader := range respHeaders {
|
||||
if _, ok := hopByHopHeaders[respHeader]; !ok {
|
||||
endToEndHeaders = append(endToEndHeaders, respHeader)
|
||||
}
|
||||
}
|
||||
return endToEndHeaders
|
||||
}
|
||||
|
||||
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
|
||||
if _, ok := respCacheControl["no-store"]; ok {
|
||||
return false
|
||||
}
|
||||
if _, ok := reqCacheControl["no-store"]; ok {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
|
||||
var braw bytes.Buffer
|
||||
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
|
||||
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
||||
|
||||
type cacheControl map[string]string
|
||||
|
||||
func parseCacheControl(headers http.Header) cacheControl {
|
||||
cc := cacheControl{}
|
||||
ccHeader := headers.Get("Cache-Control")
|
||||
for _, part := range strings.Split(ccHeader, ",") {
|
||||
part = strings.Trim(part, " ")
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
if strings.ContainsRune(part, '=') {
|
||||
keyval := strings.Split(part, "=")
|
||||
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
|
||||
} else {
|
||||
cc[part] = ""
|
||||
}
|
||||
}
|
||||
return cc
|
||||
}
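For reference, a quick sketch of the map parseCacheControl produces; since the function is unexported, this is again written as if it lived inside the package.

```go
package httpcache

import (
	"fmt"
	"net/http"
)

// sketchParseCacheControl shows the directive map for a typical header:
// value-less directives map to "", valued ones keep their argument.
func sketchParseCacheControl() {
	h := http.Header{}
	h.Set("Cache-Control", "no-store, max-age=300")

	cc := parseCacheControl(h)
	_, noStore := cc["no-store"]
	fmt.Println(noStore, cc["max-age"]) // true 300
}
```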
|
||||
|
||||
// headerAllCommaSepValues returns all comma-separated values (each
|
||||
// with whitespace trimmed) for header name in headers. According to
|
||||
// Section 4.2 of the HTTP/1.1 spec
|
||||
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
|
||||
// values from multiple occurrences of a header should be concatenated, if
|
||||
// the header's value is a comma-separated list.
|
||||
func headerAllCommaSepValues(headers http.Header, name string) []string {
|
||||
var vals []string
|
||||
for _, val := range headers[http.CanonicalHeaderKey(name)] {
|
||||
fields := strings.Split(val, ",")
|
||||
for i, f := range fields {
|
||||
fields[i] = strings.TrimSpace(f)
|
||||
}
|
||||
vals = append(vals, fields...)
|
||||
}
|
||||
return vals
|
||||
}
|
||||
|
||||
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
|
||||
// handler with a full copy of the content read from R when EOF is
|
||||
// reached.
|
||||
type cachingReadCloser struct {
|
||||
// Underlying ReadCloser.
|
||||
R io.ReadCloser
|
||||
// OnEOF is called with a copy of the content of R when EOF is reached.
|
||||
OnEOF func(io.Reader)
|
||||
|
||||
buf bytes.Buffer // buf stores a copy of the content of R.
|
||||
}
|
||||
|
||||
// Read reads the next len(p) bytes from R or until R is drained. The
|
||||
// return value n is the number of bytes read. If R has no data to
|
||||
// return, err is io.EOF and OnEOF is called with a full copy of what
|
||||
// has been read so far.
|
||||
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
|
||||
n, err = r.R.Read(p)
|
||||
r.buf.Write(p[:n])
|
||||
if err == io.EOF {
|
||||
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *cachingReadCloser) Close() error {
|
||||
return r.R.Close()
|
||||
}
|
||||
|
||||
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
|
||||
func NewMemoryCacheTransport() *Transport {
|
||||
c := NewMemoryCache()
|
||||
t := NewTransport(c)
|
||||
return t
|
||||
}
|
|
@@ -0,0 +1,11 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- "1.8"
|
||||
- "1.9"
|
||||
- "1.10"
|
||||
- "1.11"
|
||||
- "1.12"
|
||||
- master
|
||||
|
||||
script: go test -v ./...
|
|
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@@ -0,0 +1,7 @@
|
|||
This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package.
|
||||
|
||||
It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license.
|
||||
|
||||
The following additional features are supported:
|
||||
* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called.
|
||||
* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers.
|
|
@@ -0,0 +1,637 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tabwriter implements a write filter (tabwriter.Writer) that
|
||||
// translates tabbed columns in input into properly aligned text.
|
||||
//
|
||||
// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
|
||||
// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
|
||||
// with support for additional features.
|
||||
//
|
||||
// The package is using the Elastic Tabstops algorithm described at
|
||||
// http://nickgravgaard.com/elastictabstops/index.html.
|
||||
package tabwriter
|
||||
|
||||
import (
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Filter implementation
|
||||
|
||||
// A cell represents a segment of text terminated by tabs or line breaks.
|
||||
// The text itself is stored in a separate buffer; cell only describes the
|
||||
// segment's size in bytes, its width in runes, and whether it's an htab
|
||||
// ('\t') terminated cell.
|
||||
//
|
||||
type cell struct {
|
||||
size int // cell size in bytes
|
||||
width int // cell width in runes
|
||||
htab bool // true if the cell is terminated by an htab ('\t')
|
||||
}
|
||||
|
||||
// A Writer is a filter that inserts padding around tab-delimited
|
||||
// columns in its input to align them in the output.
|
||||
//
|
||||
// The Writer treats incoming bytes as UTF-8-encoded text consisting
|
||||
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
|
||||
// and newline ('\n') or formfeed ('\f') characters; both newline and
|
||||
// formfeed act as line breaks.
|
||||
//
|
||||
// Tab-terminated cells in contiguous lines constitute a column. The
|
||||
// Writer inserts padding as needed to make all cells in a column have
|
||||
// the same width, effectively aligning the columns. It assumes that
|
||||
// all characters have the same width, except for tabs for which a
|
||||
// tabwidth must be specified. Column cells must be tab-terminated, not
|
||||
// tab-separated: non-tab terminated trailing text at the end of a line
|
||||
// forms a cell but that cell is not part of an aligned column.
|
||||
// For instance, in this example (where | stands for a horizontal tab):
|
||||
//
|
||||
// aaaa|bbb|d
|
||||
// aa |b |dd
|
||||
// a |
|
||||
// aa |cccc|eee
|
||||
//
|
||||
// the b and c are in distinct columns (the b column is not contiguous
|
||||
// all the way). The d and e are not in a column at all (there's no
|
||||
// terminating tab, nor would the column be contiguous).
|
||||
//
|
||||
// The Writer assumes that all Unicode code points have the same width;
|
||||
// this may not be true in some fonts or if the string contains combining
|
||||
// characters.
|
||||
//
|
||||
// If DiscardEmptyColumns is set, empty columns that are terminated
|
||||
// entirely by vertical (or "soft") tabs are discarded. Columns
|
||||
// terminated by horizontal (or "hard") tabs are not affected by
|
||||
// this flag.
|
||||
//
|
||||
// If a Writer is configured to filter HTML, HTML tags and entities
|
||||
// are passed through. The widths of tags and entities are
|
||||
// assumed to be zero (tags) and one (entities) for formatting purposes.
|
||||
//
|
||||
// A segment of text may be escaped by bracketing it with Escape
|
||||
// characters. The tabwriter passes escaped text segments through
|
||||
// unchanged. In particular, it does not interpret any tabs or line
|
||||
// breaks within the segment. If the StripEscape flag is set, the
|
||||
// Escape characters are stripped from the output; otherwise they
|
||||
// are passed through as well. For the purpose of formatting, the
|
||||
// width of the escaped text is always computed excluding the Escape
|
||||
// characters.
|
||||
//
|
||||
// The formfeed character acts like a newline but it also terminates
|
||||
// all columns in the current line (effectively calling Flush). Tab-
|
||||
// terminated cells in the next line start new columns. Unless found
|
||||
// inside an HTML tag or inside an escaped text segment, formfeed
|
||||
// characters appear as newlines in the output.
|
||||
//
|
||||
// The Writer must buffer input internally, because proper spacing
|
||||
// of one line may depend on the cells in future lines. Clients must
|
||||
// call Flush when done calling Write.
|
||||
//
|
||||
type Writer struct {
|
||||
// configuration
|
||||
output io.Writer
|
||||
minwidth int
|
||||
tabwidth int
|
||||
padding int
|
||||
padbytes [8]byte
|
||||
flags uint
|
||||
|
||||
// current state
|
||||
buf []byte // collected text excluding tabs or line breaks
|
||||
pos int // buffer position up to which cell.width of incomplete cell has been computed
|
||||
cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
|
||||
endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
|
||||
lines [][]cell // list of lines; each line is a list of cells
|
||||
widths []int // list of column widths in runes - re-used during formatting
|
||||
|
||||
maxwidths []int // list of max column widths in runes
|
||||
}
|
||||
|
||||
// addLine adds a new line.
|
||||
// flushed is a hint indicating whether the underlying writer was just flushed.
|
||||
// If so, the previous line is not likely to be a good indicator of the new line's cells.
|
||||
func (b *Writer) addLine(flushed bool) {
|
||||
// Grow slice instead of appending,
|
||||
// as that gives us an opportunity
|
||||
// to re-use an existing []cell.
|
||||
if n := len(b.lines) + 1; n <= cap(b.lines) {
|
||||
b.lines = b.lines[:n]
|
||||
b.lines[n-1] = b.lines[n-1][:0]
|
||||
} else {
|
||||
b.lines = append(b.lines, nil)
|
||||
}
|
||||
|
||||
if !flushed {
|
||||
// The previous line is probably a good indicator
|
||||
// of how many cells the current line will have.
|
||||
// If the current line's capacity is smaller than that,
|
||||
// abandon it and make a new one.
|
||||
if n := len(b.lines); n >= 2 {
|
||||
if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
|
||||
b.lines[n-1] = make([]cell, 0, prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the current state.
|
||||
func (b *Writer) reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.pos = 0
|
||||
b.cell = cell{}
|
||||
b.endChar = 0
|
||||
b.lines = b.lines[0:0]
|
||||
b.widths = b.widths[0:0]
|
||||
b.addLine(true)
|
||||
}
|
||||
|
||||
// Internal representation (current state):
|
||||
//
|
||||
// - all text written is appended to buf; tabs and line breaks are stripped away
|
||||
// - at any given time there is a (possibly empty) incomplete cell at the end
|
||||
// (the cell starts after a tab or line break)
|
||||
// - cell.size is the number of bytes belonging to the cell so far
|
||||
// - cell.width is text width in runes of that cell from the start of the cell to
|
||||
// position pos; html tags and entities are excluded from this width if html
|
||||
// filtering is enabled
|
||||
// - the sizes and widths of processed text are kept in the lines list
|
||||
// which contains a list of cells for each line
|
||||
// - the widths list is a temporary list with current widths used during
|
||||
// formatting; it is kept in Writer because it's re-used
|
||||
//
|
||||
// |<---------- size ---------->|
|
||||
// | |
|
||||
// |<- width ->|<- ignored ->| |
|
||||
// | | | |
|
||||
// [---processed---tab------------<tag>...</tag>...]
|
||||
// ^ ^ ^
|
||||
// | | |
|
||||
// buf start of incomplete cell pos
|
||||
|
||||
// Formatting can be controlled with these flags.
|
||||
const (
|
||||
// Ignore html tags and treat entities (starting with '&'
|
||||
// and ending in ';') as single characters (width = 1).
|
||||
FilterHTML uint = 1 << iota
|
||||
|
||||
// Strip Escape characters bracketing escaped text segments
|
||||
// instead of passing them through unchanged with the text.
|
||||
StripEscape
|
||||
|
||||
// Force right-alignment of cell content.
|
||||
// Default is left-alignment.
|
||||
AlignRight
|
||||
|
||||
// Handle empty columns as if they were not present in
|
||||
// the input in the first place.
|
||||
DiscardEmptyColumns
|
||||
|
||||
// Always use tabs for indentation columns (i.e., padding of
|
||||
// leading empty cells on the left) independent of padchar.
|
||||
TabIndent
|
||||
|
||||
// Print a vertical bar ('|') between columns (after formatting).
|
||||
// Discarded columns appear as zero-width columns ("||").
|
||||
Debug
|
||||
|
||||
// Remember maximum widths seen per column even after Flush() is called.
|
||||
RememberWidths
|
||||
)
|
||||
|
||||
// A Writer must be initialized with a call to Init. The first parameter (output)
|
||||
// specifies the filter output. The remaining parameters control the formatting:
|
||||
//
|
||||
// minwidth minimal cell width including any padding
|
||||
// tabwidth width of tab characters (equivalent number of spaces)
|
||||
// padding padding added to a cell before computing its width
|
||||
// padchar ASCII char used for padding
|
||||
// if padchar == '\t', the Writer will assume that the
|
||||
// width of a '\t' in the formatted output is tabwidth,
|
||||
// and cells are left-aligned independent of align_left
|
||||
// (for correct-looking results, tabwidth must correspond
|
||||
// to the tab width in the viewer displaying the result)
|
||||
// flags formatting control
|
||||
//
|
||||
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
if minwidth < 0 || tabwidth < 0 || padding < 0 {
|
||||
panic("negative minwidth, tabwidth, or padding")
|
||||
}
|
||||
b.output = output
|
||||
b.minwidth = minwidth
|
||||
b.tabwidth = tabwidth
|
||||
b.padding = padding
|
||||
for i := range b.padbytes {
|
||||
b.padbytes[i] = padchar
|
||||
}
|
||||
if padchar == '\t' {
|
||||
// tab padding enforces left-alignment
|
||||
flags &^= AlignRight
|
||||
}
|
||||
b.flags = flags
|
||||
|
||||
b.reset()
|
||||
|
||||
return b
|
||||
}
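To ground the Init parameters documented above, a small hedged example using the NewWriter convenience constructor defined at the end of this file; the import path github.com/liggitt/tabwriter is an assumption about which tabwriter fork this is.

```go
package main

import (
	"os"

	"github.com/liggitt/tabwriter" // assumed import path for this tabwriter fork
)

func main() {
	// minwidth=0, tabwidth=8, padding=2, padchar=' ', no flags:
	// left-aligned cells separated by at least two spaces.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)

	// Cells must be tab-terminated to participate in column alignment.
	w.Write([]byte("NAME\tREADY\tAGE\n"))
	w.Write([]byte("nginx\t1/1\t5d\n"))
	w.Write([]byte("karmada-controller-manager\t1/1\t12h\n"))

	// Flush formats and emits the buffered lines.
	w.Flush()
}
```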
|
||||
|
||||
// debugging support (keep code around)
|
||||
func (b *Writer) dump() {
|
||||
pos := 0
|
||||
for i, line := range b.lines {
|
||||
print("(", i, ") ")
|
||||
for _, c := range line {
|
||||
print("[", string(b.buf[pos:pos+c.size]), "]")
|
||||
pos += c.size
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
|
||||
// local error wrapper so we can distinguish errors we want to return
|
||||
// as errors from genuine panics (which we don't want to return as errors)
|
||||
type osError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *Writer) write0(buf []byte) {
|
||||
n, err := b.output.Write(buf)
|
||||
if n != len(buf) && err == nil {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
panic(osError{err})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Writer) writeN(src []byte, n int) {
|
||||
for n > len(src) {
|
||||
b.write0(src)
|
||||
n -= len(src)
|
||||
}
|
||||
b.write0(src[0:n])
|
||||
}
|
||||
|
||||
var (
|
||||
newline = []byte{'\n'}
|
||||
tabs = []byte("\t\t\t\t\t\t\t\t")
|
||||
)
|
||||
|
||||
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
|
||||
if b.padbytes[0] == '\t' || useTabs {
|
||||
// padding is done with tabs
|
||||
if b.tabwidth == 0 {
|
||||
return // tabs have no width - can't do any padding
|
||||
}
|
||||
// make cellw the smallest multiple of b.tabwidth
|
||||
cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
|
||||
n := cellw - textw // amount of padding
|
||||
if n < 0 {
|
||||
panic("internal error")
|
||||
}
|
||||
b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
|
||||
return
|
||||
}
|
||||
|
||||
// padding is done with non-tab characters
|
||||
b.writeN(b.padbytes[0:], cellw-textw)
|
||||
}
|
||||
|
||||
var vbar = []byte{'|'}
|
||||
|
||||
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
for i := line0; i < line1; i++ {
|
||||
line := b.lines[i]
|
||||
|
||||
// if TabIndent is set, use tabs to pad leading empty cells
|
||||
useTabs := b.flags&TabIndent != 0
|
||||
|
||||
for j, c := range line {
|
||||
if j > 0 && b.flags&Debug != 0 {
|
||||
// indicate column break
|
||||
b.write0(vbar)
|
||||
}
|
||||
|
||||
if c.size == 0 {
|
||||
// empty cell
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], useTabs)
|
||||
}
|
||||
} else {
|
||||
// non-empty cell
|
||||
useTabs = false
|
||||
if b.flags&AlignRight == 0 { // align left
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
} else { // align right
|
||||
if j < len(b.widths) {
|
||||
b.writePadding(c.width, b.widths[j], false)
|
||||
}
|
||||
b.write0(b.buf[pos : pos+c.size])
|
||||
pos += c.size
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i+1 == len(b.lines) {
|
||||
// last buffered line - we don't have a newline, so just write
|
||||
// any outstanding buffered data
|
||||
b.write0(b.buf[pos : pos+b.cell.size])
|
||||
pos += b.cell.size
|
||||
} else {
|
||||
// not the last line - write newline
|
||||
b.write0(newline)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Format the text between line0 and line1 (excluding line1); pos
|
||||
// is the buffer position corresponding to the beginning of line0.
|
||||
// Returns the buffer position corresponding to the beginning of
|
||||
// line1 and an error, if any.
|
||||
//
|
||||
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
|
||||
pos = pos0
|
||||
column := len(b.widths)
|
||||
for this := line0; this < line1; this++ {
|
||||
line := b.lines[this]
|
||||
|
||||
if column >= len(line)-1 {
|
||||
continue
|
||||
}
|
||||
// cell exists in this column => this line
|
||||
// has more cells than the previous line
|
||||
// (the last cell per line is ignored because cells are
|
||||
// tab-terminated; the last cell per line describes the
|
||||
// text before the newline/formfeed and does not belong
|
||||
// to a column)
|
||||
|
||||
// print unprinted lines until beginning of block
|
||||
pos = b.writeLines(pos, line0, this)
|
||||
line0 = this
|
||||
|
||||
// column block begin
|
||||
width := b.minwidth // minimal column width
|
||||
discardable := true // true if all cells in this column are empty and "soft"
|
||||
for ; this < line1; this++ {
|
||||
line = b.lines[this]
|
||||
if column >= len(line)-1 {
|
||||
break
|
||||
}
|
||||
// cell exists in this column
|
||||
c := line[column]
|
||||
// update width
|
||||
if w := c.width + b.padding; w > width {
|
||||
width = w
|
||||
}
|
||||
// update discardable
|
||||
if c.width > 0 || c.htab {
|
||||
discardable = false
|
||||
}
|
||||
}
|
||||
// column block end
|
||||
|
||||
// discard empty columns if necessary
|
||||
if discardable && b.flags&DiscardEmptyColumns != 0 {
|
||||
width = 0
|
||||
}
|
||||
|
||||
if b.flags&RememberWidths != 0 {
|
||||
if len(b.maxwidths) < len(b.widths) {
|
||||
b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(b.maxwidths) == len(b.widths):
|
||||
b.maxwidths = append(b.maxwidths, width)
|
||||
case b.maxwidths[len(b.widths)] > width:
|
||||
width = b.maxwidths[len(b.widths)]
|
||||
case b.maxwidths[len(b.widths)] < width:
|
||||
b.maxwidths[len(b.widths)] = width
|
||||
}
|
||||
}
|
||||
|
||||
// format and print all columns to the right of this column
|
||||
// (we know the widths of this column and all columns to the left)
|
||||
b.widths = append(b.widths, width) // push width
|
||||
pos = b.format(pos, line0, this)
|
||||
b.widths = b.widths[0 : len(b.widths)-1] // pop width
|
||||
line0 = this
|
||||
}
|
||||
|
||||
// print unprinted lines until end
|
||||
return b.writeLines(pos, line0, line1)
|
||||
}
|
||||
|
||||
// Append text to current cell.
|
||||
func (b *Writer) append(text []byte) {
|
||||
b.buf = append(b.buf, text...)
|
||||
b.cell.size += len(text)
|
||||
}
|
||||
|
||||
// Update the cell width.
|
||||
func (b *Writer) updateWidth() {
|
||||
b.cell.width += utf8.RuneCount(b.buf[b.pos:])
|
||||
b.pos = len(b.buf)
|
||||
}
|
||||
|
||||
// To escape a text segment, bracket it with Escape characters.
|
||||
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
|
||||
// does not terminate a cell and constitutes a single character of
|
||||
// width one for formatting purposes.
|
||||
//
|
||||
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
|
||||
//
|
||||
const Escape = '\xff'
|
||||
|
||||
// Start escaped mode.
|
||||
func (b *Writer) startEscape(ch byte) {
|
||||
switch ch {
|
||||
case Escape:
|
||||
b.endChar = Escape
|
||||
case '<':
|
||||
b.endChar = '>'
|
||||
case '&':
|
||||
b.endChar = ';'
|
||||
}
|
||||
}
|
||||
|
||||
// Terminate escaped mode. If the escaped text was an HTML tag, its width
|
||||
// is assumed to be zero for formatting purposes; if it was an HTML entity,
|
||||
// its width is assumed to be one. In all other cases, the width is the
|
||||
// unicode width of the text.
|
||||
//
|
||||
func (b *Writer) endEscape() {
|
||||
switch b.endChar {
|
||||
case Escape:
|
||||
b.updateWidth()
|
||||
if b.flags&StripEscape == 0 {
|
||||
b.cell.width -= 2 // don't count the Escape chars
|
||||
}
|
||||
case '>': // tag of zero width
|
||||
case ';':
|
||||
b.cell.width++ // entity, count as one rune
|
||||
}
|
||||
b.pos = len(b.buf)
|
||||
b.endChar = 0
|
||||
}
|
||||
|
||||
// Terminate the current cell by adding it to the list of cells of the
|
||||
// current line. Returns the number of cells in that line.
|
||||
//
|
||||
func (b *Writer) terminateCell(htab bool) int {
|
||||
b.cell.htab = htab
|
||||
line := &b.lines[len(b.lines)-1]
|
||||
*line = append(*line, b.cell)
|
||||
b.cell = cell{}
|
||||
return len(*line)
|
||||
}
|
||||
|
||||
func handlePanic(err *error, op string) {
|
||||
if e := recover(); e != nil {
|
||||
if nerr, ok := e.(osError); ok {
|
||||
*err = nerr.err
|
||||
return
|
||||
}
|
||||
panic("tabwriter: panic during " + op)
|
||||
}
|
||||
}
|
||||
|
||||
// RememberedWidths returns a copy of the remembered per-column maximum widths.
|
||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
||||
func (b *Writer) RememberedWidths() []int {
|
||||
retval := make([]int, len(b.maxwidths))
|
||||
copy(retval, b.maxwidths)
|
||||
return retval
|
||||
}
|
||||
|
||||
// SetRememberedWidths sets the remembered per-column maximum widths.
|
||||
// Requires use of the RememberWidths flag, and is not threadsafe.
|
||||
func (b *Writer) SetRememberedWidths(widths []int) *Writer {
|
||||
b.maxwidths = make([]int, len(widths))
|
||||
copy(b.maxwidths, widths)
|
||||
return b
|
||||
}
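A sketch of the RememberWidths extension advertised in this fork's README: widths observed by one writer can be transplanted into another, so separately flushed tables stay aligned (import path assumed as before).

```go
package main

import (
	"os"

	"github.com/liggitt/tabwriter" // assumed import path
)

func main() {
	// The first writer records the maximum width seen per column.
	w1 := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	w1.Write([]byte("NAME\tSTATUS\n"))
	w1.Write([]byte("a-rather-long-name\tReady\n"))
	w1.Flush()

	// The second writer starts from those widths, so its shorter rows
	// line up with the table flushed above.
	w2 := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	w2.SetRememberedWidths(w1.RememberedWidths())
	w2.Write([]byte("b\tPending\n"))
	w2.Flush()
}
```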
|
||||
|
||||
// Flush should be called after the last call to Write to ensure
|
||||
// that any data buffered in the Writer is written to output. Any
|
||||
// incomplete escape sequence at the end is considered
|
||||
// complete for formatting purposes.
|
||||
func (b *Writer) Flush() error {
|
||||
return b.flush()
|
||||
}
|
||||
|
||||
func (b *Writer) flush() (err error) {
|
||||
defer b.reset() // even in the presence of errors
|
||||
defer handlePanic(&err, "Flush")
|
||||
|
||||
// add current cell if not empty
|
||||
if b.cell.size > 0 {
|
||||
if b.endChar != 0 {
|
||||
// inside escape - terminate it even if incomplete
|
||||
b.endEscape()
|
||||
}
|
||||
b.terminateCell(false)
|
||||
}
|
||||
|
||||
// format contents of buffer
|
||||
b.format(0, 0, len(b.lines))
|
||||
return nil
|
||||
}
|
||||
|
||||
var hbar = []byte("---\n")
|
||||
|
||||
// Write writes buf to the writer b.
|
||||
// The only errors returned are ones encountered
|
||||
// while writing to the underlying output stream.
|
||||
//
|
||||
func (b *Writer) Write(buf []byte) (n int, err error) {
|
||||
defer handlePanic(&err, "Write")
|
||||
|
||||
// split text into cells
|
||||
n = 0
|
||||
for i, ch := range buf {
|
||||
if b.endChar == 0 {
|
||||
// outside escape
|
||||
switch ch {
|
||||
case '\t', '\v', '\n', '\f':
|
||||
// end of cell
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i + 1 // ch consumed
|
||||
ncells := b.terminateCell(ch == '\t')
|
||||
if ch == '\n' || ch == '\f' {
|
||||
// terminate line
|
||||
b.addLine(ch == '\f')
|
||||
if ch == '\f' || ncells == 1 {
|
||||
// A '\f' always forces a flush. Otherwise, if the previous
|
||||
// line has only one cell, it has no impact on
|
||||
// the formatting of the following lines (the last cell per
|
||||
// line is ignored by format()), so we can flush the
|
||||
// Writer contents.
|
||||
if err = b.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
if ch == '\f' && b.flags&Debug != 0 {
|
||||
// indicate section break
|
||||
b.write0(hbar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case Escape:
|
||||
// start of escaped sequence
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
if b.flags&StripEscape != 0 {
|
||||
n++ // strip Escape
|
||||
}
|
||||
b.startEscape(Escape)
|
||||
|
||||
case '<', '&':
|
||||
// possibly an html tag/entity
|
||||
if b.flags&FilterHTML != 0 {
|
||||
// begin of tag/entity
|
||||
b.append(buf[n:i])
|
||||
b.updateWidth()
|
||||
n = i
|
||||
b.startEscape(ch)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// inside escape
|
||||
if ch == b.endChar {
|
||||
// end of tag/entity
|
||||
j := i + 1
|
||||
if ch == Escape && b.flags&StripEscape != 0 {
|
||||
j = i // strip Escape
|
||||
}
|
||||
b.append(buf[n:j])
|
||||
n = i + 1 // ch consumed
|
||||
b.endEscape()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// append leftover text
|
||||
b.append(buf[n:])
|
||||
n = len(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewWriter allocates and initializes a new tabwriter.Writer.
|
||||
// The parameters are the same as for the Init function.
|
||||
//
|
||||
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
|
||||
return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
|
||||
}
|
|
@@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@@ -0,0 +1,39 @@
|
|||
# go-wordwrap
|
||||
|
||||
`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
|
||||
automatically wraps words into multiple lines. The primary use case for this
|
||||
is in formatting CLI output, but of course word wrapping is a generally useful
|
||||
thing to do.
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
Install using `go get github.com/mitchellh/go-wordwrap`.
|
||||
|
||||
Full documentation is available at
|
||||
http://godoc.org/github.com/mitchellh/go-wordwrap
|
||||
|
||||
Below is an example of its usage ignoring errors:
|
||||
|
||||
```go
|
||||
wrapped := wordwrap.WrapString("foo bar baz", 3)
|
||||
fmt.Println(wrapped)
|
||||
```
|
||||
|
||||
Would output:
|
||||
|
||||
```
|
||||
foo
|
||||
bar
|
||||
baz
|
||||
```
|
||||
|
||||
## Word Wrap Algorithm
|
||||
|
||||
This library doesn't use any clever algorithm for word wrapping. The wrapping
|
||||
is actually very naive: it simply wraps whenever it encounters whitespace or an explicit linebreak.
|
||||
The goal of this library is for word wrapping CLI output, so the input is
|
||||
typically pretty well controlled human language. Because of this, the naive
|
||||
approach typically works just fine.
|
||||
|
||||
In the future, we'd like to make the algorithm more advanced. We would do
|
||||
so without breaking the API.
|
|
@@ -0,0 +1 @@
|
|||
module github.com/mitchellh/go-wordwrap
|
Some files were not shown because too many files have changed in this diff.