upgrade to latest dependencies (#1558)

bumping k8s.io/code-generator 25a36f5...7b37ed0:
  > 7b37ed0 Merge pull request #106960 from cpanato/update-xnet-122t
  > d078fd8 dependencies: Update golang.org/x/net to v0.0.0-20211209124913-491a49abca63
  > 5747c7b Merge pull request #106250 from ulucinar/bump-k-openapi-1.22
  > 2e52dc3 Manual cherry pick of kube-openapi changes for release-1.22
  > 22f764f Merge pull request #104310 from liggitt/automated-cherry-pick-of-#104279-upstream-release-1.22
  > c6452dc Merge pull request #104469 from liggitt/utils-1-22
  > c2733a4 Copy golang license to staging copies
  > f338d05 vendor: bump k8s.io/util to get fix for LRU cache
  > ff0bf9f Automated cherry pick of #104014: Update golang.org/x/time/rate (#104018)
  > 9fd003d update vendor after switch
  > b1b4942 Merge pull request #101624 from tilt-dev/nicks/go-to-protobuf
  > d874928 Merge pull request #103176 from CaoDonghui123/updatemod
  > c56a6a7 go-to-protobuf: small fixes to improve debuggability
  > 43f2a02 Merge pull request #103318 from jpbetz/fix-102749
  > 563f0ae Update golang.org/x/net to v0.0.0-20210520170846-37e1c6afe023
  > 3fe66c8 Bump SMD to v4.1.2 to pick up #102749 fix
  > 53a4e85 Merge pull request #103026 from sanposhiho/fix/typo-on-ExtractHoge
  > 94f0451 Merge pull request #95472 from ahmedtd/lru-remove-mpl
  > b76aedd Fix: typo on ExtractHoge
  > 600a804 Remove MPL-licensed dep from lruexpirecache
  > 723f918 Merge pull request #103010 from BenTheElder/ansi-386-overflow
  > d624dfc update Azure/go-ansiterm to v0.0.0-20210617225240-d185dfc1b5a1
  > 8d9ac5e Merge pull request #102783 from mcbenjemaa/update-dep-gnostic
  > 12c648b update internal modules
  > b3a1c69 update vendor
  > b599f86 update internal modules
  > 964fb60 Correct comment block from openapi_v2.NewDocument to openapi_v2.ParseDocument
  > faf39f8 Update github.com/googleapis/gnostic to v0.5.5 and updating transitive dependencies go-cmp, protobuf,, to adapt the latest gnostic release which cosists of [Update protos for Go protoc plugin compatibility]
  > 0ea71da Merge pull request #102441 from feiskyer/update-vendor
  > dd653c5 Update Azure Go SDK to v55.0.0
  > f65da15 Merge pull request #102897 from liggitt/etcd-ga
  > 22f08a3 Update to etcd v3.5.0 client
  > 93504ea Merge pull request #100488 from liggitt/protobuf
  > de39fc1 Update protobuf,grpc,etcd dependencies
  > e5ccfa3 sync: remove Godeps/
  > af0b5d9 Merge pull request #102454 from wzshiming/clean/remove-godeps
  > ed0f8d0 Merge pull request #102467 from pacoxu/json-patch-5.5.0
  > 7639b06 Remove Godeps
  > daefbed Merge pull request #102059 from jsafrane/fix-consistentread
  > 783e1ca upgrade gopkg.in/evanphx/json-patch to v4.11.0
  > 9c11f92 Merge pull request #102409 from dims/bump-golang.org/x/text-to-v0.3.6
  > 0b0b341 Bump k8s.io/utils
  > 45c8999 Bump golang.org/x/text to v0.3.6
  > 2f22fda Merge pull request #102332 from pacoxu/klog-2.9.0
  > 3e7c599 upgrade klog to v2.9.0
  > 0213807 Merge pull request #102147 from kolyshkin/update-runc-rc94-take-II
  > d76ae01 vendor: bump runc to rc95
  > deed336 Merge pull request #102111 from dims/update-testing-related-deps-for-1.22
  > 632c95c update testing related dependencies
  > 994995d Merge pull request #102094 from liggitt/revert-runc
  > 61abebe Revert "Merge pull request #101888 from kolyshkin/update-runc-rc94"
  > 66f43cf Merge pull request #101888 from kolyshkin/update-runc-rc94
  > bb6578c vendor: bump runc to rc94
  > f57bba4 Merge pull request #100940 from markusthoemmes/fake-client-interface
  > b9c3fe9 Merge pull request #101320 from amanchourasiya/bugfix/101309
  > 4b75d82 Implement a FakeClient interface
  > 3fae618 Merge pull request #100496 from jpbetz/extract-subresources
  > 2697f01 Removing comment to improve generated docs.
  > 112073b Merge pull request #101357 from feiskyer/update-vendor
  > 9586e06 Add subresource support to apply extract
  > de8ea98 Update Azure Go SDK version to v53.1.0
  > cbf249e Merge pull request #101234 from gautierdelorme/rm-go-openapi-spec
  > 678d1fb remove go-openapi/spec
  > 6b304b4 bump k8s.io/kube-openapi
  > dd2ded1 Merge pull request #100490 from howardjohn/gnostic-v051
  > 1f18445 Update kube-openapi and gnostic dependencies
  > 70ccd82 Merge pull request #100970 from apelisse/add-subresource-managedfields
  > 597a684 Generated code
  > bdc2396 Merge pull request #100784 from kevindelgado/smd-to-4-1-1
  > e4dc439 Merge pull request #100684 from Jefftree/atomic
  > c22c694 Update structured-merge-diff to v4.1.1
  > 0557bb8 Merge pull request #100671 from Niekvdplas/spelling-mistakes
  > 275d6b8 Generate openapi and proto files
  > 3112572 Merge pull request #99849 from jpbetz/apply-subresources
  > c74497a Fixed several spelling mistakes
  > 72793d0 Generate ApplyScale client support
  > cbbce91 Add apply subresource support to client-go's typed client
  > 5bc604e Merge pull request #100156 from ehashman/issue-100155
  > 092920b Bump klog to 2.8.0, fixing nil panics in KObj
bumping knative.dev/eventing d829745...eb4c06c:
  > eb4c06c Add CloudEvent SQL validation (#6032)
  > eb62db9 Fix broken links (#6030)
  > 7bf054f Add k8s trace attribute to PingSource (#5928)
  > b8d1cfb rename pingsource component (#6003)
  > 1ae8a49 Fixing Broker DLQ flaky tests (#6006)
  > 4348c29 upgrade to latest dependencies (#6031)
  > 5e3183b Remove useless logconfig package (#6015)
  > 06ba3ea Remove skipped Trigger no Broker test (#6014)
  > 1ba7810 Bump otel/trace dependency. (#6021)
  > e71f0b0 Update actions (#6016)
bumping knative.dev/pkg 7927179...96f1852:
  > 96f1852 Update actions (#2382)
  > 3f86f59 Update community files (#2381)
  > 52e42b7 Extend Apply verb to extensions, Bump to K8s 1.22. (#2327)
bumping k8s.io/api 3fcac9f...9d2e771:
  > 9d2e771 Update dependencies to v0.22.5 tag
  > d5ade40 Merge pull request #106960 from cpanato/update-xnet-122t
  > ad37243 dependencies: Update golang.org/x/net to v0.0.0-20211209124913-491a49abca63
  > 79120fd Merge pull request #106250 from ulucinar/bump-k-openapi-1.22
  > 2792020 Manual cherry pick of kube-openapi changes for release-1.22
  > 68328c1 Revert "tests for statefulset PersistentVolumeClaimDeletePolicy api change"
  > 99803f6 Revert "statefulset PersistentVolumeClaimDeletePolicy api change"
  > 4bdcbc6 Merge pull request #103168 from raisaat/beta-2238
  > 4aee549 Update API description for probe.terminationGracePeriodSeconds
  > cd49d27 Merge pull request #103245 from wzshiming/fix/prober-termination
  > af0a862 Merge pull request #99023 from verb/1.21-securitycontext
  > 8479309 Regenerate
  > 0ff29d3 Merge pull request #98817 from alculquicondor/job-completion-api
  > f608fb0 Generated code for securityContext in EphemeralContainers
  > ae00b16 Update API documents
  > 1e1dad4 Merge pull request #103467 from thockin/svc-alloc-lb-nodeports-bug
  > 1cee8b1 Add Job.status.uncountedPodUIDs
  > 1a4d863 Allow securityContext in EphemeralContainers
  > 8ad172a Merge pull request #102966 from SergeyKanzhelev/deprecateDynamicKubeletConfig
  > 9aa829f Fix small bug with AllocateLoadBalancerNodePorts
  > c712065 Merge pull request #103276 from NetApp/data-source-ref
  > bb601f1 deprecate and disable by default DynamicKubeletConfig feature flag
  > 2c49f10 Merge pull request #99961 from margocrawf/master
  > 3bec203 Add DataSourceRef field to PVC spec
  > 9f69fea Merge pull request #101296 from Miciah/fix-RollingUpdateDaemonSet-godoc-regarding-rounding
  > fb2c6db This introduces an Impersonate-Uid header to server side code.
  > 49e8721 Merge pull request #99494 from enj/enj/i/not_after_ttl_hint
  > a05eb2d Fix RollingUpdateDaemonSet godoc regarding rounding
  > f75dde5 Merge pull request #103176 from CaoDonghui123/updatemod
  > 03dbe2b Generated
  > e14d3f9 Merge pull request #103318 from jpbetz/fix-102749
  > bfaee58 Update golang.org/x/net to v0.0.0-20210520170846-37e1c6afe023
  > 2475115 csr: add expirationSeconds field to control cert lifetime
  > 0c9a900 Merge pull request #99378 from mattcary/api
  > ca00f16 Bump SMD to v4.1.2 to pick up #102749 fix
  > b71fde3 Merge pull request #102028 from chrishenzie/read-write-once-pod-access-mode
  > af1946e tests for statefulset PersistentVolumeClaimDeletePolicy api change
  > bbf61a2 ReadWriteOncePod PV access mode and feature gate
  > 3f39120 statefulset PersistentVolumeClaimDeletePolicy api change
  > c112aae Merge pull request #103153 from josephburnett/v2beta2
  > 2cc5916 Merge pull request #103003 from sschne/bugfix/fix-required-pathtype
  > 8eb76d0 Move HPA v2beta2 deprecation to 1.23.
  > 6db9ac8 Merge pull request #103001 from zshihang/csi
  > 55a4932 Remove omitempty from PathType
  > 8de3d46 Merge pull request #103190 from robscott/remove-app-protocol-gate
  > 6089ecf CSIServiceAccountToken ga
  > a5544ab Removing ServiceAppProtocol feature gate
  > 07d5b5c Merge pull request #103161 from MikeSpreitzer/comment-apf-types
  > 52573b6 Merge pull request #95472 from ahmedtd/lru-remove-mpl
  > 9efbb56 Add missing comments in APF API types
  > d4897f2 Merge pull request #102022 from adtac/sbeta
  > 31da821 Remove MPL-licensed dep from lruexpirecache
  > 0f1d05d Merge pull request #103010 from BenTheElder/ansi-386-overflow
  > 5a15c1a graduate SuspendJob to beta
  > a8267d5 Merge pull request #95768 from danwinship/document-nodeaddresstype
  > 1473f93 update Azure/go-ansiterm to v0.0.0-20210617225240-d185dfc1b5a1
  > 8e345e4 Merge pull request #102783 from mcbenjemaa/update-dep-gnostic
  > 475ec09 Document the NodeAddressType values
  > 58a75e7 update vendor
  > c707e2b Update github.com/googleapis/gnostic to v0.5.5 and updating transitive dependencies go-cmp, protobuf,, to adapt the latest gnostic release which cosists of [Update protos for Go protoc plugin compatibility]
  > 508b641 Merge pull request #102897 from liggitt/etcd-ga
  > bc7ac6c Update to etcd v3.5.0 client
  > b5b150a Merge pull request #100488 from liggitt/protobuf
  > d397d2a Merge pull request #102834 from rikatz/endport-beta
  > 3410dfc Update protobuf,grpc,etcd dependencies
  > 672fa45 Update generated files for endPort promotion to Beta
  > 0f7caf4 Promote endPort field in netpolicies to beta
  > bd09c62 sync: remove Godeps/
  > 443865c Merge pull request #102454 from wzshiming/clean/remove-godeps
  > 9f22d12 Merge pull request #102412 from andrewsykim/kill-service-topology
  > 140e77f Remove Godeps
  > 597d8a9 Merge pull request #102467 from pacoxu/json-patch-5.5.0
  > d79bfb4 api: update API compatibility tests to remove topologyKeys from Service
  > 5ca0554 upgrade gopkg.in/evanphx/json-patch to v4.11.0
  > c7678e6 core/v1: add unit tests to ensure deprecated protobuf field numbers are not re-used
  > 0b55fc9 Merge pull request #102409 from dims/bump-golang.org/x/text-to-v0.3.6
  > 637d634 apis: update generated code after removing Service topologyKeys
  > 23dfef7 Merge pull request #102223 from lunhuijie/run-test6
  > cb0ea78 Bump golang.org/x/text to v0.3.6
  > 952c10d apis: remove Service topologyKeys
  > b8a1b1f Merge pull request #102336 from SataQiu/remove-podpreset
  > a1e83ec api link is missing
  > b0d9a0e Merge pull request #100842 from ravisantoshgudimetla/add-minReadySeconds-ss
  > 98ffe9c cleanup PodPreset testdata
  > c1965f3 Merge pull request #102332 from pacoxu/klog-2.9.0
  > 6c59389 testdata: StatefulSet
  > 60e1beb upgrade klog to v2.9.0
  > b51bb84 generated: Changes for api introduced
  > a16591c Merge pull request #101742 from ravisantoshgudimetla/promote-maxSurge-beta
  > cb82702 api: Introduce minReadySeconds,AvailableReplicas in Statefulset
  > 191e7be Merge pull request #102147 from kolyshkin/update-runc-rc94-take-II
  > f1ffee0 Promote DS MaxSurge to beta
  > da19d3a Merge pull request #99576 from marosset/windows-host-process-work
  > 55a1051 vendor: bump runc to rc95
  > ad700d0 Merge pull request #102159 from roycaihw/mark-containerimage-name-optional
  > 7c10cae  API support for Windows host process containers
  > 0fb680d Merge pull request #101099 from pacoxu/podpreset-clean
  > 419f976 generated
  > 532146f Merge pull request #102030 from aojea/ingress_beta
  > 59e252c remove pod preset testdata and args
  > 7673089 mark ContainerImage.names as optional
  > d5fda55 Merge pull request #102111 from dims/update-testing-related-deps-for-1.22
  > acf8681 bump e2e test to ingress v1
  > 6500f76 Merge pull request #101916 from MartinKanters/101137-document-double-dollar-behavior
  > c881484 update testing related dependencies
  > c2d30ef Merge pull request #102034 from AbdulBasitAlvi/bug/add_description_api_apps_v1_types
  > c961028 Generated doc files
  > bb4775a Merge pull request #102094 from liggitt/revert-runc
  > 67fc4d0 bug(staging_api_apps_v1): add descriptions to apps/v1/types.go files
  > c2884aa Processed review suggestions
  > 1d8425e Revert "Merge pull request #101888 from kolyshkin/update-runc-rc94"
  > 35f2b3a Generated doc files
  > 8d7383b Merge pull request #102066 from alculquicondor/api_rev
  > e56fa49 Corrected the $$ documentation in staging/src as well
  > c10867d Merge pull request #101888 from kolyshkin/update-runc-rc94
  > fc270cb Use aliases in sig apps and scheduling APIs OWNERS
  > 8672a59 Merge pull request #101496 from ahg-g/ahg-nss-beta
  > dafeb4b vendor: bump runc to rc94
  > 98c0df6 Merge pull request #101627 from rikatz/fix-ingress-doc
  > 367ad6c graduate pod affinity NamespaceSelector to Beta
  > 929601b Merge pull request #100008 from wangyysde/issue99675
  > 2312ce4 Change ingress api doc to reflect the correct path validation
  > 9295cf9 Merge pull request #101618 from vincepri/fuzzer-secs-bug-negative
  > 713113e Add descriptions to api/extensions/v1beta1/types.go
  > 3af60a8 Merge pull request #101243 from liggitt/api-1-21
  > 315950d Update generated fixture data with creation/deletion timestamps
  > 020341d Merge pull request #99391 from zhuangqh/update-doc
  > c390b84 Update round-tripped protobuf output
  > 645b51b Merge pull request #101292 from AliceZhang2016/job_controller_metrics
  > 104cee5 docs: fix outdated enhancement doc link
  > b8166a5 Drop 1.19.0 API compatibility data
  > b8e414e Merge pull request #98028 from tkashem/apf-post-startup-fix
  > fa35e8d change default feature gate value of IndexedJob
  > 1e93921 Add 1.21.0 API compatibility data
  > 2fa452f Merge pull request #90635 from desaintmartin/type-container-securitycontext
  > aaabe7b add auto update for apf bootstrap configuration
  > 8c6483e Merge pull request #99842 from mowangdk/upgrade_struct_annotation
  > adeaab0 Container type: document that SecurityContext applies to Container instead of Pod.
  > bdba003 Add descriptions to apiextensions v1 & v1beta1 types.go
  > e066428 Add description to policy types.go
  > 0097618 Merge pull request #99811 from lauchokyip/fixtype
  > 79f8f51 Added description to staging/src/k8s.io/api/apiserverinternal/v1alpha1/types.go
  > 13f2909 Merge pull request #101704 from liggitt/api-testdata
  > 9757115 Merge pull request #101687 from siddhartha97/add-feature
  > f1c1562 Update testdata generation command
  > 527f77d Added changes
  > 73cb810 Merge pull request #100233 from umangachapagain/imagepolicy-desc
  > 335d3ee Merge pull request #99760 from navist2020/fix_descriptons
  > 7172eb3 add missing description to api/imagepolicy/v1alpha1/types.go
  > 6753086 Merge pull request #98740 from andrewmelis/arm/fix-container-security-context-docs
  > 73d7b2e Add descriptions to api/authorization/*/types.go
  > 010282a Merge pull request #95286 from SergeyKanzhelev/removePodUnknown
  > 6f6e8ed Generate specs after documentation change
  > c8d93ac Merge pull request #99519 from whydoubt/typos
  > c31108f podUnknown is marked as Obsolete
  > a6e3a6d Remove duplicate link in container/securityContext
  > 95cee48 Merge pull request #101034 from verb/1.22-ec-api
  > 45a00c1 Correct a misspelling of 'secret'.
  > b64c6b5 Merge pull request #101234 from gautierdelorme/rm-go-openapi-spec
  > ece0d38 Generated code for Pod-based ephemeralcontainers
  > 6b193da bump k8s.io/kube-openapi
  > 457d71c Switch ephemeralcontainers SR to Pod Kind
  > e993e00 Merge pull request #100490 from howardjohn/gnostic-v051
  > a9b89d5 Update kube-openapi and gnostic dependencies
  > 648b778 Merge pull request #100970 from apelisse/add-subresource-managedfields
  > 16be49d Generated code
  > 50dfb4f sync: update go.mod
  > 86cef11 Merge pull request #100724 from liggitt/eviction-v1beta1
  > e55a4a1 Merge pull request #101086 from enj/enj/i/auth_owners_gen
  > 96ffc3f Define constant for eviction failure cause
  > fada3d2 Merge pull request #101080 from ahg-g/ahg-cost-beta
  > 6bb5a8a Prune stale entries from OWNERS files
  > 63023e5 Generated files
  > 59fddaa Graduate PodDeletionCost to Beta
  > 4d4999e Register Eviction v1
  > 424177a Merge pull request #100885 from enj/enj/i/auth_owners
  > 6eed676 Merge pull request #100784 from kevindelgado/smd-to-4-1-1
  > b23efeb Update auth OWNERS files to only use aliases
  > 9b64426 Merge pull request #100730 from mikedanese/rbacdoc
  > 5ace415 Update structured-merge-diff to v4.1.1
  > c1cb3cc Merge pull request #100728 from robscott/topology-auto
  > e2bec99 clarify RBAC API documentation
  > 0bd7673 Merge pull request #100684 from Jefftree/atomic
  > 6140925 Updating Topology Aware Hints to support "Auto" value for annotation
  > c2af720 Merge pull request #100245 from mengjiao-liu/Update-broken-links
  > 2634033 Generate openapi and proto files
  > e32abb1 Merge pull request #99849 from jpbetz/apply-subresources
  > 5d132f2 Update `PodSpec/Overhead` broken links
  > ddfb9ba Tag certain non-selector structs as atomic
  > 645ffdb Merge pull request #97989 from Danil-Grigorev/atomic-label-selectors
  > 64fc45e Add genclient:method=ApplyScale to types supporting scale update
  > 04a51e7 Add structType=atomic to all selectors
  > 2824ee6 Merge pull request #93195 from huffmanca/update-fsgrouppolicy
  > f81f579 Make selectors atomic
  > 04650e4 Updated generated dependencies
  > 0c55033 Addresses nitpicks for FSGroupPolicy
  > 3a2d6b5 Merge pull request #100472 from liggitt/endpoint-slice-v1beta1
  > e923586 Mark v1beta1 EndpointSlice deprecated in favor of v1
bumping k8s.io/apimachinery f916759...47f99e7:
  > 47f99e7 Merge pull request #106960 from cpanato/update-xnet-122t
  > 9e51a4c dependencies: Update golang.org/x/net to v0.0.0-20211209124913-491a49abca63
  > e757da0 Merge pull request #106250 from ulucinar/bump-k-openapi-1.22
  > 223bdbb Manual cherry pick of kube-openapi changes for release-1.22
  > 4a9e16b Merge pull request #104310 from liggitt/automated-cherry-pick-of-#104279-upstream-release-1.22
  > 65ee33b Copy golang license to staging copies
  > a644435 Merge pull request #103457 from codearky/fix-yaml-terminator-wcomment
  > ff522ab Merge pull request #98817 from alculquicondor/job-completion-api
  > f1aad3e add yaml separator validation and avoid silent ignoration
  > 0dafcb4 Merge pull request #102188 from alculquicondor/fasterselector
  > 8b8079d Add Job.status.uncountedPodUIDs
  > 8303750 Merge pull request #103176 from CaoDonghui123/updatemod
  > 51eb781 Improve slice allocation in LabelSelectorAsSelector
  > 3272933 Merge pull request #103318 from jpbetz/fix-102749
  > 2510b16 Update golang.org/x/net to v0.0.0-20210520170846-37e1c6afe023
  > 0376ffb Add benchmark for LabelSelectorAsSelector
  > 8989bdf Bump SMD to v4.1.2 to pick up #102749 fix
  > a4b8a5f Merge pull request #95472 from ahmedtd/lru-remove-mpl
  > 1241855 Remove MPL-licensed dep from lruexpirecache
  > 4713ab5 Merge pull request #103010 from BenTheElder/ansi-386-overflow
  > 41b021b update Azure/go-ansiterm to v0.0.0-20210617225240-d185dfc1b5a1
  > 76ce197 Merge pull request #102783 from mcbenjemaa/update-dep-gnostic
  > 8146ded Merge pull request #102629 from tiloso/staticcheck-cluster-apimachinery-apiserver
  > 1acaad5 update vendor
  > 30802a0 Merge pull request #102924 from liggitt/race-test-speed
  > 2245ed8 Fix staticcheck in cluster & k8s.io/{apimachinery,apiserver}
  > 076d25b Update github.com/googleapis/gnostic to v0.5.5 and updating transitive dependencies go-cmp, protobuf,, to adapt the latest gnostic release which cosists of [Update protos for Go protoc plugin compatibility]
  > 513e50f Speed up unit tests in -race detection mode
  > 0dc8a67 Merge pull request #102897 from liggitt/etcd-ga
  > 15dfbe8 Update to etcd v3.5.0 client
  > b2555c6 Merge pull request #100488 from liggitt/protobuf
  > f4becb8 Update protobuf,grpc,etcd dependencies
  > a3b8985 sync: remove Godeps/
  > 94d2465 Merge pull request #102454 from wzshiming/clean/remove-godeps
  > aec8116 Merge pull request #102467 from pacoxu/json-patch-5.5.0
  > 9a1154c Remove Godeps
  > a5103de Merge pull request #102489 from saschagrunert/http-stream-nil
  > 68243ab add ut for preventing dropping null from arrays
  > 1e82f3b Merge pull request #102409 from dims/bump-golang.org/x/text-to-v0.3.6
  > d97ed6c Fix regression for timed-out stream cleanups
  > 9f25815 upgrade gopkg.in/evanphx/json-patch to v4.11.0
  > 4cf81ce Bump golang.org/x/text to v0.3.6
  > 44113be Merge pull request #102332 from pacoxu/klog-2.9.0
  > 2a3fbac upgrade klog to v2.9.0
  > 1f89c78 Merge pull request #100394 from mengjiao-liu/clean-up-redundant-code
  > 2dd32d7 Merge pull request #102150 from julianvmodesto/ssa-big-last-applied-followup
  > c1766ec Fix use nil err
  > 4e7cc58 Make a public ValidateAnnotationsSize
  > 6df4bfb Make validation totalAnnotationSizeLimitB public.
  > cfc896c Merge pull request #99371 from tiloso/staticcheck-apimachinery-util
  > adc48d2 Merge pull request #102147 from kolyshkin/update-runc-rc94-take-II
  > ecda63f Fix staticcheck in k8s.io/apimachinery/pkg/util
  > 95c5729 vendor: bump runc to rc95
  > c7322e8 Merge pull request #102111 from dims/update-testing-related-deps-for-1.22
  > 2214a11 update testing related dependencies
  > 2540c63 Merge pull request #102094 from liggitt/revert-runc
  > 9765e76 Revert "Merge pull request #101888 from kolyshkin/update-runc-rc94"
  > e1b4d3f Merge pull request #101888 from kolyshkin/update-runc-rc94
  > 4c2cee4 Merge pull request #101618 from vincepri/fuzzer-secs-bug-negative
  > 2a34e07 vendor: bump runc to rc94
  > 96c076b Merge pull request #101668 from tkashem/wait-poll-with-context
  > 4112135 Timestamp fuzzer in metav1 should not use negative values
  > fda7135 Merge pull request #101626 from MadhavJivrajani/master
  > aaed044 apimachinery: add context bound polling
  > 83e6b5f Merge pull request #101320 from amanchourasiya/bugfix/101309
  > 60281a0 Add validation for names like '-'
  > b3737e4 Merge pull request #101590 from smarterclayton/quantity
  > 22894de Removing comment to improve generated docs.
  > 8c18d83 quantity: Allow a new quantity to be created directly from inf.Dec
  > 05188ba Merge pull request #101092 from shawnhanx/limited
  > 83ce8f0 Merge pull request #100496 from jpbetz/extract-subresources
  > 8cefc5b fix limited4 -> limited
  > ec059b1 Merge pull request #101361 from njuptlzf/label-test
  > e522d21 Enable extract test for status subresource
  > ba18a62 Merge pull request #101306 from joelsmith/master
  > b4b4027 parseOperator description is inconsistent with the behavior
  > a5c5d6e Add subresource support to apply extract
  > f3a344a Merge pull request #101234 from gautierdelorme/rm-go-openapi-spec
  > 2449c2e Additional CVE-2021-3121 fix
  > b78e067 bump k8s.io/kube-openapi
  > 37b6f01 Merge pull request #100490 from howardjohn/gnostic-v051
  > 87974a2 Update kube-openapi and gnostic dependencies
  > 8daf289 Merge pull request #100970 from apelisse/add-subresource-managedfields
  > 6ee6d74 Generated code
  > 5c420e5 Add "subresource" field to ManagedFieldEntry
  > 1ba67c1 Merge pull request #101086 from enj/enj/i/auth_owners_gen
  > f2987aa Merge pull request #101000 from lojies/ominilcheckforlen
  > 8c00220 Prune stale entries from OWNERS files
  > d23dd75 Merge pull request #100430 from mozillazg/fix-staticcheck-failed-apimachinery-serializer
  > 2567f57 code cleanup: Omit redundant nil check on slices
  > d8cdd62 Merge pull request #100784 from kevindelgado/smd-to-4-1-1
  > c4ed323 Fix staticcheck failures for vendor/k8s.io/apimachinery/pkg/runtime
  > 77ef728 Merge pull request #100684 from Jefftree/atomic
  > 02a91c3 Update structured-merge-diff to v4.1.1
  > 74cf9d3 Merge pull request #99839 from saschagrunert/portforward-stream-cleanup
  > ae7302c Generate openapi and proto files
  > 24633e4 Cleanup portforward streams after their usage
  > 22a1f0c Tag certain non-selector structs as atomic
  > 57f2a07 Merge pull request #100156 from ehashman/issue-100155
  > 567c401 Bump klog to 2.8.0, fixing nil panics in KObj
bumping knative.dev/networking 62388a5...0dbedcd:
  > 0dbedcd upgrade to latest dependencies (#598)
  > e40187c Update actions (#597)
bumping k8s.io/apiextensions-apiserver 73536ac...811461e:
  > 811461e Update dependencies to v0.22.5 tag
  > 7b79c0c Merge pull request #106960 from cpanato/update-xnet-122t
  > d3b1314 dependencies: Update golang.org/x/net to v0.0.0-20211209124913-491a49abca63
  > 6b25b67 Merge pull request #106250 from ulucinar/bump-k-openapi-1.22
  > ddac85f Manual cherry pick of kube-openapi changes for release-1.22
  > 41e7589 Merge pull request #104988 from liggitt/automated-cherry-pick-of-#104969-upstream-release-1.22
  > c2005c2 Fix null JSON round tripping
  > 068ccef Propagate conversion errors
  > 9ea1b11 Merge pull request #104469 from liggitt/utils-1-22
  > 0ddd93a vendor: bump k8s.io/util to get fix for LRU cache
  > d37e63d Automated cherry pick of #104014: Update golang.org/x/time/rate (#104018)
  > 0de7bca Merge pull request #103930 from cheftako/automated-cherry-pick-of-#103895-upstream-release-1.22
  > 9829d39 Update to using apiserver-network-proxy v1.22
  > 34f76b8 Merge pull request #103882 from rphillips/automated-cherry-pick-of-#103743-upstream-release-1.22
  > ad3dac1 vendor: bump runc to 1.0.1
  > 1eec384 add tracing to webhook requests
  > a9bb2e8 Merge pull request #103548 from dims/drop-hashicorp-lru
  > 176547e update vendor after switch
  > 179ee05 update to new k8s.io/utils
  > 71ce3e0 Merge pull request #103483 from odinuge/revert-102508-runc-1.0
  > 414b34f Revert "Update runc to 1.0.0"
  > d62f651 Merge pull request #103176 from CaoDonghui123/updatemod
  > 7d1f6ff Merge pull request #102508 from kolyshkin/runc-1.0
  > 3181a7b Update golang.org/x/net to v0.0.0-20210520170846-37e1c6afe023
  > bc42870 vendor: bump runc to 1.0.0 pre
  > be5c205 Merge pull request #103318 from jpbetz/fix-102749
  > 94119d1 Bump SMD to v4.1.2 to pick up #102749 fix
  > 930e99e Merge pull request #103248 from sttts/sttts-crd-converison-test
  > 305a8a6 Merge pull request #103126 from jdnurme/konn-21-update
  > b5be0cb apiextension: fix typo and test case in conversion integration test
  > 26c07d5 Updated to use konnectivity client v0.0.21, and implemented placeholder context
  > 92e6c85 Merge pull request #94942 from dashpole/apiserver_opentelemetry
  > 11bad3f Add distributed tracing to the apiserver using OpenTelemetry
  > ac26ae6 Merge pull request #102883 from liggitt/etcd-embed
  > 991af11 Drop etcd tests dependency
  > 171c646 Merge pull request #103010 from BenTheElder/ansi-386-overflow
  > a0952e2 update Azure/go-ansiterm to v0.0.0-20210617225240-d185dfc1b5a1
  > 2dc2006 Merge pull request #102783 from mcbenjemaa/update-dep-gnostic
  > 959d369 Merge pull request #102991 from soltysh/column_printer
  > 40ffbb8 update vendor
  > 71d1ac4 Extract columnPrinter interface for printing the values behind additional columns
  > c776c69 Update github.com/googleapis/gnostic to v0.5.5 and updating transitive dependencies go-cmp, protobuf,, to adapt the latest gnostic release which cosists of [Update protos for Go protoc plugin compatibility]
  > 5e236fd Merge pull request #102441 from feiskyer/update-vendor
  > b2e967b Update Azure Go SDK to v55.0.0
  > 01cad9a Merge pull request #102897 from liggitt/etcd-ga
  > 3a83b89 Update to etcd v3.5.0 client
  > 9a7a407 Merge pull request #100488 from liggitt/protobuf
  > b965a68 Update protobuf,grpc,etcd dependencies
  > 88dd39f Switch to go.etcd.io/etcd/client/v3
  > 6cfb102 Merge pull request #102825 from n4j/bug/KubectlWindowsCrash
  > 18f9397 Merge pull request #102814 from cheftako/master
  > 2dd0fdd Fix - Winterm issue on Windows 8.1 in kubectl exec
  > 71d03d0 Upgraded konnectivity-client to v0.0.20
  > 22a9708 sync: remove Godeps/
  > 1347dd8 Merge pull request #102454 from wzshiming/clean/remove-godeps
  > 0c442dd Merge pull request #102467 from pacoxu/json-patch-5.5.0
  > 3b8718c Remove Godeps
  > 660a516 Merge pull request #102059 from jsafrane/fix-consistentread
  > 18ef5f9 upgrade gopkg.in/evanphx/json-patch to v4.11.0
  > be90f88 Merge pull request #102561 from dims/updating-to-prometheus/common-latest-version
  > d387794 Bump k8s.io/utils
  > 788f663 make lint-dependencies happy!
  > a2e697c Updating to prometheus/common v0.26.0
  > ad3d999 Merge pull request #102409 from dims/bump-golang.org/x/text-to-v0.3.6
  > 3cb49d2 Bump golang.org/x/text to v0.3.6
  > af57388 Merge pull request #102332 from pacoxu/klog-2.9.0
  > 9b03e52 upgrade klog to v2.9.0
  > 80449b3 Merge pull request #102213 from cheftako/grpc-client
  > 1e7435a Upgrade konnectivity-client for GRPC connection fixes
  > eae9a86 Merge pull request #102197 from liggitt/crd-lifecycle
  > 06b6765 Merge pull request #102147 from kolyshkin/update-runc-rc94-take-II
  > 01da005 Propagate server version correctly to apiextensions-apiserver, stop serving v1beta1 CRDs
  > 638e19f Merge pull request #100754 from liggitt/warning-ga
  > 8f8e3ec vendor: bump runc to rc95
  > 997a056 Merge pull request #101688 from liggitt/field-warnings
  > a6f3df2 Graduate WarningHeader feature to GA
  > b3b45bb Merge pull request #102111 from dims/update-testing-related-deps-for-1.22
  > a55c164 Add WarningsOnCreate,WarningsOnUpdate
  > e6c6e3a update testing related dependencies
  > a01e380 Merge pull request #102094 from liggitt/revert-runc
  > d22e8e0 Revert "Merge pull request #101888 from kolyshkin/update-runc-rc94"
  > 5be0c59 Merge pull request #101888 from kolyshkin/update-runc-rc94
  > 02ce3c5 vendor: bump runc to rc94
  > 6fcacda Merge pull request #100940 from markusthoemmes/fake-client-interface
  > 16f070d Merge pull request #99842 from mowangdk/upgrade_struct_annotation
  > 0174810 Implement a FakeClient interface
  > 520f4b4 Add descriptions to apiextensions v1 & v1beta1 types.go
  > b38c3ee Merge pull request #101320 from amanchourasiya/bugfix/101309
  > c9fb52e Merge pull request #101497 from deads2k/crd-integration
  > 9becbfa Removing comment to improve generated docs.
  > 03ebf8a split CRD schema test between migrated data and current
  > e61844c Merge pull request #101357 from feiskyer/update-vendor
  > eee4af5 Update Azure Go SDK version to v53.1.0
  > 1f6c96c Merge pull request #98377 from nodo/scale-ownership-tracking-deployment
  > a0717ff Add nil path to mapping when a CR has no "scale" subresource
  > 1446de4 Use ScaleHandler for all scalable resources
  > ebf5081 Merge pull request #101234 from gautierdelorme/rm-go-openapi-spec
  > d8eb225 remove go-openapi/spec
  > 7a8698f bump k8s.io/kube-openapi
  > b0804e0 Merge pull request #100490 from howardjohn/gnostic-v051
  > 31019b4 Update kube-openapi and gnostic dependencies
  > 7537a76 Merge pull request #100970 from apelisse/add-subresource-managedfields
  > 049231a Generated code
  > ce2b330 Add "subresource" field to ManagedFieldEntry
  > 60a7544 sync: update go.mod
  > 1253a6f Merge pull request #100739 from pacoxu/update-zap
  > 35de004 Merge pull request #100784 from kevindelgado/smd-to-4-1-1
  > 389a19b update uber zap to 1.16.0 to fix a nil pointer exception
  > 015cd7a Merge pull request #100684 from Jefftree/atomic
  > da156a8 Update structured-merge-diff to v4.1.1
  > c5eec4f Merge pull request #100671 from Niekvdplas/spelling-mistakes
  > 895747a Generate openapi and proto files
  > 904189d Fixed several spelling mistakes
  > e546218 Merge pull request #100566 from dekkagaijin/patch-1
  > f141ea0 Update image base to `gcr.io/distroless/base-debian10:latest`
bumping k8s.io/client-go a2642ab...a406c93:
  > a406c93 Update dependencies to v0.22.5 tag
  > ea750b1 Merge pull request #106960 from cpanato/update-xnet-122t
  > 8e1bfd2 dependencies: Update golang.org/x/net to v0.0.0-20211209124913-491a49abca63
  > 362cd0f Merge pull request #106250 from ulucinar/bump-k-openapi-1.22
  > 2064e72 Manual cherry pick of kube-openapi changes for release-1.22
  > 36ef169 Merge pull request #104988 from liggitt/automated-cherry-pick-of-#104969-upstream-release-1.22
  > 528ab4f Propagate conversion errors
  > 972da7b Merge pull request #104310 from liggitt/automated-cherry-pick-of-#104279-upstream-release-1.22
  > 2ac20a6 Merge pull request #104690 from atiratree/automated-cherry-pick-of-#104172-upstream-release-1.22
  > d35f07a Copy golang license to staging copies
  > 3499434 add a test for jsonpath template parsing to prevent regressions
  > 67fba22 revert "fix wrong output when using jsonpath"
  > 4d7bb68 Merge pull request #104469 from liggitt/utils-1-22
  > e475734 vendor: bump k8s.io/util to get fix for LRU cache
  > 5629b66 Automated cherry pick of #104014: Update golang.org/x/time/rate (#104018)
  > 1e037e8 Revert "tests for statefulset PersistentVolumeClaimDeletePolicy api change"
  > d3b9758 Merge pull request #103689 from enj/enj/t/exec_metrics
  > ef1d5d2 client-go exec: fix metrics related to plugin not found
  > 7a90b08 Merge pull request #102928 from dprotaso/dynamic-client-backwards-compatible
  > f0bc45f Merge pull request #98817 from alculquicondor/job-completion-api
  > c6c0ca0 Simplify use of the fake dynamic client
  > 69e00b0 Merge pull request #103548 from dims/drop-hashicorp-lru
  > 3bb4101 Add Job.status.uncountedPodUIDs
  > 266e43a update to new k8s.io/utils
  > 8abac60 Merge pull request #103487 from novahe/fix/fixture-data-race
  > b267864 Merge pull request #103276 from NetApp/data-source-ref
  > b7e5fce client-go: copying object to fix data race (#103148)
  > 4339f1b Merge pull request #102181 from enj/enj/i/deprecate_gcp_azure
  > 0cb2597 Add DataSourceRef field to PVC spec
  > 68cb2dd Merge pull request #102890 from ankeesler/exec-plugin-v1
  > e2b5311 Deprecate azure and gcp in-tree auth plugins
  > ca3a47f Merge pull request #99494 from enj/enj/i/not_after_ttl_hint
  > f00874a exec credential provider: update tests+metadata for v1
  > 843bb80 Merge pull request #103176 from CaoDonghui123/updatemod
  > e56c7dc Generated
  > dba1c9a promote client.authentication.k8s.io to v1
  > e9d2f61 Merge pull request #103318 from jpbetz/fix-102749
  > 6851811 Update golang.org/x/net to v0.0.0-20210520170846-37e1c6afe023
  > 30cd4e9 csr: add expirationSeconds field to control cert lifetime
  > 45d1077 Merge pull request #99378 from mattcary/api
  > 83ade40 Bump SMD to v4.1.2 to pick up #102749 fix
  > e68c673 Merge pull request #103026 from sanposhiho/fix/typo-on-ExtractHoge
  > dba85b7 tests for statefulset PersistentVolumeClaimDeletePolicy api change
  > dae61be Merge pull request #95472 from ahmedtd/lru-remove-mpl
  > c374b40 Fix: typo with hack/update-codegen.sh
  > f54e143 Merge pull request #102540 from jpbetz/apply-client-docs
  > 65b7d26 Remove MPL-licensed dep from lruexpirecache
  > 5a30221 Merge pull request #990 from nikhita/revert-direct-pr
  > 50ffc02 Split documentation according to both mechanisms available for migration
  > e7ebdbc Merge pull request #982 from sprutner/patch-1
  > 18b053f Revert "Merge pull request #982 from sprutner/patch-1"
  > acf46ab Apply suggestions from code review
  > 3fae6f0 Merge pull request #103010 from BenTheElder/ansi-386-overflow
  > d5f964d Update README.md
  > 9ffcd16 Add doc.go for client-go apply support
  > eadbc45 update Azure/go-ansiterm to v0.0.0-20210617225240-d185dfc1b5a1
  > 25e061e sync: remove Godeps/
  > ca46d47 Merge pull request #103007 from smira/error-wrapping
  > 3ecbde3 Merge pull request #102454 from wzshiming/clean/remove-godeps
  > c8b4c1e fix: properly wrap errors when reading response body in the client
  > 27d803a Merge pull request #102606 from tkashem/revert-102581
  > b7739d8 Remove Godeps
  > 848310b Merge pull request #102783 from mcbenjemaa/update-dep-gnostic
  > ded678f Merge pull request #102412 from andrewsykim/kill-service-topology
  > fc210d9 client-go: fix flake in test TestRequestWatchWithRetry
  > 0634aae Merge pull request #102946 from ahmed-mez/patch-1
  > 68eb0a2 update vendor
  > 742abf7 Merge pull request #100939 from wzshiming/feat/subresource-match
  > cc4f429 apis: update generated code after removing Service topologyKeys
  > bbd71da Revert "Merge pull request #102581 from liggitt/revert-watch-retry"
  > eecff02 Fix ServerGroupsAndResources docs typo
  > 77d7443 Update github.com/googleapis/gnostic to v0.5.5 and updating transitive dependencies go-cmp, protobuf,, to adapt the latest gnostic release which cosists of [Update protos for Go protoc plugin compatibility]
  > 7801ce3 Merge pull request #102581 from liggitt/revert-watch-retry
  > 8715480 Support subresource match
  > 0a7756a Merge pull request #102441 from feiskyer/update-vendor
  > 71f81dc Merge pull request #102467 from pacoxu/json-patch-5.5.0
  > 5ba99a7 Revert "client-go: add retry logic for Watch and Stream"
  > ce9fcb2 Add test
  > 5e73919 Update Azure Go SDK to v55.0.0
  > 2a923d5 Merge pull request #102059 from jsafrane/fix-consistentread
  > 8e66c0a upgrade gopkg.in/evanphx/json-patch to v4.11.0
  > ef2d884 Merge pull request #102897 from liggitt/etcd-ga
  > fd12ff3 Merge pull request #102572 from JonZeolla/remove-insecure-curl-suggestion
  > 50a473d Bump k8s.io/utils
  > ead3c96 Update to etcd v3.5.0 client
  > a3215d7 Remove -k from toCurl output
  > 80617fc Merge pull request #100488 from liggitt/protobuf
  > 09dbda0 Merge pull request #102063 from tdihp/fix/azure-auth-refresh
  > d412730 Merge pull request #99310 from ankeesler/exec-plugin-interactive
  > 0ba74ec Update protobuf,grpc,etcd dependencies
  > caa2dde Merge pull request #102409 from dims/bump-golang.org/x/text-to-v0.3.6
  > 88929e8 Azure auth forwarding adal refresh error to tokenSource, fixes error when token refresh fails.
  > 37ed584 exec credential provider: InteractiveMode support
  > 56494c9 Merge pull request #102107 from tkashem/client-go-retry
  > f2bdce0 Bump golang.org/x/text to v0.3.6
  > 1bccfc8 Merge pull request #102788 from pohly/reflector-log-level
  > ccac415 Merge pull request #100842 from ravisantoshgudimetla/add-minReadySeconds-ss
  > 55854fa client-go: add retry logic for Watch and Stream
  > 67de95c client-go: reduce log level of reflector again
  > 507a204 Merge pull request #974 from dims/drop-outdated-broken-widgets
  > 906f6d9 generated: Changes for api introduced
  > fe090a5 Merge pull request #102332 from pacoxu/klog-2.9.0
  > f5566ff Drop outdated/broken widgets in README
  > 07ab04d Merge pull request #98077 from jayunit100/jay-netpol-win
  > 6b087d2 upgrade klog to v2.9.0
  > 39cb8cd Merge pull request #102217 from tkashem/client-go-refactor-retry
  > 7a25584 Implement a windows Netpol NewModel
  > 228dada Merge pull request #102241 from lunhuijie/optimizationReturn
  > 239ec44 client-go: refactor retry logic so it can be reused
  > dab51bb Add netpol tests for windows
  > 5b0c719 simplify returning boolean expression in staging/src/k8s.io/client-go/tools
  > 614c59d Merge pull request #102175 from ankeesler/exec-plugin-with-basic-auth
  > 9edbd9b exec credential provider: don't run exec plugin with basic auth
  > d1fa200 Merge pull request #102147 from kolyshkin/update-runc-rc94-take-II
  > dd8d430 Merge pull request #99576 from marosset/windows-host-process-work
  > c7a0644 vendor: bump runc to rc95
  > 13cd919 Merge pull request #102090 from mengjiao-liu/fix-data-race-csi
  > 5d46949  API support for Windows host process containers
  > b0c8a7c Merge pull request #102111 from dims/update-testing-related-deps-for-1.22
  > 8cb6ac7 Prevent data race condition  in csi unit tests
  > 36ff79f update testing related dependencies
  > 03f765c Merge pull request #102094 from liggitt/revert-runc
  > 940ff81 Revert "Merge pull request #101888 from kolyshkin/update-runc-rc94"
  > da28164 Merge pull request #101888 from kolyshkin/update-runc-rc94
  > 253f58b Merge pull request #101817 from cndoit18/add-events-expansion
  > 56ee358 vendor: bump runc to rc94
  > 39e54e7 Add fieldSelector builder function to events.
  > 3cca9d7 Merge pull request #98057 from zerodayz/wrong-output-with-jsonpath
  > 9a82c6a Merge pull request #100940 from markusthoemmes/fake-client-interface
  > b6d16d4 [jsonpath] fix wrong output when using jsonpath
  > 2ed8b30 Merge pull request #101707 from enj/enj/i/bad_cadata
  > 3579fb3 Implement a FakeClient interface
  > 776e602 client-go transport: assert that final CA data is valid
  > db078d2 client-go: NewSelfSignedCACert makes Go 1.15+ compatible cert (#100324)
  > 341f59a Merge pull request #101535 from adjika/feature/client-go_documentation
  > 6db26dd Fixes formatting and typos in client-go docs
  > c850435 Merge pull request #101252 from smarterclayton/certificate_logging_upstream
  > e1f818c Merge pull request #93412 from thinpark/fix.vardoc
  > 3faf506 Add type logging to certificate manager
  > b401b11 Merge pull request #101002 from GreenApple10/feature/mod_timer
  > a53ab6b [k8s.io/client-go/discovery/discovery_client]: CustomResourceDefinition
  > 47b3cad Merge pull request #100496 from jpbetz/extract-subresources
  > ca138c8 Change time.Now().Sub(x) to time.Since(x) for cleanup
  > d3fbdeb Merge pull request #101357 from feiskyer/update-vendor
  > 0fd603d Re-generate extract functions
  > 9c26abc Merge pull request #97964 from sxllwx/avoid/alloc_array
  > bee3e6b Update Azure Go SDK version to v53.1.0
  > cfd0d8a Merge pull request #94120 from Dean-Coakley/fix-listWatchUntil-comment
  > c7901fa modify the elements in the array directly without allocating a new array
  > 071918f Merge pull request #101034 from verb/1.22-ec-api
  > 53da6da Remove outdated comment on wait.Until
  > d696ff0 Merge pull request #101162 from ihcsim/fix-malformed-request-logs
  > 2d8ed52 Generated code for Pod-based ephemeralcontainers
  > ae85206 Merge pull request #101234 from gautierdelorme/rm-go-openapi-spec
  > c870406 Fix the missing format specifier error in log output
  > 0e8029b Switch ephemeralcontainers SR to Pod Kind
  > d60baad bump k8s.io/kube-openapi
  > 4e3d560 Merge pull request #101269 from 3Xpl0it3r/master
  > db0131b Merge pull request #101174 from lojies/cleanupcertificate_manager_test
  > abc6b41 remove duplicatd description in comment
  > bfec291 Merge pull request #100490 from howardjohn/gnostic-v051
  > b1c481c code cleanup:remove repeated package import in client-go
  > 1099742 Merge pull request #100737 from Iceber/fix-delta-fifo
  > 4b5d87d Update kube-openapi and gnostic dependencies
  > 8c8fa70 Merge pull request #100970 from apelisse/add-subresource-managedfields
  > 8269e41 client-go/cache: fix the AddIfNotPresent method of the DeltaFIFO
  > 6767d6d Generated code
  > f19995f sync: update go.mod
  > d974964 Merge pull request #100768 from Iceber/fix-nested-key-error
  > 4e267f6 Merge pull request #100724 from liggitt/eviction-v1beta1
  > fc03592 client-go/cache: support errors.Unwrap for KeyError
  > 0c029ff Merge pull request #100503 from jpbetz/patch-3
  > 4c82a56 Generated files
  > f7d41dd Merge pull request #101086 from enj/enj/i/auth_owners_gen
  > 0d358cd Add jpbetz to client-go reviewers
  > 36954f7 Register Eviction v1
  > 7998e98 Prune stale entries from OWNERS files
  > 44bedf8 Merge pull request #100885 from enj/enj/i/auth_owners
  > 0cfeb7f Fix staticcheck failures for vendor/k8s.io/client-go/transport (#100429)
  > 4f026b6 Update auth OWNERS files to only use aliases
  > 945ba71 Merge pull request #100878 from wojtek-t/backoff_watches_on_429
  > 196e360 Merge pull request #100873 from Iceber/events-cache-typo
  > 03ebb27 Handle 429 errors from watch requests in reflector library.
  > 0bb6464 Merge pull request #100784 from kevindelgado/smd-to-4-1-1
  > d39ae5d fix typo in comment for EventAggregatorByReasonFunc
  > 980164c Merge pull request #100684 from Jefftree/atomic
  > 54f0cda Update structured-merge-diff to v4.1.1
  > ebfab03 Merge pull request #100570 from tiloso/staticcheck-clientgo-discovery
  > 9cd23e7 Generate openapi and proto files
  > eaeb6e7 Merge pull request #100458 from yliaog/master
  > 4820b53 Fix staticcheck in k8s.io/client-go/discovery
  > 2f4b7a2 Merge pull request #100355 from onesafe/master
  > 1e560af added yliaog as approver
  > af56ae8 Merge pull request #100217 from 0daryo/fix-example-workqueue-option-readme
  > aa707b1 Replace deprecated NewDeltaFIFO with NewDeltaFIFOWithOptions
  > 064bf3d Merge pull request #100085 from markusthoemmes/add-tracker-for-dynamic
  > 2de7d23 Remove non-existent log option
  > 4841142 Merge pull request #99849 from jpbetz/apply-subresources
  > 57471de Add Tracker() function to fake dynamic client
  > 50af76e Merge pull request #99839 from saschagrunert/portforward-stream-cleanup
  > c367767 Generate ApplyScale client support
  > ea39032 Merge pull request #97989 from Danil-Grigorev/atomic-label-selectors
  > 0f6528b Cleanup portforward streams after their usage
  > dbebcea Add apply subresource support to client-go's typed client
  > c71768c Merge pull request #97419 from d-kuro/d-kuro/fix-data-race
  > 11ceec1 Make selectors atomic
  > c36b96a Merge pull request #95897 from roycaihw/doc/fake-client-with-informer
  > b4027a9 Fix data race for leaderelection package
  > 67a7335 Merge pull request #100660 from dims/common-auth-plugins-should-always-be-available
  > 7dddf97 fix the fake client example: how to handle a race between the fake client and informer
  > 3da4c9c Common auth plugins should always be available
  > c8af929 Merge pull request #100606 from dims/providerless-tag-for-client-go-auth-plugins
  > 6c816ad providerless tag for client-go auth plugins
  > f6ce18a Merge pull request #100156 from ehashman/issue-100155
bumping knative.dev/serving c69f92c...0309174:
  > 0309174 upgrade to latest dependencies (#12481)
  > 76bdf86 Fix broken links (#12472)
  > 26b9a40 Move liveness checks to user container port (#12479)
  > 7dba334 Update net-contour nightly (#12478)
  > ef58256 Update net-kourier nightly (#12477)
  > 15c37e7 upgrade to latest dependencies (#12476)
  > 719ab3a Update net-gateway-api nightly (#12451)
  > 476f65b upgrade to latest dependencies (#12449)
  > 5aebd5d Update net-kourier nightly (#12444)
  > e53f455 Update community files (#12443)
  > 83e7063 Update net-contour nightly (#12469)
  > 5accc70 Update net-istio nightly (#12468)
  > 0e1d13e Update net-certmanager nightly (#12467)
  > 278af32 Update actions (#12454)

Signed-off-by: Knative Automation <automation@knative.team>
Authored by knative-automation on 2022-01-10 01:17:59 -08:00; committed by GitHub
parent dc4fa417bd
commit 99c69a9cc1
455 changed files with 31095 additions and 3530 deletions

go.mod

@@ -6,7 +6,6 @@ require (
 github.com/emicklei/go-restful v2.15.0+incompatible // indirect
 github.com/go-openapi/spec v0.20.2 // indirect
 github.com/google/go-cmp v0.5.6
-github.com/googleapis/gnostic v0.5.3 // indirect
 github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
 github.com/mitchellh/go-homedir v1.1.0
 github.com/smartystreets/assertions v1.0.0 // indirect
@@ -15,17 +14,16 @@ require (
 github.com/spf13/viper v1.8.1
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d
 gotest.tools/v3 v3.0.3
-k8s.io/api v0.21.4
-k8s.io/apiextensions-apiserver v0.21.4
-k8s.io/apimachinery v0.21.4
+k8s.io/api v0.22.5
+k8s.io/apiextensions-apiserver v0.22.5
+k8s.io/apimachinery v0.22.5
 k8s.io/cli-runtime v0.21.4
-k8s.io/client-go v0.21.4
-k8s.io/code-generator v0.21.4
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 // indirect
-knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455
+k8s.io/client-go v0.22.5
+k8s.io/code-generator v0.22.5
+knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d
 knative.dev/hack v0.0.0-20211222071919-abd085fc43de
-knative.dev/networking v0.0.0-20211223013028-62388a5f2853
-knative.dev/pkg v0.0.0-20211216142117-79271798f696
-knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7
+knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf
+knative.dev/pkg v0.0.0-20220105211333-96f18522d78d
+knative.dev/serving v0.28.1-0.20220107170125-03091748d279
 sigs.k8s.io/yaml v1.3.0
 )
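
For reference, a minimal sketch of reproducing the go.mod change above by hand with standard Go module tooling (the bot itself presumably drives this through the repo's update-deps scripts; module paths and versions are taken verbatim from the diff, the invocation is an assumption):

    # Pin the k8s.io modules to the v0.22.5 line.
    go get k8s.io/api@v0.22.5 k8s.io/apimachinery@v0.22.5 \
        k8s.io/apiextensions-apiserver@v0.22.5 \
        k8s.io/client-go@v0.22.5 k8s.io/code-generator@v0.22.5
    # Re-resolve the module graph; this also prunes requirements no longer
    # needed, e.g. the gnostic and k8s.io/utils "// indirect" entries
    # removed in the diff above.
    go mod tidy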

go.sum

@@ -57,14 +57,18 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
@@ -76,6 +80,7 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -128,6 +133,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96 h1:2P/dm3KbCLnRHQN/Ma50elhMx1Si9loEZe5hOrsuvuE=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -136,6 +143,7 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -166,6 +174,8 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -183,9 +193,11 @@ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc=
github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.4.1/go.mod h1:lhEpxMrIUkeu9rVRgoAbyqZ8GR8Hd3DUy+thHUxAHoI=
github.com/cloudevents/sdk-go/sql/v2 v2.8.0 h1:gWednxJHL0Ycf93XeEFyQxYj81A7b4eNwkzjNxGunAM=
github.com/cloudevents/sdk-go/sql/v2 v2.8.0/go.mod h1:u9acNJbhmi1wnDJro4PEAqbr4N1LTCyEUClErxbPS1A=
github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY=
github.com/cloudevents/sdk-go/v2 v2.7.0 h1:Pt+cOKWNG0tZZKRzuvfVsxcWArO0eq/UPKUxskyuSb8=
github.com/cloudevents/sdk-go/v2 v2.7.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
github.com/cloudevents/sdk-go/v2 v2.8.0 h1:kmRaLbsafZmidZ0rZ6h7WOMqCkRMcVTLV5lxV/HKQ9Y=
github.com/cloudevents/sdk-go/v2 v2.8.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -195,6 +207,9 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -366,12 +381,15 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -379,6 +397,7 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -451,6 +470,7 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
@@ -525,8 +545,9 @@ github.com/gonum/mathext v0.0.0-20181121095525-8a4bf007ea55/go.mod h1:fmo8aiSEWk
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4gD4vbwQxXXZ+WHmW6E9ixmNrwvs0iZs=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -583,8 +604,9 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.3 h1:2qsuRm+bzgwSIKikigPASa2GhW8H2Dn4Qq7UxD8K/48=
github.com/googleapis/gnostic v0.5.3/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -604,6 +626,7 @@ github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -667,6 +690,7 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -758,6 +782,7 @@ github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -788,6 +813,7 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -826,6 +852,7 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ=
github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ=
@@ -934,6 +961,7 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -991,6 +1019,7 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
github.com/tsenart/go-tsz v0.0.0-20180814235614-0bd30b3df1c3/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
github.com/tsenart/vegeta/v12 v12.8.4/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho=
@@ -1039,10 +1068,15 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -1055,7 +1089,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
@@ -1065,6 +1109,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -1197,8 +1242,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211205041911-012df41ee64c h1:7SfqwP5fxEtl/P02w5IhKc86ziJ+A25yFrkVgoy2FT8=
golang.org/x/net v0.0.0-20211205041911-012df41ee64c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1300,6 +1346,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1383,6 +1430,7 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1505,6 +1553,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1655,23 +1704,27 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc=
k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk=
k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU=
k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8=
k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk=
k8s.io/apiextensions-apiserver v0.22.5 h1:ML0QqT7FIlmZHN+9+2EtARJ3cJVHeoizt6GCteFRE0o=
k8s.io/apiextensions-apiserver v0.22.5/go.mod h1:tIXeZ0BrDxUb1PoAz+tgOz43Zi1Bp4BEEqVtUccMJbE=
k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw=
k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s=
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g=
k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8=
k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
@@ -1679,16 +1732,19 @@ k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc=
k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew=
k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo=
k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
k8s.io/cloud-provider v0.21.0/go.mod h1:z17TQgu3JgUFjcgby8sj5X86YdVK5Pbt+jm/eYMZU9M=
k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A=
k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
k8s.io/code-generator v0.22.5 h1:jn+mYXI5q7rzo7Bz/n8xZIgbe61SeXlIjU5jA8jLVps=
k8s.io/code-generator v0.22.5/go.mod h1:sbdWCOVob+KaQ5O7xs8PNNaCTpbWVqNgA6EPwLOmRNk=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg=
k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -1706,34 +1762,36 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
knative.dev/caching v0.0.0-20211206133228-c29dc56d8f03/go.mod h1:xki+LBTL1riXSoU2dKznqUfgOlQ2eO/F1WF+GMXxH0k=
knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455 h1:LnatQYBFh/tum+ATTVZKB1xV5UxwvA2bhFZUGPSve6I=
knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455/go.mod h1:4o3oerr1tmjWTV2n33Ar9Ss+jF/QksOsa4/81ghhOVg=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
knative.dev/caching v0.0.0-20220105210833-30ba0b6609c6/go.mod h1:SgmDCiZNxriP5d1ie3cDZ+/3nXVk8cCd7dEVPWz0beo=
knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d h1:SWnDa8RmF1jti2oM+lOXgUmvI0D0ohCw677OsVIbqLw=
knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d/go.mod h1:rJnn9hsSYQ89SS31Sjxjzj5OVCjzCXtdUDaXfYEmnvQ=
knative.dev/hack v0.0.0-20211122162614-813559cefdda/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack v0.0.0-20211222071919-abd085fc43de h1:K7UeyvIfdTjznffAZg2L4fDkOuFWEDiaxgEa+B33nP8=
knative.dev/hack v0.0.0-20211222071919-abd085fc43de/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack/schema v0.0.0-20211222071919-abd085fc43de/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
knative.dev/networking v0.0.0-20211209101835-8ef631418fc0/go.mod h1:+ozCw7PVf//G9+HOW04hfWnU8UJE5fmWAQkb+ieMaXY=
knative.dev/networking v0.0.0-20211223013028-62388a5f2853 h1:VZ/yJoR/eiyI/wyo1JNEgpRFyRaqGD8paBYOH1gU/nQ=
knative.dev/networking v0.0.0-20211223013028-62388a5f2853/go.mod h1:NTfJpL2xQVdJtdPYuIE2j7rxC4/Cttplh1g0oYqQJFE=
knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf h1:1RqxCIJBwvpahPVNCfxEk4Z/z7nHgmBhPp7ba9A1My0=
knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf/go.mod h1:kXbsW1qHQcwHyd7qV1bHeYxGysR6XHh/hCkEvk28R/s=
knative.dev/pkg v0.0.0-20211206113427-18589ac7627e/go.mod h1:E6B4RTjZyxe55a0kxOlnEHEl71zuG7gghnqYvNBKwBw=
knative.dev/pkg v0.0.0-20211216142117-79271798f696 h1:L/r5prSBhm+7x4br5g8Gij/OfF4nx12sorqMXCcnpm0=
knative.dev/pkg v0.0.0-20211216142117-79271798f696/go.mod h1:hrD91/shO1o4KMZa4oWhnbRPmVJhvq86TLy/STF/qf8=
knative.dev/pkg v0.0.0-20220104185830-52e42b760b54/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY=
knative.dev/pkg v0.0.0-20220105211333-96f18522d78d h1:KqTqUP+w382CaI7NdIGaFLSI0qq2vo4QT93zzbsLnYY=
knative.dev/pkg v0.0.0-20220105211333-96f18522d78d/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY=
knative.dev/reconciler-test v0.0.0-20211222120418-816f2192fec9/go.mod h1:dCq1Fuu+eUISdnxABMvoDhefF91DYwE6O3rTYTraXbw=
knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7 h1:vkv/sstZZtV9al/ZJ84l8TyWTLPGWZOpk7Ke9d6itBg=
knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7/go.mod h1:1d8YYUu0hY19KlIRs2SgAn/o64Hr265+3fhOtV3FFVA=
knative.dev/serving v0.28.1-0.20220107170125-03091748d279 h1:oojha0ReZFGPreNwVRZUR25Uelc6jobehximut1C+yY=
knative.dev/serving v0.28.1-0.20220107170125-03091748d279/go.mod h1:BzDqCMZ1YCvv2cnGdOT0/JbvrQCiu1Vzd7YNDirgr2M=
pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
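
Each go.sum entry above pairs a module path and version with an h1: dirhash: entries ending in /go.mod cover only that module's go.mod file, while the others cover the full module tree. As a rough sketch of where those digests come from, the same h1: value can be reproduced with golang.org/x/mod/sumdb/dirhash; the local directory below is hypothetical, standing in for a module as "go mod download" extracts it.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hypothetical path to an extracted module copy. HashDir walks the tree
	// and yields the h1: value that the go tool records in go.sum.
	h, err := dirhash.HashDir(
		"/tmp/mod/github.com/googleapis/gnostic@v0.5.5", // assumed local path
		"github.com/googleapis/gnostic@v0.5.5",          // prefix folded into the hash
		dirhash.Hash1,                                   // the "h1:" algorithm
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h)
}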

@@ -0,0 +1,26 @@
Copyright 2021 The ANTLR Project
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,26 @@
Copyright 2021 The ANTLR Project
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go

@@ -0,0 +1,152 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
var ATNInvalidAltNumber int
type ATN struct {
// DecisionToState is the decision points for all rules, subrules, optional
// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
grammarType int
// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
lexerActions []LexerAction
// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
maxTokenType int
modeNameToStartState map[string]*TokensStartState
modeToStartState []*TokensStartState
// ruleToStartState maps from rule index to starting state number.
ruleToStartState []*RuleStartState
// ruleToStopState maps from rule index to stop state number.
ruleToStopState []*RuleStopState
// ruleToTokenType maps the rule index to the resulting token type for lexer
// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
// specified, and otherwise is nil.
ruleToTokenType []int
states []ATNState
}
func NewATN(grammarType int, maxTokenType int) *ATN {
return &ATN{
grammarType: grammarType,
maxTokenType: maxTokenType,
modeNameToStartState: make(map[string]*TokensStartState),
}
}
// NextTokensInContext computes the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
return NewLL1Analyzer(a).Look(s, nil, ctx)
}
// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s while staying in the same rule. Token.EPSILON is in the set if we reach
// the end of the rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
if s.GetNextTokenWithinRule() != nil {
return s.GetNextTokenWithinRule()
}
s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
s.GetNextTokenWithinRule().readOnly = true
return s.GetNextTokenWithinRule()
}
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
if ctx == nil {
return a.NextTokensNoContext(s)
}
return a.NextTokensInContext(s, ctx)
}
func (a *ATN) addState(state ATNState) {
if state != nil {
state.SetATN(a)
state.SetStateNumber(len(a.states))
}
a.states = append(a.states, state)
}
func (a *ATN) removeState(state ATNState) {
a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
}
func (a *ATN) defineDecisionState(s DecisionState) int {
a.DecisionToState = append(a.DecisionToState, s)
s.setDecision(len(a.DecisionToState) - 1)
return s.getDecision()
}
func (a *ATN) getDecisionState(decision int) DecisionState {
if len(a.DecisionToState) == 0 {
return nil
}
return a.DecisionToState[decision]
}
// getExpectedTokens computes the set of input symbols which could follow ATN
// state number stateNumber in the specified full parse context ctx and returns
// the set of potentially valid input symbols which could follow the specified
// state in the specified context. This method considers the complete parser
// context, but does not evaluate semantic predicates (i.e. all predicates
// encountered during the calculation are assumed true). If a path in the ATN
// exists from the starting state to the RuleStopState of the outermost context
// without Matching any symbols, Token.EOF is added to the returned set.
//
// A nil ctx defaults to ParserRuleContext.EMPTY.
//
// It panics if the ATN does not contain state stateNumber.
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
if stateNumber < 0 || stateNumber >= len(a.states) {
panic("Invalid state number.")
}
s := a.states[stateNumber]
following := a.NextTokens(s, nil)
if !following.contains(TokenEpsilon) {
return following
}
expected := NewIntervalSet()
expected.addSet(following)
expected.removeOne(TokenEpsilon)
for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
invokingState := a.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
following = a.NextTokens(rt.(*RuleTransition).followState, nil)
expected.addSet(following)
expected.removeOne(TokenEpsilon)
ctx = ctx.GetParent().(RuleContext)
}
if following.contains(TokenEpsilon) {
expected.addOne(TokenEOF)
}
return expected
}
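
NextTokensNoContext above computes its answer once, caches it on the state, and freezes it (readOnly = true) so every later caller shares one immutable set. The following is a minimal standalone sketch of that memoize-and-freeze pattern, with illustrative names rather than the vendored API:

package main

import "fmt"

// tokenSet stands in for *IntervalSet; readOnly mirrors the freeze step.
type tokenSet struct {
	tokens   []int
	readOnly bool
}

// state stands in for ATNState with its Get/SetNextTokenWithinRule pair.
type state struct {
	nextWithinRule *tokenSet
}

// computeFollow stands in for NextTokensInContext; pretend it is expensive.
func computeFollow(s *state) *tokenSet {
	return &tokenSet{tokens: []int{1, 2, 3}}
}

// nextTokensNoContext mirrors the vendored method: return the cached set if
// present, otherwise compute once, freeze it, and cache it on the state.
func nextTokensNoContext(s *state) *tokenSet {
	if s.nextWithinRule != nil {
		return s.nextWithinRule
	}
	s.nextWithinRule = computeFollow(s)
	s.nextWithinRule.readOnly = true
	return s.nextWithinRule
}

func main() {
	s := &state{}
	first := nextTokensNoContext(s)
	second := nextTokensNoContext(s)             // served from the cache
	fmt.Println(first == second, first.readOnly) // true true
}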

@@ -0,0 +1,295 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
)
type comparable interface {
equals(other interface{}) bool
}
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
comparable
hash() int
GetState() ATNState
GetAlt() int
GetSemanticContext() SemanticContext
GetContext() PredictionContext
SetContext(PredictionContext)
GetReachesIntoOuterContext() int
SetReachesIntoOuterContext(int)
String() string
getPrecedenceFilterSuppressed() bool
setPrecedenceFilterSuppressed(bool)
}
type BaseATNConfig struct {
precedenceFilterSuppressed bool
state ATNState
alt int
context PredictionContext
semanticContext SemanticContext
reachesIntoOuterContext int
}
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
return &BaseATNConfig{
state: old.state,
alt: old.alt,
context: old.context,
semanticContext: old.semanticContext,
reachesIntoOuterContext: old.reachesIntoOuterContext,
}
}
func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
}
func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
if semanticContext == nil {
panic("semanticContext cannot be nil") // TODO: Necessary?
}
return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
}
func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}
func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
}
func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}
func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
}
func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
if semanticContext == nil {
panic("semanticContext cannot be nil")
}
return &BaseATNConfig{
state: state,
alt: c.GetAlt(),
context: context,
semanticContext: semanticContext,
reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
}
}
func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
return b.precedenceFilterSuppressed
}
func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
b.precedenceFilterSuppressed = v
}
func (b *BaseATNConfig) GetState() ATNState {
return b.state
}
func (b *BaseATNConfig) GetAlt() int {
return b.alt
}
func (b *BaseATNConfig) SetContext(v PredictionContext) {
b.context = v
}
func (b *BaseATNConfig) GetContext() PredictionContext {
return b.context
}
func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
return b.semanticContext
}
func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
return b.reachesIntoOuterContext
}
func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
b.reachesIntoOuterContext = v
}
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
if b == o {
return true
}
var other, ok = o.(*BaseATNConfig)
if !ok {
return false
}
var equal bool
if b.context == nil {
equal = other.context == nil
} else {
equal = b.context.equals(other.context)
}
var (
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
alts = b.alt == other.alt
cons = b.semanticContext.equals(other.semanticContext)
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
)
return nums && alts && cons && sups && equal
}
func (b *BaseATNConfig) hash() int {
var c int
if b.context != nil {
c = b.context.hash()
}
h := murmurInit(7)
h = murmurUpdate(h, b.state.GetStateNumber())
h = murmurUpdate(h, b.alt)
h = murmurUpdate(h, c)
h = murmurUpdate(h, b.semanticContext.hash())
return murmurFinish(h, 4)
}
func (b *BaseATNConfig) String() string {
var s1, s2, s3 string
if b.context != nil {
s1 = ",[" + fmt.Sprint(b.context) + "]"
}
if b.semanticContext != SemanticContextNone {
s2 = "," + fmt.Sprint(b.semanticContext)
}
if b.reachesIntoOuterContext > 0 {
s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
}
return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
}
type LexerATNConfig struct {
*BaseATNConfig
lexerActionExecutor *LexerActionExecutor
passedThroughNonGreedyDecision bool
}
func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
lexerActionExecutor: lexerActionExecutor,
}
}
func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
lexerActionExecutor: c.lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
lexerActionExecutor: lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{
BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
lexerActionExecutor: c.lexerActionExecutor,
passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
}
}
func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
func (l *LexerATNConfig) hash() int {
var f int
if l.passedThroughNonGreedyDecision {
f = 1
} else {
f = 0
}
h := murmurInit(7)
h = murmurUpdate(h, l.state.GetStateNumber())
h = murmurUpdate(h, l.alt)
h = murmurUpdate(h, l.context.hash())
h = murmurUpdate(h, l.semanticContext.hash())
h = murmurUpdate(h, f)
h = murmurUpdate(h, l.lexerActionExecutor.hash())
h = murmurFinish(h, 6)
return h
}
func (l *LexerATNConfig) equals(other interface{}) bool {
var othert, ok = other.(*LexerATNConfig)
if l == other {
return true
} else if !ok {
return false
} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
return false
}
var b bool
if l.lexerActionExecutor != nil {
b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
if b {
return false
}
return l.BaseATNConfig.equals(othert.BaseATNConfig)
}
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
var ds, ok = target.(DecisionState)
return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
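
The hash methods in this file delegate to murmurInit/murmurUpdate/murmurFinish, which are defined elsewhere in the runtime rather than in this diff. As a self-contained sketch, here is the standard 32-bit MurmurHash3 pattern those helpers follow — an illustration only, not the vendored implementation:

package main

import "fmt"

// murmurInit seeds the running hash (ANTLR uses small seeds such as 7).
func murmurInit(seed int) int { return seed }

// murmurUpdate mixes one 32-bit word into the running hash (one MurmurHash3 round).
func murmurUpdate(h, value int) int {
    const c1, c2 = 0xCC9E2D51, 0x1B873593
    k := uint32(value)
    k *= c1
    k = (k << 15) | (k >> 17) // rotate left 15
    k *= c2
    hu := uint32(h) ^ k
    hu = (hu << 13) | (hu >> 19) // rotate left 13
    hu = hu*5 + 0xE6546B64
    return int(hu)
}

// murmurFinish applies the avalanche step; count is the number of words mixed in.
func murmurFinish(h, count int) int {
    hu := uint32(h) ^ uint32(count*4) // length in bytes
    hu ^= hu >> 16
    hu *= 0x85EBCA6B
    hu ^= hu >> 13
    hu *= 0xC2B2AE35
    hu ^= hu >> 16
    return int(hu)
}

func main() {
    h := murmurInit(7)
    for _, v := range []int{42, 1, 0} { // e.g. state number, alt, context hash
        h = murmurUpdate(h, v)
    }
    fmt.Println(murmurFinish(h, 3))
}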

View File

@@ -0,0 +1,407 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "fmt"
type ATNConfigSet interface {
hash() int
Add(ATNConfig, *DoubleDict) bool
AddAll([]ATNConfig) bool
GetStates() Set
GetPredicates() []SemanticContext
GetItems() []ATNConfig
OptimizeConfigs(interpreter *BaseATNSimulator)
Equals(other interface{}) bool
Length() int
IsEmpty() bool
Contains(ATNConfig) bool
ContainsFast(ATNConfig) bool
Clear()
String() string
HasSemanticContext() bool
SetHasSemanticContext(v bool)
ReadOnly() bool
SetReadOnly(bool)
GetConflictingAlts() *BitSet
SetConflictingAlts(*BitSet)
Alts() *BitSet
FullContext() bool
GetUniqueAlt() int
SetUniqueAlt(int)
GetDipsIntoOuterContext() bool
SetDipsIntoOuterContext(bool)
}
// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type BaseATNConfigSet struct {
cachedHash int
// configLookup is used to determine whether two BaseATNConfigSets are equal. We
// need all configurations with the same (s, i, _, semctx) to be equal. A key
// effectively doubles the number of objects associated with ATNConfigs. All
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
// read-only because a set becomes a DFA state.
configLookup Set
// configs is the added elements.
configs []ATNConfig
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
// info together because it saves recomputation. Can we track conflicts as they
// are added to save scanning configs later?
conflictingAlts *BitSet
// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
// we hit a pred while computing a closure operation. Do not make a DFA state
// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
dipsIntoOuterContext bool
// fullCtx is whether it is part of a full context LL prediction. Used to
// determine how to merge $. It is a wildcard with SLL, but not for an LL
// context merge.
fullCtx bool
// Used in parser and lexer. In a lexer, it indicates we hit a pred
// while computing a closure operation. Don't make a DFA state from this set in that case.
hasSemanticContext bool
// readOnly is whether it is read-only. Do not
// allow any code to manipulate the set if true because DFA states will point at
// sets and those must not change. It does not protect other fields; conflictingAlts
// in particular, which is assigned after readOnly.
readOnly bool
uniqueAlt int
}
func (b *BaseATNConfigSet) Alts() *BitSet {
alts := NewBitSet()
for _, it := range b.configs {
alts.add(it.GetAlt())
}
return alts
}
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
return &BaseATNConfigSet{
cachedHash: -1,
configLookup: NewArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
fullCtx: fullCtx,
}
}
// Add merges contexts with existing configs for (s, i, pi, _), where s is the
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
// dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
if b.readOnly {
panic("set is read-only")
}
if config.GetSemanticContext() != SemanticContextNone {
b.hasSemanticContext = true
}
if config.GetReachesIntoOuterContext() > 0 {
b.dipsIntoOuterContext = true
}
existing := b.configLookup.Add(config).(ATNConfig)
if existing == config {
b.cachedHash = -1
b.configs = append(b.configs, config) // Track order here
return true
}
// Merge a previous (s, i, pi, _) with it and save the result
rootIsWildcard := !b.fullCtx
merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
// No need to check for existing.context because config.context is in the cache,
// since the only way to create new graphs is the "call rule" and here. We cache
// at both places.
existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
// Preserve the precedence filter suppression during the merge
if config.getPrecedenceFilterSuppressed() {
existing.setPrecedenceFilterSuppressed(true)
}
// Replace the context because there is no need to do alt mapping
existing.SetContext(merged)
return true
}
func (b *BaseATNConfigSet) GetStates() Set {
states := NewArray2DHashSet(nil, nil)
for i := 0; i < len(b.configs); i++ {
states.Add(b.configs[i].GetState())
}
return states
}
func (b *BaseATNConfigSet) HasSemanticContext() bool {
return b.hasSemanticContext
}
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
b.hasSemanticContext = v
}
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
preds := make([]SemanticContext, 0)
for i := 0; i < len(b.configs); i++ {
c := b.configs[i].GetSemanticContext()
if c != SemanticContextNone {
preds = append(preds, c)
}
}
return preds
}
func (b *BaseATNConfigSet) GetItems() []ATNConfig {
return b.configs
}
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
if b.readOnly {
panic("set is read-only")
}
if b.configLookup.Len() == 0 {
return
}
for i := 0; i < len(b.configs); i++ {
config := b.configs[i]
config.SetContext(interpreter.getCachedContext(config.GetContext()))
}
}
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
for i := 0; i < len(coll); i++ {
b.Add(coll[i], nil)
}
return false
}
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
if b == other {
return true
} else if _, ok := other.(*BaseATNConfigSet); !ok {
return false
}
other2 := other.(*BaseATNConfigSet)
return b.configs != nil &&
// TODO: b.configs.equals(other2.configs) && // TODO: Is this necessary?
b.fullCtx == other2.fullCtx &&
b.uniqueAlt == other2.uniqueAlt &&
b.conflictingAlts == other2.conflictingAlts &&
b.hasSemanticContext == other2.hasSemanticContext &&
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
}
func (b *BaseATNConfigSet) hash() int {
if b.readOnly {
if b.cachedHash == -1 {
b.cachedHash = b.hashCodeConfigs()
}
return b.cachedHash
}
return b.hashCodeConfigs()
}
func (b *BaseATNConfigSet) hashCodeConfigs() int {
h := 1
for _, config := range b.configs {
h = 31*h + config.hash()
}
return h
}
func (b *BaseATNConfigSet) Length() int {
return len(b.configs)
}
func (b *BaseATNConfigSet) IsEmpty() bool {
return len(b.configs) == 0
}
func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
if b.configLookup == nil {
panic("not implemented for read-only sets")
}
return b.configLookup.Contains(item)
}
func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
if b.configLookup == nil {
panic("not implemented for read-only sets")
}
return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
}
func (b *BaseATNConfigSet) Clear() {
if b.readOnly {
panic("set is read-only")
}
b.configs = make([]ATNConfig, 0)
b.cachedHash = -1
b.configLookup = NewArray2DHashSet(nil, equalATNConfigs)
}
func (b *BaseATNConfigSet) FullContext() bool {
return b.fullCtx
}
func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
return b.dipsIntoOuterContext
}
func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
b.dipsIntoOuterContext = v
}
func (b *BaseATNConfigSet) GetUniqueAlt() int {
return b.uniqueAlt
}
func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
b.uniqueAlt = v
}
func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
return b.conflictingAlts
}
func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
b.conflictingAlts = v
}
func (b *BaseATNConfigSet) ReadOnly() bool {
return b.readOnly
}
func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
b.readOnly = readOnly
if readOnly {
b.configLookup = nil // Read only, so no need for the lookup cache
}
}
func (b *BaseATNConfigSet) String() string {
s := "["
for i, c := range b.configs {
s += c.String()
if i != len(b.configs)-1 {
s += ", "
}
}
s += "]"
if b.hasSemanticContext {
s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
}
if b.uniqueAlt != ATNInvalidAltNumber {
s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
}
if b.conflictingAlts != nil {
s += ",conflictingAlts=" + b.conflictingAlts.String()
}
if b.dipsIntoOuterContext {
s += ",dipsIntoOuterContext"
}
return s
}
type OrderedATNConfigSet struct {
*BaseATNConfigSet
}
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
b := NewBaseATNConfigSet(false)
b.configLookup = NewArray2DHashSet(nil, nil)
return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
func hashATNConfig(i interface{}) int {
o := i.(ATNConfig)
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
hash = 31*hash + o.GetSemanticContext().hash()
return hash
}
func equalATNConfigs(a, b interface{}) bool {
if a == nil || b == nil {
return false
}
if a == b {
return true
}
var ai, ok = a.(ATNConfig)
var bi, ok1 = b.(ATNConfig)
if !ok || !ok1 {
return false
}
if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
return false
}
if ai.GetAlt() != bi.GetAlt() {
return false
}
return ai.GetSemanticContext().equals(bi.GetSemanticContext())
}
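
Add above keys configurations by (state, alt, semanticContext) and merges prediction contexts when a key collides, which is what lets the set behave as a graph-structured stack. A toy, self-contained sketch of that merge-on-key behavior, with simplified stand-in types (none of these are the vendored ones):

package main

import "fmt"

// config is a simplified stand-in: keyed by (state, alt, semCtx); the contexts
// set plays the role of the prediction context being merged.
type config struct {
    state, alt int
    semCtx     string
    contexts   map[string]bool
}

type key struct {
    state, alt int
    semCtx     string
}

type configSet struct {
    lookup map[key]*config // hashed without the context, as configLookup is
    order  []*config       // insertion order, as configs is
}

func (s *configSet) add(c *config) {
    k := key{c.state, c.alt, c.semCtx}
    if existing, ok := s.lookup[k]; ok {
        // Same (s, i, pi): merge contexts instead of storing a duplicate.
        for ctx := range c.contexts {
            existing.contexts[ctx] = true
        }
        return
    }
    s.lookup[k] = c
    s.order = append(s.order, c)
}

func main() {
    s := &configSet{lookup: map[key]*config{}}
    s.add(&config{state: 1, alt: 1, semCtx: "none", contexts: map[string]bool{"[$]": true}})
    s.add(&config{state: 1, alt: 1, semCtx: "none", contexts: map[string]bool{"[5 $]": true}}) // merged
    s.add(&config{state: 2, alt: 1, semCtx: "none", contexts: map[string]bool{"[$]": true}})   // new entry
    fmt.Println(len(s.order), len(s.order[0].contexts)) // 2 2
}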

View File

@@ -0,0 +1,25 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
type ATNDeserializationOptions struct {
readOnly bool
verifyATN bool
generateRuleBypassTransitions bool
}
func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
o := new(ATNDeserializationOptions)
if CopyFrom != nil {
o.readOnly = CopyFrom.readOnly
o.verifyATN = CopyFrom.verifyATN
o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
}
return o
}

View File

@@ -0,0 +1,828 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"encoding/hex"
"fmt"
"strconv"
"strings"
"unicode/utf16"
)
// This is the earliest supported serialized UUID.
// Stick to the serialized version for now; we don't need a UUID instance.
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
// This list contains all of the currently supported UUIDs, ordered by when
// the feature first appeared in this branch.
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}
var SerializedVersion = 3
// This is the current serialized UUID.
var SerializedUUID = AddedUnicodeSMP
type LoopEndStateIntPair struct {
item0 *LoopEndState
item1 int
}
type BlockStartStateIntPair struct {
item0 BlockStartState
item1 int
}
type ATNDeserializer struct {
deserializationOptions *ATNDeserializationOptions
data []rune
pos int
uuid string
}
func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
if options == nil {
options = ATNDeserializationOptionsdefaultOptions
}
return &ATNDeserializer{deserializationOptions: options}
}
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
return i
}
}
return -1
}
// isFeatureSupported determines if a particular serialized representation of an
// ATN supports a particular feature, identified by the UUID used for
// serializing the ATN at the time the feature was first introduced. feature is
// the UUID marking the first time the feature was supported in the serialized
// ATN, and actualUUID is the UUID of the serialized ATN currently being
// deserialized. It returns true if actualUUID represents a serialized ATN
// from the point the feature was introduced onward, and false otherwise.
func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
idx1 := stringInSlice(feature, SupportedUUIDs)
if idx1 < 0 {
return false
}
idx2 := stringInSlice(actualUUID, SupportedUUIDs)
return idx2 >= idx1
}
func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
a.reset(utf16.Decode(data))
a.checkVersion()
a.checkUUID()
atn := a.readATN()
a.readStates(atn)
a.readRules(atn)
a.readModes(atn)
sets := make([]*IntervalSet, 0)
// First, deserialize sets with 16-bit arguments <= U+FFFF.
sets = a.readSets(atn, sets, a.readInt)
// Next, if the ATN was serialized with the Unicode SMP feature,
// deserialize sets with 32-bit arguments <= U+10FFFF.
if a.isFeatureSupported(AddedUnicodeSMP, a.uuid) {
sets = a.readSets(atn, sets, a.readInt32)
}
a.readEdges(atn, sets)
a.readDecisions(atn)
a.readLexerActions(atn)
a.markPrecedenceDecisions(atn)
a.verifyATN(atn)
if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
a.generateRuleBypassTransitions(atn)
// Re-verify after modification
a.verifyATN(atn)
}
return atn
}
func (a *ATNDeserializer) reset(data []rune) {
temp := make([]rune, len(data))
for i, c := range data {
// Don't adjust the first value since that's the version number
if i == 0 {
temp[i] = c
} else if c > 1 {
temp[i] = c - 2
} else {
temp[i] = c + 65533
}
}
a.data = temp
a.pos = 0
}
func (a *ATNDeserializer) checkVersion() {
version := a.readInt()
if version != SerializedVersion {
panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
}
}
func (a *ATNDeserializer) checkUUID() {
uuid := a.readUUID()
if stringInSlice(uuid, SupportedUUIDs) < 0 {
panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
}
a.uuid = uuid
}
func (a *ATNDeserializer) readATN() *ATN {
grammarType := a.readInt()
maxTokenType := a.readInt()
return NewATN(grammarType, maxTokenType)
}
func (a *ATNDeserializer) readStates(atn *ATN) {
loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
endStateNumbers := make([]BlockStartStateIntPair, 0)
nstates := a.readInt()
for i := 0; i < nstates; i++ {
stype := a.readInt()
// Ignore bad types of states
if stype == ATNStateInvalidType {
atn.addState(nil)
continue
}
ruleIndex := a.readInt()
if ruleIndex == 0xFFFF {
ruleIndex = -1
}
s := a.stateFactory(stype, ruleIndex)
if stype == ATNStateLoopEnd {
loopBackStateNumber := a.readInt()
loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
} else if s2, ok := s.(BlockStartState); ok {
endStateNumber := a.readInt()
endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
}
atn.addState(s)
}
// Delay the assignment of loop back and end states until we know all the state
// instances have been initialized
for j := 0; j < len(loopBackStateNumbers); j++ {
pair := loopBackStateNumbers[j]
pair.item0.loopBackState = atn.states[pair.item1]
}
for j := 0; j < len(endStateNumbers); j++ {
pair := endStateNumbers[j]
pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
}
numNonGreedyStates := a.readInt()
for j := 0; j < numNonGreedyStates; j++ {
stateNumber := a.readInt()
atn.states[stateNumber].(DecisionState).setNonGreedy(true)
}
numPrecedenceStates := a.readInt()
for j := 0; j < numPrecedenceStates; j++ {
stateNumber := a.readInt()
atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
}
}
func (a *ATNDeserializer) readRules(atn *ATN) {
nrules := a.readInt()
if atn.grammarType == ATNTypeLexer {
atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
}
atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)
for i := 0; i < nrules; i++ {
s := a.readInt()
startState := atn.states[s].(*RuleStartState)
atn.ruleToStartState[i] = startState
if atn.grammarType == ATNTypeLexer {
tokenType := a.readInt()
if tokenType == 0xFFFF {
tokenType = TokenEOF
}
atn.ruleToTokenType[i] = tokenType
}
}
atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if s2, ok := state.(*RuleStopState); ok {
atn.ruleToStopState[s2.ruleIndex] = s2
atn.ruleToStartState[s2.ruleIndex].stopState = s2
}
}
}
func (a *ATNDeserializer) readModes(atn *ATN) {
nmodes := a.readInt()
for i := 0; i < nmodes; i++ {
s := a.readInt()
atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
}
}
func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
m := a.readInt()
for i := 0; i < m; i++ {
iset := NewIntervalSet()
sets = append(sets, iset)
n := a.readInt()
containsEOF := a.readInt()
if containsEOF != 0 {
iset.addOne(-1)
}
for j := 0; j < n; j++ {
i1 := readUnicode()
i2 := readUnicode()
iset.addRange(i1, i2)
}
}
return sets
}
func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
nedges := a.readInt()
for i := 0; i < nedges; i++ {
var (
src = a.readInt()
trg = a.readInt()
ttype = a.readInt()
arg1 = a.readInt()
arg2 = a.readInt()
arg3 = a.readInt()
trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
srcState = atn.states[src]
)
srcState.AddTransition(trans, -1)
}
// Edges for rule stop states can be derived, so they are not serialized
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for j := 0; j < len(state.GetTransitions()); j++ {
var t, ok = state.GetTransitions()[j].(*RuleTransition)
if !ok {
continue
}
outermostPrecedenceReturn := -1
if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
if t.precedence == 0 {
outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
}
}
trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)
atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
}
}
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if s2, ok := state.(*BaseBlockStartState); ok {
// We need to know the end state to set its start state
if s2.endState == nil {
panic("IllegalState")
}
// Block end states can only be associated to a single block start state
if s2.endState.startState != nil {
panic("IllegalState")
}
s2.endState.startState = state
}
if s2, ok := state.(*PlusLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*PlusBlockStartState); ok {
t2.loopBackState = state
}
}
} else if s2, ok := state.(*StarLoopbackState); ok {
for j := 0; j < len(s2.GetTransitions()); j++ {
target := s2.GetTransitions()[j].getTarget()
if t2, ok := target.(*StarLoopEntryState); ok {
t2.loopBackState = state
}
}
}
}
}
func (a *ATNDeserializer) readDecisions(atn *ATN) {
ndecisions := a.readInt()
for i := 0; i < ndecisions; i++ {
s := a.readInt()
decState := atn.states[s].(DecisionState)
atn.DecisionToState = append(atn.DecisionToState, decState)
decState.setDecision(i)
}
}
func (a *ATNDeserializer) readLexerActions(atn *ATN) {
if atn.grammarType == ATNTypeLexer {
count := a.readInt()
atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)
for i := 0; i < count; i++ {
actionType := a.readInt()
data1 := a.readInt()
if data1 == 0xFFFF {
data1 = -1
}
data2 := a.readInt()
if data2 == 0xFFFF {
data2 = -1
}
lexerAction := a.lexerActionFactory(actionType, data1, data2)
atn.lexerActions[i] = lexerAction
}
}
}
func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
count := len(atn.ruleToStartState)
for i := 0; i < count; i++ {
atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
}
for i := 0; i < count; i++ {
a.generateRuleBypassTransition(atn, i)
}
}
func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart := NewBasicBlockStartState()
bypassStart.ruleIndex = idx
atn.addState(bypassStart)
bypassStop := NewBlockEndState()
bypassStop.ruleIndex = idx
atn.addState(bypassStop)
bypassStart.endState = bypassStop
atn.defineDecisionState(bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
var excludeTransition Transition
var endState ATNState
if atn.ruleToStartState[idx].isPrecedenceRule {
// Wrap from the beginning of the rule to the StarLoopEntryState
endState = nil
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if a.stateIsEndStateFor(state, idx) != nil {
endState = state
excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
break
}
}
if excludeTransition == nil {
panic("Couldn't identify final state of the precedence rule prefix section.")
}
} else {
endState = atn.ruleToStopState[idx]
}
// All non-excluded transitions that currently target end state need to target
// blockEnd instead
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
for j := 0; j < len(state.GetTransitions()); j++ {
transition := state.GetTransitions()[j]
if transition == excludeTransition {
continue
}
if transition.getTarget() == endState {
transition.setTarget(bypassStop)
}
}
}
// All transitions leaving the rule start state need to leave blockStart instead
ruleToStartState := atn.ruleToStartState[idx]
for len(ruleToStartState.GetTransitions()) > 0 {
// Move the last transition from the rule start state to bypassStart. The
// original loop never shrank the transition list, so it could not terminate.
transitions := ruleToStartState.GetTransitions()
bypassStart.AddTransition(transitions[len(transitions)-1], -1)
ruleToStartState.SetTransitions(transitions[:len(transitions)-1])
}
// Link the new states
atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
MatchState := NewBasicState()
atn.addState(MatchState)
MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
}
func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
if state.GetRuleIndex() != idx {
return nil
}
if _, ok := state.(*StarLoopEntryState); !ok {
return nil
}
maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
return nil
}
var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
return state
}
return nil
}
// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
for _, state := range atn.states {
if _, ok := state.(*StarLoopEntryState); !ok {
continue
}
// We analyze the ATN to determine whether an ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
if s3.epsilonOnlyTransitions && ok2 {
state.(*StarLoopEntryState).precedenceRuleDecision = true
}
}
}
}
}
func (a *ATNDeserializer) verifyATN(atn *ATN) {
if !a.deserializationOptions.verifyATN {
return
}
// Verify assumptions
for i := 0; i < len(atn.states); i++ {
state := atn.states[i]
if state == nil {
continue
}
a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
switch s2 := state.(type) {
case *PlusBlockStartState:
a.checkCondition(s2.loopBackState != nil, "")
case *StarLoopEntryState:
a.checkCondition(s2.loopBackState != nil, "")
a.checkCondition(len(s2.GetTransitions()) == 2, "")
// Dispatch on the type of the first transition's target to distinguish
// greedy from non-greedy star loops; switching on state again would never
// match these cases, since state is already a *StarLoopEntryState here.
switch s2.GetTransitions()[0].getTarget().(type) {
case *StarBlockStartState:
var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)
a.checkCondition(ok2, "")
a.checkCondition(!s2.nonGreedy, "")
case *LoopEndState:
var _, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
a.checkCondition(ok2, "")
a.checkCondition(s2.nonGreedy, "")
default:
panic("IllegalState")
}
case *StarLoopbackState:
a.checkCondition(len(state.GetTransitions()) == 1, "")
var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
a.checkCondition(ok2, "")
case *LoopEndState:
a.checkCondition(s2.loopBackState != nil, "")
case *RuleStartState:
a.checkCondition(s2.stopState != nil, "")
case *BaseBlockStartState:
a.checkCondition(s2.endState != nil, "")
case *BlockEndState:
a.checkCondition(s2.startState != nil, "")
case DecisionState:
a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
default:
var _, ok = s2.(*RuleStopState)
a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
}
}
}
func (a *ATNDeserializer) checkCondition(condition bool, message string) {
if !condition {
if message == "" {
message = "IllegalState"
}
panic(message)
}
}
func (a *ATNDeserializer) readInt() int {
v := a.data[a.pos]
a.pos++
return int(v)
}
func (a *ATNDeserializer) readInt32() int {
var low = a.readInt()
var high = a.readInt()
return low | (high << 16)
}
//TODO
//func (a *ATNDeserializer) readLong() int64 {
// panic("Not implemented")
// var low = a.readInt32()
// var high = a.readInt32()
// return (low & 0x00000000FFFFFFFF) | (high << 32)
//}
func createByteToHex() []string {
bth := make([]string, 256)
for i := 0; i < 256; i++ {
bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)}))
}
return bth
}
var byteToHex = createByteToHex()
func (a *ATNDeserializer) readUUID() string {
bb := make([]int, 16)
for i := 7; i >= 0; i-- {
integer := a.readInt()
bb[(2*i)+1] = integer & 0xFF
bb[2*i] = (integer >> 8) & 0xFF
}
return byteToHex[bb[0]] + byteToHex[bb[1]] +
byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
byteToHex[bb[10]] + byteToHex[bb[11]] +
byteToHex[bb[12]] + byteToHex[bb[13]] +
byteToHex[bb[14]] + byteToHex[bb[15]]
}
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
case TransitionEPSILON:
return NewEpsilonTransition(target, -1)
case TransitionRANGE:
if arg3 != 0 {
return NewRangeTransition(target, TokenEOF, arg2)
}
return NewRangeTransition(target, arg1, arg2)
case TransitionRULE:
return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
case TransitionPREDICATE:
return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
case TransitionPRECEDENCE:
return NewPrecedencePredicateTransition(target, arg1)
case TransitionATOM:
if arg3 != 0 {
return NewAtomTransition(target, TokenEOF)
}
return NewAtomTransition(target, arg1)
case TransitionACTION:
return NewActionTransition(target, arg1, arg2, arg3 != 0)
case TransitionSET:
return NewSetTransition(target, sets[arg1])
case TransitionNOTSET:
return NewNotSetTransition(target, sets[arg1])
case TransitionWILDCARD:
return NewWildcardTransition(target)
}
panic("The specified transition type is not valid.")
}
func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
var s ATNState
switch typeIndex {
case ATNStateInvalidType:
return nil
case ATNStateBasic:
s = NewBasicState()
case ATNStateRuleStart:
s = NewRuleStartState()
case ATNStateBlockStart:
s = NewBasicBlockStartState()
case ATNStatePlusBlockStart:
s = NewPlusBlockStartState()
case ATNStateStarBlockStart:
s = NewStarBlockStartState()
case ATNStateTokenStart:
s = NewTokensStartState()
case ATNStateRuleStop:
s = NewRuleStopState()
case ATNStateBlockEnd:
s = NewBlockEndState()
case ATNStateStarLoopBack:
s = NewStarLoopbackState()
case ATNStateStarLoopEntry:
s = NewStarLoopEntryState()
case ATNStatePlusLoopBack:
s = NewPlusLoopbackState()
case ATNStateLoopEnd:
s = NewLoopEndState()
default:
panic(fmt.Sprintf("state type %d is invalid", typeIndex))
}
s.SetRuleIndex(ruleIndex)
return s
}
func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
switch typeIndex {
case LexerActionTypeChannel:
return NewLexerChannelAction(data1)
case LexerActionTypeCustom:
return NewLexerCustomAction(data1, data2)
case LexerActionTypeMode:
return NewLexerModeAction(data1)
case LexerActionTypeMore:
return LexerMoreActionINSTANCE
case LexerActionTypePopMode:
return LexerPopModeActionINSTANCE
case LexerActionTypePushMode:
return NewLexerPushModeAction(data1)
case LexerActionTypeSkip:
return LexerSkipActionINSTANCE
case LexerActionTypeType:
return NewLexerTypeAction(data1)
default:
panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
}
}
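
Two encoding details above are easy to miss: reset shifts every value after the version word back down by 2 (the serializer shifts values up, reportedly to keep them out of problematic UTF-16 code-unit ranges), and readInt32 reassembles a 32-bit value from two 16-bit words, low word first. A standalone sketch of both transforms (an illustration, not the vendored code):

package main

import "fmt"

// undoShift mirrors reset above: every value except the leading version word
// was shifted up by 2 during serialization and is shifted back down on read.
func undoShift(data []uint16) []int {
    out := make([]int, len(data))
    for i, c := range data {
        switch {
        case i == 0:
            out[i] = int(c) // version word is not shifted
        case c > 1:
            out[i] = int(c) - 2
        default:
            out[i] = int(c) + 65533
        }
    }
    return out
}

// decode32 mirrors readInt32 above: two 16-bit words, low word first.
func decode32(low, high int) int {
    return low | (high << 16)
}

func main() {
    fmt.Println(undoShift([]uint16{3, 2, 1}))     // [3 0 65534]
    fmt.Printf("%#x\n", decode32(0xFFFF, 0x0010)) // 0x10ffff, the top of the Unicode range
}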

View File

@@ -0,0 +1,50 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
type IATNSimulator interface {
SharedContextCache() *PredictionContextCache
ATN() *ATN
DecisionToDFA() []*DFA
}
type BaseATNSimulator struct {
atn *ATN
sharedContextCache *PredictionContextCache
decisionToDFA []*DFA
}
func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
b := new(BaseATNSimulator)
b.atn = atn
b.sharedContextCache = sharedContextCache
return b
}
func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
if b.sharedContextCache == nil {
return context
}
visited := make(map[PredictionContext]PredictionContext)
return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
return b.sharedContextCache
}
func (b *BaseATNSimulator) ATN() *ATN {
return b.atn
}
func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
return b.decisionToDFA
}

View File

@@ -0,0 +1,386 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "strconv"
// Constants for serialization.
const (
ATNStateInvalidType = 0
ATNStateBasic = 1
ATNStateRuleStart = 2
ATNStateBlockStart = 3
ATNStatePlusBlockStart = 4
ATNStateStarBlockStart = 5
ATNStateTokenStart = 6
ATNStateRuleStop = 7
ATNStateBlockEnd = 8
ATNStateStarLoopBack = 9
ATNStateStarLoopEntry = 10
ATNStatePlusLoopBack = 11
ATNStateLoopEnd = 12
ATNStateInvalidStateNumber = -1
)
var ATNStateInitialNumTransitions = 4
type ATNState interface {
GetEpsilonOnlyTransitions() bool
GetRuleIndex() int
SetRuleIndex(int)
GetNextTokenWithinRule() *IntervalSet
SetNextTokenWithinRule(*IntervalSet)
GetATN() *ATN
SetATN(*ATN)
GetStateType() int
GetStateNumber() int
SetStateNumber(int)
GetTransitions() []Transition
SetTransitions([]Transition)
AddTransition(Transition, int)
String() string
hash() int
}
type BaseATNState struct {
// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
NextTokenWithinRule *IntervalSet
// atn is the current ATN.
atn *ATN
epsilonOnlyTransitions bool
// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
ruleIndex int
stateNumber int
stateType int
// Track the transitions emanating from this ATN state.
transitions []Transition
}
func NewBaseATNState() *BaseATNState {
return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}
func (as *BaseATNState) GetRuleIndex() int {
return as.ruleIndex
}
func (as *BaseATNState) SetRuleIndex(v int) {
as.ruleIndex = v
}
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
return as.epsilonOnlyTransitions
}
func (as *BaseATNState) GetATN() *ATN {
return as.atn
}
func (as *BaseATNState) SetATN(atn *ATN) {
as.atn = atn
}
func (as *BaseATNState) GetTransitions() []Transition {
return as.transitions
}
func (as *BaseATNState) SetTransitions(t []Transition) {
as.transitions = t
}
func (as *BaseATNState) GetStateType() int {
return as.stateType
}
func (as *BaseATNState) GetStateNumber() int {
return as.stateNumber
}
func (as *BaseATNState) SetStateNumber(stateNumber int) {
as.stateNumber = stateNumber
}
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
return as.NextTokenWithinRule
}
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
as.NextTokenWithinRule = v
}
func (as *BaseATNState) hash() int {
return as.stateNumber
}
func (as *BaseATNState) String() string {
return strconv.Itoa(as.stateNumber)
}
func (as *BaseATNState) equals(other interface{}) bool {
if ot, ok := other.(ATNState); ok {
return as.stateNumber == ot.GetStateNumber()
}
return false
}
func (as *BaseATNState) isNonGreedyExitState() bool {
return false
}
func (as *BaseATNState) AddTransition(trans Transition, index int) {
if len(as.transitions) == 0 {
as.epsilonOnlyTransitions = trans.getIsEpsilon()
} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
as.epsilonOnlyTransitions = false
}
if index == -1 {
as.transitions = append(as.transitions, trans)
} else {
as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
// TODO: as.transitions.splice(index, 1, trans)
}
}
type BasicState struct {
*BaseATNState
}
func NewBasicState() *BasicState {
b := NewBaseATNState()
b.stateType = ATNStateBasic
return &BasicState{BaseATNState: b}
}
type DecisionState interface {
ATNState
getDecision() int
setDecision(int)
getNonGreedy() bool
setNonGreedy(bool)
}
type BaseDecisionState struct {
*BaseATNState
decision int
nonGreedy bool
}
func NewBaseDecisionState() *BaseDecisionState {
return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}
func (s *BaseDecisionState) getDecision() int {
return s.decision
}
func (s *BaseDecisionState) setDecision(b int) {
s.decision = b
}
func (s *BaseDecisionState) getNonGreedy() bool {
return s.nonGreedy
}
func (s *BaseDecisionState) setNonGreedy(b bool) {
s.nonGreedy = b
}
type BlockStartState interface {
DecisionState
getEndState() *BlockEndState
setEndState(*BlockEndState)
}
// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
*BaseDecisionState
endState *BlockEndState
}
func NewBlockStartState() *BaseBlockStartState {
return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}
func (s *BaseBlockStartState) getEndState() *BlockEndState {
return s.endState
}
func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
s.endState = b
}
type BasicBlockStartState struct {
*BaseBlockStartState
}
func NewBasicBlockStartState() *BasicBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStateBlockStart
return &BasicBlockStartState{BaseBlockStartState: b}
}
// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
*BaseATNState
startState ATNState
}
func NewBlockEndState() *BlockEndState {
b := NewBaseATNState()
b.stateType = ATNStateBlockEnd
return &BlockEndState{BaseATNState: b}
}
// RuleStopState is the last node in the ATN for a rule, unless that rule is the
// start symbol. In that case, there is one transition to EOF. Later, we might
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
*BaseATNState
}
func NewRuleStopState() *RuleStopState {
b := NewBaseATNState()
b.stateType = ATNStateRuleStop
return &RuleStopState{BaseATNState: b}
}
type RuleStartState struct {
*BaseATNState
stopState ATNState
isPrecedenceRule bool
}
func NewRuleStartState() *RuleStartState {
b := NewBaseATNState()
b.stateType = ATNStateRuleStart
return &RuleStartState{BaseATNState: b}
}
// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
*BaseDecisionState
}
func NewPlusLoopbackState() *PlusLoopbackState {
b := NewBaseDecisionState()
b.stateType = ATNStatePlusLoopBack
return &PlusLoopbackState{BaseDecisionState: b}
}
// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state; we don't use it for code generation. Somebody might need it,
// so it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
*BaseBlockStartState
loopBackState ATNState
}
func NewPlusBlockStartState() *PlusBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStatePlusBlockStart
return &PlusBlockStartState{BaseBlockStartState: b}
}
// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
*BaseBlockStartState
}
func NewStarBlockStartState() *StarBlockStartState {
b := NewBlockStartState()
b.stateType = ATNStateStarBlockStart
return &StarBlockStartState{BaseBlockStartState: b}
}
type StarLoopbackState struct {
*BaseATNState
}
func NewStarLoopbackState() *StarLoopbackState {
b := NewBaseATNState()
b.stateType = ATNStateStarLoopBack
return &StarLoopbackState{BaseATNState: b}
}
type StarLoopEntryState struct {
*BaseDecisionState
loopBackState ATNState
precedenceRuleDecision bool
}
func NewStarLoopEntryState() *StarLoopEntryState {
b := NewBaseDecisionState()
b.stateType = ATNStateStarLoopEntry
// precedenceRuleDecision is false by default; it indicates whether this state
// can benefit from a precedence DFA during SLL decision making.
return &StarLoopEntryState{BaseDecisionState: b}
}
// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
*BaseATNState
loopBackState ATNState
}
func NewLoopEndState() *LoopEndState {
b := NewBaseATNState()
b.stateType = ATNStateLoopEnd
return &LoopEndState{BaseATNState: b}
}
// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
*BaseDecisionState
}
func NewTokensStartState() *TokensStartState {
b := NewBaseDecisionState()
b.stateType = ATNStateTokenStart
return &TokensStartState{BaseDecisionState: b}
}
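
AddTransition in this file inserts at an index using Go's nested-append idiom, which is compact but easy to misread. Here is the idiom in isolation (a generic sketch, not runtime code):

package main

import "fmt"

// insertAt is the nested-append insert used by AddTransition. The inner
// append copies the tail into a fresh slice first, so the outer append can
// safely overwrite s's backing array from index i onward.
func insertAt(s []string, i int, v string) []string {
    return append(s[:i], append([]string{v}, s[i:]...)...)
}

func main() {
    ts := []string{"epsilon", "atom", "rule"}
    fmt.Println(insertAt(ts, 1, "range")) // [epsilon range atom rule]
}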

View File

@@ -0,0 +1,11 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// Represent the type of recognizer an ATN applies to.
const (
ATNTypeLexer = 0
ATNTypeParser = 1
)

View File

@@ -0,0 +1,12 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
GetTextFromInterval(*Interval) string
}

View File

@@ -0,0 +1,56 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// TokenFactory creates CommonToken objects.
type TokenFactory interface {
Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}
// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
// copyText indicates whether CommonToken.setText should be called after
// constructing tokens to explicitly set the text. This is useful for cases
// where the input stream might not be able to provide arbitrary substrings of
// text from the input after the lexer creates a token (e.g. the
// implementation of CharStream.GetText in UnbufferedCharStream panics with an
// UnsupportedOperationException). Explicitly setting the token text allows
// Token.GetText to be called at any time regardless of the input stream
// implementation.
//
// The default value is false to avoid the performance and memory overhead of
// copying text for every token unless explicitly requested.
copyText bool
}
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
return &CommonTokenFactory{copyText: copyText}
}
// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
t := NewCommonToken(source, ttype, channel, start, stop)
t.line = line
t.column = column
if text != "" {
t.SetText(text)
} else if c.copyText && source.charStream != nil {
t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
}
return t
}
func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
t.SetText(text)
return t
}
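
The copyText flag above trades memory for independence from the input stream. A toy model of that trade-off with simplified stand-in types (not the vendored Token/CharStream API):

package main

import "fmt"

// token is a stand-in: it either stores its text eagerly (copyText) or
// re-slices the original input on demand.
type token struct {
    start, stop int
    text        string // set only when copying
}

func create(input string, start, stop int, copyText bool) token {
    t := token{start: start, stop: stop}
    if copyText {
        t.text = input[start : stop+1]
    }
    return t
}

// getText needs the input only when the text was not copied up front.
func getText(t token, input string) string {
    if t.text != "" {
        return t.text
    }
    return input[t.start : t.stop+1]
}

func main() {
    in := "hello world"
    copied := create(in, 0, 4, true)
    lazy := create(in, 6, 10, false)
    fmt.Println(getText(copied, "")) // "hello": survives even if the stream is gone
    fmt.Println(getText(lazy, in))   // "world": must re-read the input
}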

View File

@@ -0,0 +1,447 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"strconv"
)
// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on-demand and places the tokens in a buffer to provide access
// to any previous token by index. This token stream ignores the value of
// Token.getChannel. If your parser requires the token stream to filter tokens to
// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
type CommonTokenStream struct {
channel int
// fetchedEOF indicates whether the Token.EOF token has been fetched from
// tokenSource and added to tokens. This field improves performance for the
// following cases:
//
// consume: The lookahead check in consume to prevent consuming the EOF symbol is
// optimized by checking the values of fetchedEOF and p instead of calling LA.
//
// fetch: The check to prevent adding multiple EOF symbols into tokens is
// trivial with this field.
fetchedEOF bool
// index is the index into tokens of the current token (the next token to
// consume). tokens[index] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
// see the documentation of IntStream for a description of initializing methods.
index int
// tokenSource is the TokenSource from which tokens for this stream are
// fetched.
tokenSource TokenSource
// tokens is all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
index: -1,
tokenSource: lexer,
tokens: make([]Token, 0),
}
}
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
func (c *CommonTokenStream) Mark() int {
return 0
}
func (c *CommonTokenStream) Release(marker int) {}
func (c *CommonTokenStream) reset() {
c.Seek(0)
}
func (c *CommonTokenStream) Seek(index int) {
c.lazyInit()
c.index = c.adjustSeekIndex(index)
}
func (c *CommonTokenStream) Get(index int) Token {
c.lazyInit()
return c.tokens[index]
}
func (c *CommonTokenStream) Consume() {
SkipEOFCheck := false
if c.index >= 0 {
if c.fetchedEOF {
// The last token in tokens is EOF. Skip the check if p indexes any fetched
// token except the last.
SkipEOFCheck = c.index < len(c.tokens)-1
} else {
// No EOF token in tokens. Skip the check if p indexes a fetched token.
SkipEOFCheck = c.index < len(c.tokens)
}
} else {
// Not yet initialized
SkipEOFCheck = false
}
if !SkipEOFCheck && c.LA(1) == TokenEOF {
panic("cannot consume EOF")
}
if c.Sync(c.index + 1) {
c.index = c.adjustSeekIndex(c.index + 1)
}
}
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
return fetched >= n
}
return true
}
// fetch adds n elements to buffer and returns the actual number of elements
// added to the buffer.
func (c *CommonTokenStream) fetch(n int) int {
if c.fetchedEOF {
return 0
}
for i := 0; i < n; i++ {
t := c.tokenSource.NextToken()
t.SetTokenIndex(len(c.tokens))
c.tokens = append(c.tokens, t)
if t.GetTokenType() == TokenEOF {
c.fetchedEOF = true
return i + 1
}
}
return n
}
// GetTokens gets all tokens from start to stop inclusive.
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
if start < 0 || stop < 0 {
return nil
}
c.lazyInit()
subset := make([]Token, 0)
if stop >= len(c.tokens) {
stop = len(c.tokens) - 1
}
for i := start; i <= stop; i++ {
t := c.tokens[i]
if t.GetTokenType() == TokenEOF {
break
}
if types == nil || types.contains(t.GetTokenType()) {
subset = append(subset, t)
}
}
return subset
}
func (c *CommonTokenStream) LA(i int) int {
return c.LT(i).GetTokenType()
}
func (c *CommonTokenStream) lazyInit() {
if c.index == -1 {
c.setup()
}
}
func (c *CommonTokenStream) setup() {
c.Sync(0)
c.index = c.adjustSeekIndex(0)
}
func (c *CommonTokenStream) GetTokenSource() TokenSource {
return c.tokenSource
}
// SetTokenSource resets the c token stream by setting its token source.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
// no tokens on channel between i and EOF.
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
c.Sync(i)
if i >= len(c.tokens) {
return -1
}
token := c.tokens[i]
for token.GetChannel() != c.channel {
if token.GetTokenType() == TokenEOF {
return -1
}
i++
c.Sync(i)
token = c.tokens[i]
}
return i
}
// previousTokenOnChannel returns the index of the previous token on channel
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
// there are no tokens on channel between i and 0.
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
for i >= 0 && c.tokens[i].GetChannel() != channel {
i--
}
return i
}
// GetHiddenTokensToRight collects all tokens on a specified channel to the
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
// or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
c.lazyInit()
if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
}
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
// If there are no on-channel tokens to the right, nextOnChannel == -1, so set 'to' to the last token
var to int
if nextOnChannel == -1 {
to = len(c.tokens) - 1
} else {
to = nextOnChannel
}
return c.filterForChannel(from, to, channel)
}
// GetHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
c.lazyInit()
if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
}
prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
if prevOnChannel == tokenIndex-1 {
return nil
}
// If there are none on channel to the left and prevOnChannel == -1 then from = 0
from := prevOnChannel + 1
to := tokenIndex - 1
return c.filterForChannel(from, to, channel)
}
func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
hidden := make([]Token, 0)
for i := left; i < right+1; i++ {
t := c.tokens[i]
if channel == -1 {
if t.GetChannel() != LexerDefaultTokenChannel {
hidden = append(hidden, t)
}
} else if t.GetChannel() == channel {
hidden = append(hidden, t)
}
}
if len(hidden) == 0 {
return nil
}
return hidden
}
func (c *CommonTokenStream) GetSourceName() string {
return c.tokenSource.GetSourceName()
}
func (c *CommonTokenStream) Size() int {
return len(c.tokens)
}
func (c *CommonTokenStream) Index() int {
return c.index
}
func (c *CommonTokenStream) GetAllText() string {
return c.GetTextFromInterval(nil)
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
if start == nil || end == nil {
return ""
}
return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
return c.GetTextFromInterval(interval.GetSourceInterval())
}
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
c.Fill()
if interval == nil {
interval = NewInterval(0, len(c.tokens)-1)
}
start := interval.Start
stop := interval.Stop
if start < 0 || stop < 0 {
return ""
}
if stop >= len(c.tokens) {
stop = len(c.tokens) - 1
}
s := ""
for i := start; i < stop+1; i++ {
t := c.tokens[i]
if t.GetTokenType() == TokenEOF {
break
}
s += t.GetText()
}
return s
}
// Fill gets all tokens from the lexer until EOF.
func (c *CommonTokenStream) Fill() {
c.lazyInit()
for c.fetch(1000) == 1000 {
continue
}
}
func (c *CommonTokenStream) adjustSeekIndex(i int) int {
return c.NextTokenOnChannel(i, c.channel)
}
func (c *CommonTokenStream) LB(k int) Token {
if k == 0 || c.index-k < 0 {
return nil
}
i := c.index
n := 1
// Find k good tokens looking backward
for n <= k {
// Skip off-channel tokens
i = c.previousTokenOnChannel(i-1, c.channel)
n++
}
if i < 0 {
return nil
}
return c.tokens[i]
}
func (c *CommonTokenStream) LT(k int) Token {
c.lazyInit()
if k == 0 {
return nil
}
if k < 0 {
return c.LB(-k)
}
i := c.index
n := 1 // We know tokens[i] is valid
// Find k good tokens
for n < k {
// Skip off-channel tokens, but make sure to not look past EOF
if c.Sync(i + 1) {
i = c.NextTokenOnChannel(i+1, c.channel)
}
n++
}
return c.tokens[i]
}
// getNumberOfOnChannelTokens counts EOF once.
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
var n int
c.Fill()
for i := 0; i < len(c.tokens); i++ {
t := c.tokens[i]
if t.GetChannel() == c.channel {
n++
}
if t.GetTokenType() == TokenEOF {
break
}
}
return n
}
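
LT, LB, and NextTokenOnChannel above all walk the buffer while skipping off-channel tokens; that scan is the heart of channel filtering. A toy model of the forward scan (simplified types, not the vendored API):

package main

import "fmt"

const eof = -1

type tok struct {
    typ, channel int
}

// nextOnChannel loosely mirrors NextTokenOnChannel above: from index i, walk
// right to the first token on the wanted channel; an off-channel EOF means
// there is nothing left, reported as -1.
func nextOnChannel(tokens []tok, i, channel int) int {
    for i < len(tokens) {
        if tokens[i].channel == channel {
            return i
        }
        if tokens[i].typ == eof {
            return -1
        }
        i++
    }
    return -1
}

func main() {
    // channel 0 = default, channel 1 = hidden (e.g. whitespace)
    tokens := []tok{{10, 0}, {20, 1}, {20, 1}, {11, 0}, {eof, 0}}
    fmt.Println(nextOnChannel(tokens, 1, 0)) // 3: skips the two hidden tokens
}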

vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
View File

@@ -0,0 +1,183 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"sort"
"sync"
)
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
decision int
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there.
states map[int]*DFAState
statesMu sync.RWMutex
s0 *DFAState
s0Mu sync.RWMutex
// precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
// True if the DFA is for a precedence decision and false otherwise.
precedenceDfa bool
precedenceDfaMu sync.RWMutex
}
func NewDFA(atnStartState DecisionState, decision int) *DFA {
return &DFA{
atnStartState: atnStartState,
decision: decision,
states: make(map[int]*DFAState),
}
}
// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
if !d.getPrecedenceDfa() {
panic("only precedence DFAs may contain a precedence start state")
}
// s0.edges is never nil for a precedence DFA
if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
return nil
}
return d.getS0().getIthEdge(precedence)
}
// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
if !d.getPrecedenceDfa() {
panic("only precedence DFAs may contain a precedence start state")
}
if precedence < 0 {
return
}
// Synchronization on s0 here is ok. When the DFA is turned into a
// precedence DFA, s0 will be initialized once and not updated again. s0.edges
// is never nil for a precedence DFA.
s0 := d.getS0()
if precedence >= s0.numEdges() {
edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
s0.setEdges(edges)
d.setS0(s0)
}
s0.setIthEdge(precedence, startState)
}
func (d *DFA) getPrecedenceDfa() bool {
d.precedenceDfaMu.RLock()
defer d.precedenceDfaMu.RUnlock()
return d.precedenceDfa
}
// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
d.setStates(make(map[int]*DFAState))
if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
d.setS0(precedenceState)
} else {
d.setS0(nil)
}
d.precedenceDfaMu.Lock()
defer d.precedenceDfaMu.Unlock()
d.precedenceDfa = precedenceDfa
}
}
func (d *DFA) getS0() *DFAState {
d.s0Mu.RLock()
defer d.s0Mu.RUnlock()
return d.s0
}
func (d *DFA) setS0(s *DFAState) {
d.s0Mu.Lock()
defer d.s0Mu.Unlock()
d.s0 = s
}
func (d *DFA) getState(hash int) (*DFAState, bool) {
d.statesMu.RLock()
defer d.statesMu.RUnlock()
s, ok := d.states[hash]
return s, ok
}
func (d *DFA) setStates(states map[int]*DFAState) {
d.statesMu.Lock()
defer d.statesMu.Unlock()
d.states = states
}
func (d *DFA) setState(hash int, state *DFAState) {
d.statesMu.Lock()
defer d.statesMu.Unlock()
d.states[hash] = state
}
func (d *DFA) numStates() int {
d.statesMu.RLock()
defer d.statesMu.RUnlock()
return len(d.states)
}
type dfaStateList []*DFAState
func (d dfaStateList) Len() int { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
vs := make([]*DFAState, 0, len(d.states))
for _, v := range d.states {
vs = append(vs, v)
}
sort.Sort(dfaStateList(vs))
return vs
}
func (d *DFA) String(literalNames []string, symbolicNames []string) string {
if d.getS0() == nil {
return ""
}
return NewDFASerializer(d, literalNames, symbolicNames).String()
}
func (d *DFA) ToLexerString() string {
if d.getS0() == nil {
return ""
}
return NewLexerDFASerializer(d).String()
}
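
setPrecedenceStartState above grows s0's edge slice on demand so precedence values can be used as direct indexes into it. That grow-and-index pattern in isolation (a sketch with a placeholder element type):

package main

import "fmt"

// setAt mirrors the growth logic in setPrecedenceStartState: pad the slice
// with nil slots until precedence is a valid index, then store the value.
func setAt(edges []*int, precedence int, v *int) []*int {
    if precedence >= len(edges) {
        edges = append(edges, make([]*int, precedence+1-len(edges))...)
    }
    edges[precedence] = v
    return edges
}

func main() {
    var edges []*int
    state := 3
    edges = setAt(edges, 5, &state)
    fmt.Println(len(edges), edges[5] != nil, edges[2] == nil) // 6 true true
}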

View File

@@ -0,0 +1,158 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
"strings"
)
// DFASerializer is a DFA walker that knows how to dump a DFA to a serialized
// string.
type DFASerializer struct {
dfa *DFA
literalNames []string
symbolicNames []string
}
func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
if literalNames == nil {
literalNames = make([]string, 0)
}
if symbolicNames == nil {
symbolicNames = make([]string, 0)
}
return &DFASerializer{
dfa: dfa,
literalNames: literalNames,
symbolicNames: symbolicNames,
}
}
func (d *DFASerializer) String() string {
if d.dfa.getS0() == nil {
return ""
}
buf := ""
states := d.dfa.sortedStates()
for _, s := range states {
if s.edges != nil {
n := len(s.edges)
for j := 0; j < n; j++ {
t := s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += d.GetStateString(s)
buf += "-"
buf += d.getEdgeLabel(j)
buf += "->"
buf += d.GetStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
return buf
}
func (d *DFASerializer) getEdgeLabel(i int) string {
if i == 0 {
return "EOF"
} else if d.literalNames != nil && i-1 < len(d.literalNames) {
return d.literalNames[i-1]
} else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
return d.symbolicNames[i-1]
}
return strconv.Itoa(i - 1)
}
func (d *DFASerializer) GetStateString(s *DFAState) string {
var a, b string
if s.isAcceptState {
a = ":"
}
if s.requiresFullContext {
b = "^"
}
baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
if s.isAcceptState {
if s.predicates != nil {
return baseStateStr + "=>" + fmt.Sprint(s.predicates)
}
return baseStateStr + "=>" + fmt.Sprint(s.prediction)
}
return baseStateStr
}
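// For illustration: an accept state s2 that requires full context and
// predicts alternative 1 renders as ":s2^=>1", so a complete edge line
// emitted by String looks like "s0-ID->:s2^=>1" (the token name ID is an
// example only).
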
type LexerDFASerializer struct {
*DFASerializer
}
func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
}
func (l *LexerDFASerializer) getEdgeLabel(i int) string {
var sb strings.Builder
sb.Grow(6)
sb.WriteByte('\'')
sb.WriteRune(rune(i))
sb.WriteByte('\'')
return sb.String()
}
func (l *LexerDFASerializer) String() string {
if l.dfa.getS0() == nil {
return ""
}
buf := ""
states := l.dfa.sortedStates()
for i := 0; i < len(states); i++ {
s := states[i]
if s.edges != nil {
n := len(s.edges)
for j := 0; j < n; j++ {
t := s.edges[j]
if t != nil && t.stateNumber != 0x7FFFFFFF {
buf += l.GetStateString(s)
buf += "-"
buf += l.getEdgeLabel(j)
buf += "->"
buf += l.GetStateString(t)
buf += "\n"
}
}
}
}
if len(buf) == 0 {
return ""
}
return buf
}


@@ -0,0 +1,183 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"sync"
)
// PredPrediction maps a predicate to a predicted alternative.
type PredPrediction struct {
alt int
pred SemanticContext
}
func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
return &PredPrediction{alt: alt, pred: pred}
}
func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
// conversion, therefore, the subset T would be a bitset representing the set of
// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
// A DFAState may have multiple references to a particular state, but with
// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
configs ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
edges []*DFAState
edgesMu sync.RWMutex
isAcceptState bool
// prediction is the ttype we match or alt we predict if the state is accept.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
lexerActionExecutor *LexerActionExecutor
// requiresFullContext indicates it was created during an SLL prediction that
// discovered a conflict between the configurations in the state. Future
// ParserATNSimulator.execATN invocations immediately jump to doing
// full-context prediction if this is true.
requiresFullContext bool
// predicates is the predicates associated with the ATN configurations of the
// DFA state during SLL parsing. When we have predicates, requiresFullContext
// is false, since full context prediction evaluates predicates on-the-fly. If
// predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
//
// We only use these for non-requiresFullContext but conflicting states. That
// means we know from the context (it's $ or we don't dip into outer context)
// that it's an ambiguity not a conflict.
//
// This list is computed by
// ParserATNSimulator.predicateDFAState.
predicates []*PredPrediction
}
func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
if configs == nil {
configs = NewBaseATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
}
// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() Set {
alts := NewArray2DHashSet(nil, nil)
if d.configs != nil {
for _, c := range d.configs.GetItems() {
alts.Add(c.GetAlt())
}
}
if alts.Len() == 0 {
return nil
}
return alts
}
func (d *DFAState) getEdges() []*DFAState {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return d.edges
}
func (d *DFAState) numEdges() int {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return len(d.edges)
}
func (d *DFAState) getIthEdge(i int) *DFAState {
d.edgesMu.RLock()
defer d.edgesMu.RUnlock()
return d.edges[i]
}
func (d *DFAState) setEdges(newEdges []*DFAState) {
d.edgesMu.Lock()
defer d.edgesMu.Unlock()
d.edges = newEdges
}
func (d *DFAState) setIthEdge(i int, edge *DFAState) {
d.edgesMu.Lock()
defer d.edgesMu.Unlock()
d.edges[i] = edge
}
func (d *DFAState) setPrediction(v int) {
d.prediction = v
}
// equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has this exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) equals(other interface{}) bool {
if d == other {
return true
} else if _, ok := other.(*DFAState); !ok {
return false
}
return d.configs.Equals(other.(*DFAState).configs)
}
func (d *DFAState) String() string {
var s string
if d.isAcceptState {
if d.predicates != nil {
s = "=>" + fmt.Sprint(d.predicates)
} else {
s = "=>" + fmt.Sprint(d.prediction)
}
}
return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}
func (d *DFAState) hash() int {
h := murmurInit(7)
h = murmurUpdate(h, d.configs.hash())
return murmurFinish(h, 1)
}
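// Illustrative sketch (configs stands for any ATNConfigSet value): two
// DFAStates built from the same configuration set compare equal and hash
// identically even though their state numbers differ, which is what
// ParserATNSimulator.addDFAState relies on.
//
//	a := NewDFAState(1, configs)
//	b := NewDFAState(2, configs)
//	_ = a.equals(b)          // true: configs match, stateNumber is ignored
//	_ = a.hash() == b.hash() // true: the hash is derived from configs alone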


@@ -0,0 +1,111 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"strconv"
)
//
// This implementation of {@link ANTLRErrorListener} can be used to identify
// certain potential correctness and performance problems in grammars. "reports"
// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
// message.
//
// <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li>
// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
// full-context prediction resolved an SLL conflict to a unique alternative,
// <em>and</em> the minimum alternative of the SLL conflict was found to not be
// a truly viable alternative. Two-stage parsing cannot be used for inputs where
// this situation occurs.</li>
// </ul>
type DiagnosticErrorListener struct {
*DefaultErrorListener
exactOnly bool
}
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
// whether all ambiguities or only exact ambiguities are Reported.
n.exactOnly = exactOnly
return n
}
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
if d.exactOnly && !exact {
return
}
msg := "reportAmbiguity d=" +
d.getDecisionDescription(recognizer, dfa) +
": ambigAlts=" +
d.getConflictingAlts(ambigAlts, configs).String() +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
recognizer.NotifyErrorListeners(msg, nil, nil)
}
func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
decision := dfa.decision
ruleIndex := dfa.atnStartState.GetRuleIndex()
ruleNames := recognizer.GetRuleNames()
if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
return strconv.Itoa(decision)
}
ruleName := ruleNames[ruleIndex]
if ruleName == "" {
return strconv.Itoa(decision)
}
return strconv.Itoa(decision) + " (" + ruleName + ")"
}
//
// Computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser.
//
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
// Reported by the parser.
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
//
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
for _, c := range set.GetItems() {
result.add(c.GetAlt())
}
return result
}
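// Usage sketch (the generated parser type, token stream, and the
// AddErrorListener/RemoveErrorListeners helpers are assumptions here):
//
//	p := NewMyGrammarParser(tokens)
//	p.RemoveErrorListeners()
//	p.AddErrorListener(NewDiagnosticErrorListener(false)) // false: report all ambiguities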


@@ -0,0 +1,108 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"os"
"strconv"
)
// Provides an empty default implementation of {@link ANTLRErrorListener}. The
// default implementation of each method does nothing, but can be overridden as
// necessary.
type ErrorListener interface {
SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}
type DefaultErrorListener struct {
}
func NewDefaultErrorListener() *DefaultErrorListener {
return new(DefaultErrorListener)
}
func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}
func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}
func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}
func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}
type ConsoleErrorListener struct {
*DefaultErrorListener
}
func NewConsoleErrorListener() *ConsoleErrorListener {
return new(ConsoleErrorListener)
}
//
// Provides a default instance of {@link ConsoleErrorListener}.
//
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
//
// {@inheritDoc}
//
// <p>
// This implementation prints messages to {@link System//err} containing the
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
// the following format.</p>
//
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
//
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
type ProxyErrorListener struct {
*DefaultErrorListener
delegates []ErrorListener
}
func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
if delegates == nil {
panic("delegates is not provided")
}
l := new(ProxyErrorListener)
l.delegates = delegates
return l
}
func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
for _, d := range p.delegates {
d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
}
}
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
}
}
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
}
}
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
for _, d := range p.delegates {
d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
}
}
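// Sketch of a custom listener (errCollector is hypothetical): embedding
// *DefaultErrorListener means only the methods you care about need overriding.
//
//	type errCollector struct {
//		*DefaultErrorListener
//		msgs []string
//	}
//
//	func (c *errCollector) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
//		c.msgs = append(c.msgs, fmt.Sprintf("%d:%d %s", line, column, msg))
//	}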


@@ -0,0 +1,758 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"reflect"
"strconv"
"strings"
)
type ErrorStrategy interface {
reset(Parser)
RecoverInline(Parser) Token
Recover(Parser, RecognitionException)
Sync(Parser)
inErrorRecoveryMode(Parser) bool
ReportError(Parser, RecognitionException)
ReportMatch(Parser)
}
// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
//
type DefaultErrorStrategy struct {
errorRecoveryMode bool
lastErrorIndex int
lastErrorStates *IntervalSet
}
var _ ErrorStrategy = &DefaultErrorStrategy{}
func NewDefaultErrorStrategy() *DefaultErrorStrategy {
d := new(DefaultErrorStrategy)
// Indicates whether the error strategy is currently "recovering from an
// error". This is used to suppress Reporting multiple error messages while
// attempting to recover from a detected syntax error.
//
// @see //inErrorRecoveryMode
//
d.errorRecoveryMode = false
// The index into the input stream where the last error occurred.
// This is used to prevent infinite loops where an error is found
// but no token is consumed during recovery...another error is found,
// ad nauseam. This is a failsafe mechanism to guarantee that at least
// one token/tree node is consumed for two errors.
//
d.lastErrorIndex = -1
d.lastErrorStates = nil
return d
}
// <p>The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.</p>
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
d.endErrorCondition(recognizer)
}
//
// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
d.errorRecoveryMode = true
}
func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
return d.errorRecoveryMode
}
//
// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
//
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
d.errorRecoveryMode = false
d.lastErrorStates = nil
d.lastErrorIndex = -1
}
//
// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
//
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
d.endErrorCondition(recognizer)
}
//
// {@inheritDoc}
//
// <p>The default implementation returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
// and dispatches the Reporting task based on the runtime type of {@code e}
// according to the following table.</p>
//
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //ReportNoViableAlternative}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //ReportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //ReportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
//
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
// if we've already Reported an error and have not Matched a token
// yet successfully, don't Report any errors.
if d.inErrorRecoveryMode(recognizer) {
return // don't Report spurious errors
}
d.beginErrorCondition(recognizer)
switch t := e.(type) {
default:
fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
// fmt.Println(e.stack)
recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
case *NoViableAltException:
d.ReportNoViableAlternative(recognizer, t)
case *InputMisMatchException:
d.ReportInputMisMatch(recognizer, t)
case *FailedPredicateException:
d.ReportFailedPredicate(recognizer, t)
}
}
// {@inheritDoc}
//
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
//
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
// uh oh, another error at the same token index and previously-visited
// state in the ATN; must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop; this is a failsafe.
recognizer.Consume()
}
d.lastErrorIndex = recognizer.GetInputStream().Index()
if d.lastErrorStates == nil {
d.lastErrorStates = NewIntervalSet()
}
d.lastErrorStates.addOne(recognizer.GetState())
followSet := d.getErrorRecoverySet(recognizer)
d.consumeUntil(recognizer, followSet)
}
// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what we were expecting
// at this point in the ATN. You can call this anytime but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
//
// <pre>
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
//
// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows
// the subrule.</p>
//
// <p>During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.</p>
//
// <p><strong>ORIGINS</strong></p>
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatched token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
//
// <pre>
// classfunc : 'class' ID '{' member* '}'
// </pre>
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
// <p>This functionality costs a little bit of effort because the parser has to
// compare the token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method as a blank { }.</p>
//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.inErrorRecoveryMode(recognizer) {
return
}
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
la := recognizer.GetTokenStream().LA(1)
// try the cheaper subset first; might get lucky. seems to shave a wee bit off
nextTokens := recognizer.GetATN().NextTokens(s, nil)
if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
return
}
switch s.GetStateType() {
case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
// Report error and recover if possible
if d.SingleTokenDeletion(recognizer) != nil {
return
}
panic(NewInputMisMatchException(recognizer))
case ATNStatePlusLoopBack, ATNStateStarLoopBack:
d.ReportUnwantedToken(recognizer)
expecting := NewIntervalSet()
expecting.addSet(recognizer.GetExpectedTokens())
whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
default:
// do nothing if we can't identify the exact kind of ATN state
}
}
// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
if tokens != nil {
if e.startToken.GetTokenType() == TokenEOF {
input = "<EOF>"
} else {
input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
}
} else {
input = "<unknown input>"
}
msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
//
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
//
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
//
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time this method is called, the
// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
// removed from the input stream. When this method returns,
// {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)
t := recognizer.GetCurrentToken()
tokenName := d.GetTokenErrorDisplay(t)
expecting := d.GetExpectedTokens(recognizer)
msg := "extraneous input " + tokenName + " expecting " +
expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, t, nil)
}
// This method is called to Report a syntax error which requires the
// insertion of a missing token into the input stream. At the time this
// method is called, the missing token has not yet been inserted. When this
// method returns, {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.inErrorRecoveryMode(recognizer) {
return
}
d.beginErrorCondition(recognizer)
t := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
" at " + d.GetTokenErrorDisplay(t)
recognizer.NotifyErrorListeners(msg, t, nil)
}
// <p>The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, this method panics with an
// {@link InputMisMatchException}.</p>
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p>
//
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
//
// <p>If current token (at {@code LA(1)}) is consistent with what could come
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p>
//
// <p><strong>EXAMPLE</strong></p>
//
// <p>For example, input {@code i=(3} is clearly missing the {@code ')'}. When
// the parser returns from the nested call to {@code expr}, it will have
// call chain:</p>
//
// <pre>
// stat &rarr; expr &rarr; atom
// </pre>
//
// and it will be trying to Match the {@code ')'} at this point in the
// derivation:
//
// <pre>
// =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
//                     ^
// </pre>
//
// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
if MatchedSymbol != nil {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.Consume()
return MatchedSymbol
}
// SINGLE TOKEN INSERTION
if d.SingleTokenInsertion(recognizer) {
return d.GetMissingSymbol(recognizer)
}
// even that didn't work; must panic the exception
panic(NewInputMisMatchException(recognizer))
}
//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
// <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce this behavior.</p>
//
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
//
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token error recovery
// is free to conjure up and insert the missing token
atn := recognizer.GetInterpreter().atn
currentState := atn.states[recognizer.GetState()]
next := currentState.GetTransitions()[0].getTarget()
expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
if expectingAtLL2.contains(currentSymbolType) {
d.ReportMissingToken(recognizer)
return true
}
return false
}
// This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, this method calls
// {@link //ReportUnwantedToken} to Report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning {@link //ReportMatch} is called to signal a successful
// Match.</p>
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
//
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
if expecting.contains(NextTokenType) {
d.ReportUnwantedToken(recognizer)
// print("recoverFromMisMatchedToken deleting " \
// + str(recognizer.GetTokenStream().LT(1)) \
// + " since " + str(recognizer.GetTokenStream().LT(2)) \
// + " is what we want", file=sys.stderr)
recognizer.Consume() // simply delete extra token
// we want to return the token we're actually Matching
MatchedSymbol := recognizer.GetCurrentToken()
d.ReportMatch(recognizer) // we know current token is correct
return MatchedSymbol
}
return nil
}
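// For example (token names are illustrative): if the parser is trying to
// Match ';' but the stream holds an extra ID before it, LA(2) is ';' and is
// in the expected set, so the stray ID is reported as extraneous and
// consumed, and the ';' is then Matched as if nothing had happened.
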
// Conjure up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want, we assume that
// this token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override this method to create the appropriate tokens.
//
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
expectedTokenType := expecting.first()
var tokenText string
if expectedTokenType == TokenEOF {
tokenText = "<missing EOF>"
} else {
ln := recognizer.GetLiteralNames()
if expectedTokenType > 0 && expectedTokenType < len(ln) {
tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
} else {
tokenText = "<missing undefined>" // TODO matches the JS impl
}
}
current := currentSymbol
lookback := recognizer.GetTokenStream().LT(-1)
if current.GetTokenType() == TokenEOF && lookback != nil {
current = lookback
}
tf := recognizer.GetTokenFactory()
return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
}
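// For example, while recovering from {@code i=(3} (missing the ')'),
// expecting.first() is the ')' token type, so the conjured token's text is
// "<missing ')'>". Illustrative only; it assumes ')' appears in the parser's
// literal-name table.
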
func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
return recognizer.GetExpectedTokens()
}
// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new token type.
//
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return "<no token>"
}
s := t.GetText()
if s == "" {
if t.GetTokenType() == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
}
}
return d.escapeWSAndQuote(s)
}
func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
}
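// For example, escapeWSAndQuote("a\tb") yields 'a\tb', with the tab spelled
// out as the two characters backslash and t so error messages stay on one line.
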
// Compute the error recovery set for the current rule. During
// rule invocation, the parser pushes the set of tokens that can
// follow that rule reference on the stack; this amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
// This local follow set only includes tokens
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
// EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
// can legally follow a call to r or any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
// Consider grammar:
//
// a : '[' b ']'
// | '(' b ')'
//
// b : c '^' INT
// c : ID
// | INT
//
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
// FOLLOW(b1_in_a) = FIRST(']') = ']'
// FOLLOW(b2_in_a) = FIRST(')') = ')'
// FOLLOW(c_in_b) = FIRST('^') = '^'
//
// Upon erroneous input "[]", the call chain is
//
// a -> b -> c
//
// and, hence, the follow context stack is:
//
// depth follow set start of rule execution
// 0 <EOF> a (from main())
// 1 ']' b
// 2 '^' c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
// included.
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
// At this point, it gets a mismatched token error and panics an
// exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b
// exits normally returning to rule a. Now it finds the ']' (and
// with the successful Match exits errorRecovery mode).
//
// So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set.
//
// Errors are not generated in errorRecovery mode.
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
//
// and
//
// "A note on error recovery in recursive descent parsers":
// http://portal.acm.org/citation.cfm?id=947902.947905
//
// Later, Josef Grosch had some good ideas:
//
// "Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers":
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
//
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
recoverSet := NewIntervalSet()
for ctx != nil && ctx.GetInvokingState() >= 0 {
// compute what follows who invoked us
invokingState := atn.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
recoverSet.addSet(follow)
ctx = ctx.GetParent().(ParserRuleContext)
}
recoverSet.removeOne(TokenEpsilon)
return recoverSet
}
// Consume tokens until one Matches the given token set.//
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
ttype := recognizer.GetTokenStream().LA(1)
for ttype != TokenEOF && !set.contains(ttype) {
recognizer.Consume()
ttype = recognizer.GetTokenStream().LA(1)
}
}
//
// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
// {@link ParseCancellationException}. The implementation ensures that the
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// <p>
// This error strategy is useful in the following scenarios.</p>
//
// <ul>
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
// when the result will be ignored either way.</li>
// </ul>
//
// <p>
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
//
// @see Parser//setErrorHandler(ANTLRErrorStrategy)
type BailErrorStrategy struct {
*DefaultErrorStrategy
}
var _ ErrorStrategy = &BailErrorStrategy{}
func NewBailErrorStrategy() *BailErrorStrategy {
b := new(BailErrorStrategy)
b.DefaultErrorStrategy = NewDefaultErrorStrategy()
return b
}
// Instead of recovering from exception {@code e}, re-panic it wrapped
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
//
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
context.SetException(e)
context = context.GetParent().(ParserRuleContext)
}
panic(NewParseCancellationException()) // TODO we don't emit e properly
}
// Make sure we don't attempt to recover inline; if the parser
// successfully recovers, it won't panic an exception.
//
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
return nil
}
// Make sure we don't attempt to recover from problems in subrules.//
func (b *BailErrorStrategy) Sync(recognizer Parser) {
// pass
}
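// Usage sketch for two-stage parsing (p is a generated parser value; the
// strategy setter follows the doc comment above, and the entry rule is
// hypothetical):
//
//	p.SetErrorHandler(NewBailErrorStrategy())
//	defer func() {
//		if r := recover(); r != nil {
//			if _, ok := r.(*ParseCancellationException); ok {
//				// first (SLL) stage failed: retry with NewDefaultErrorStrategy()
//			} else {
//				panic(r)
//			}
//		}
//	}()
//	tree := p.StartRule() // hypothetical entry rule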


@@ -0,0 +1,241 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
// mismatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.
type RecognitionException interface {
GetOffendingToken() Token
GetMessage() string
GetInputStream() IntStream
}
type BaseRecognitionException struct {
message string
recognizer Recognizer
offendingToken Token
offendingState int
ctx RuleContext
input IntStream
}
func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
// TODO: capturing a stack trace here (as the JavaScript runtime does with
// Error.captureStackTrace) may be possible via the standard library's
// runtime.Stack(buf []byte, all bool) int.
t := new(BaseRecognitionException)
t.message = message
t.recognizer = recognizer
t.input = input
t.ctx = ctx
// The current {@link Token} when an error occurred. Since not all streams
// support accessing symbols by index, we have to track the {@link Token}
// instance itself.
t.offendingToken = nil
// Get the ATN state number the parser was in at the time the error
// occurred. For {@link NoViableAltException} and
// {@link LexerNoViableAltException} exceptions, this is the
// {@link DecisionState} number. For others, it is the state whose outgoing
// edge we couldn't Match.
t.offendingState = -1
if t.recognizer != nil {
t.offendingState = t.recognizer.GetState()
}
return t
}
func (b *BaseRecognitionException) GetMessage() string {
return b.message
}
func (b *BaseRecognitionException) GetOffendingToken() Token {
return b.offendingToken
}
func (b *BaseRecognitionException) GetInputStream() IntStream {
return b.input
}
// getExpectedTokens gets the set of input symbols which could potentially
// follow the previously Matched symbol at the time this exception was
// raised.
//
// <p>If the set of expected tokens is not known and could not be computed,
// this method returns {@code nil}.</p>
//
// @return The set of token types that could potentially follow the current
// state in the ATN, or {@code nil} if the information is not available.
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
if b.recognizer != nil {
return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
}
return nil
}
func (b *BaseRecognitionException) String() string {
return b.message
}
type LexerNoViableAltException struct {
*BaseRecognitionException
startIndex int
deadEndConfigs ATNConfigSet
}
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
l := new(LexerNoViableAltException)
l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
l.startIndex = startIndex
l.deadEndConfigs = deadEndConfigs
return l
}
func (l *LexerNoViableAltException) String() string {
symbol := ""
if l.startIndex >= 0 && l.startIndex < l.input.Size() {
symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
}
return "LexerNoViableAltException" + symbol
}
type NoViableAltException struct {
*BaseRecognitionException
startToken Token
offendingToken Token
ctx ParserRuleContext
deadEndConfigs ATNConfigSet
}
// Indicates that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error occurred. Reported by ReportNoViableAlternative().
//
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
ctx = recognizer.GetParserRuleContext()
}
if offendingToken == nil {
offendingToken = recognizer.GetCurrentToken()
}
if startToken == nil {
startToken = recognizer.GetCurrentToken()
}
if input == nil {
input = recognizer.GetInputStream().(TokenStream)
}
n := new(NoViableAltException)
n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
// Which configurations did we try at input.Index() that couldn't Match
// input.LT(1)?//
n.deadEndConfigs = deadEndConfigs
// The token object at the start index; the input stream might
// not be buffering tokens, so get a reference to it. (At the
// time the error occurred, of course, the stream needs to keep a
// buffer of all of the tokens, but later we might not have access to those.)
n.startToken = startToken
n.offendingToken = offendingToken
return n
}
type InputMisMatchException struct {
*BaseRecognitionException
}
// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
//
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
i := new(InputMisMatchException)
i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
i.offendingToken = recognizer.GetCurrentToken()
return i
}
// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.
type FailedPredicateException struct {
*BaseRecognitionException
ruleIndex int
predicateIndex int
predicate string
}
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
f := new(FailedPredicateException)
f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
trans := s.GetTransitions()[0]
if trans2, ok := trans.(*PredicateTransition); ok {
f.ruleIndex = trans2.ruleIndex
f.predicateIndex = trans2.predIndex
} else {
f.ruleIndex = 0
f.predicateIndex = 0
}
f.predicate = predicate
f.offendingToken = recognizer.GetCurrentToken()
return f
}
func (f *FailedPredicateException) formatMessage(predicate, message string) string {
if message != "" {
return message
}
return "failed predicate: {" + predicate + "}?"
}
type ParseCancellationException struct {
}
func NewParseCancellationException() *ParseCancellationException {
return new(ParseCancellationException)
}


@@ -0,0 +1,49 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"bytes"
"io"
"os"
)
// This is an InputStream that is loaded from a file all at once
// when you construct the object.
type FileStream struct {
*InputStream
filename string
}
func NewFileStream(fileName string) (*FileStream, error) {
buf := bytes.NewBuffer(nil)
f, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer f.Close()
_, err = io.Copy(buf, f)
if err != nil {
return nil, err
}
fs := new(FileStream)
fs.filename = fileName
s := buf.String()
fs.InputStream = NewInputStream(s)
return fs, nil
}
func (f *FileStream) GetSourceName() string {
return f.filename
}
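// Usage sketch (the file name and the generated lexer are examples only):
//
//	fs, err := NewFileStream("input.expr")
//	if err != nil {
//		// handle the I/O error
//	}
//	lexer := NewMyLexer(fs) // any CharStream consumer works the same way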


@@ -0,0 +1,3 @@
module github.com/antlr/antlr4/runtime/Go/antlr
go 1.16


@@ -0,0 +1,113 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type InputStream struct {
name string
index int
data []rune
size int
}
func NewInputStream(data string) *InputStream {
is := new(InputStream)
is.name = "<empty>"
is.index = 0
is.data = []rune(data)
is.size = len(is.data) // number of runes
return is
}
func (is *InputStream) reset() {
is.index = 0
}
func (is *InputStream) Consume() {
if is.index >= is.size {
// assert is.LA(1) == TokenEOF
panic("cannot consume EOF")
}
is.index++
}
func (is *InputStream) LA(offset int) int {
if offset == 0 {
return 0 // nil
}
if offset < 0 {
offset++ // e.g., translate LA(-1) to use offset=0
}
pos := is.index + offset - 1
if pos < 0 || pos >= is.size { // invalid
return TokenEOF
}
return int(is.data[pos])
}
func (is *InputStream) LT(offset int) int {
return is.LA(offset)
}
func (is *InputStream) Index() int {
return is.index
}
func (is *InputStream) Size() int {
return is.size
}
// mark/release do nothing; we have the entire buffer
func (is *InputStream) Mark() int {
return -1
}
func (is *InputStream) Release(marker int) {
}
func (is *InputStream) Seek(index int) {
if index <= is.index {
is.index = index // just jump don't update stream state (line,...)
return
}
// seek forward
is.index = intMin(index, is.size)
}
func (is *InputStream) GetText(start int, stop int) string {
if stop >= is.size {
stop = is.size - 1
}
if start >= is.size {
return ""
}
return string(is.data[start : stop+1])
}
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
if start != nil && stop != nil {
return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
}
return ""
}
func (is *InputStream) GetTextFromInterval(i *Interval) string {
return is.GetText(i.Start, i.Stop)
}
func (*InputStream) GetSourceName() string {
return "Obtained from string"
}
func (is *InputStream) String() string {
return string(is.data)
}
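// Illustrative walk-through of the lookahead convention above: offsets are
// 1-based, LA(-1) is the most recently consumed rune, and out-of-range
// positions return TokenEOF.
//
//	is := NewInputStream("ab")
//	_ = is.LA(1)  // 'a'
//	is.Consume()
//	_ = is.LA(-1) // 'a': the rune just consumed
//	_ = is.LA(1)  // 'b'
//	_ = is.LA(2)  // TokenEOF: past the end of the buffer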


@@ -0,0 +1,16 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type IntStream interface {
Consume()
LA(int) int
Mark() int
Release(marker int)
Index() int
Seek(index int)
Size() int
GetSourceName() string
}


@@ -0,0 +1,308 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"strconv"
"strings"
)
type Interval struct {
Start int
Stop int
}
/* stop is not included! */
func NewInterval(start, stop int) *Interval {
i := new(Interval)
i.Start = start
i.Stop = stop
return i
}
func (i *Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
func (i *Interval) String() string {
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
func (i *Interval) length() int {
return i.Stop - i.Start
}
type IntervalSet struct {
intervals []*Interval
readOnly bool
}
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
i.intervals = nil
i.readOnly = false
return i
}
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
}
return i.intervals[0].Start
}
func (i *IntervalSet) addOne(v int) {
i.addInterval(NewInterval(v, v+1))
}
func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
func (i *IntervalSet) addInterval(v *Interval) {
if i.intervals == nil {
i.intervals = make([]*Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.Stop < interval.Start {
i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
return
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
return
} else if v.Start <= interval.Stop {
i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
// if not applying to end, merge potential overlaps
if k < len(i.intervals)-1 {
l := i.intervals[k]
r := i.intervals[k+1]
// if r contained in l
if l.Stop >= r.Stop {
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
} else if l.Stop >= r.Start { // partial overlap
i.intervals[k] = NewInterval(l.Start, r.Stop)
i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
}
}
return
}
}
// greater than any existing interval, so append
i.intervals = append(i.intervals, v)
}
}
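// Illustrative sketch of the merge behavior above: adjacent and overlapping
// ranges collapse into a single interval.
//
//	s := NewIntervalSet()
//	s.addRange(1, 3) // {1..3}
//	s.addRange(5, 7) // {1..3, 5..7}
//	s.addRange(3, 5) // bridges the gap: the set is now {1..7}
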
func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
if other.intervals != nil {
for k := 0; k < len(other.intervals); k++ {
i2 := other.intervals[k]
i.addInterval(NewInterval(i2.Start, i2.Stop))
}
}
return i
}
func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
result := NewIntervalSet()
result.addInterval(NewInterval(start, stop+1))
for j := 0; j < len(i.intervals); j++ {
result.removeRange(i.intervals[j])
}
return result
}
func (i *IntervalSet) contains(item int) bool {
if i.intervals == nil {
return false
}
for k := 0; k < len(i.intervals); k++ {
if i.intervals[k].Contains(item) {
return true
}
}
return false
}
func (i *IntervalSet) length() int {
len := 0
for _, v := range i.intervals {
len += v.length()
}
return len
}
func (i *IntervalSet) removeRange(v *Interval) {
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
k := 0
for n := 0; n < len(i.intervals); n++ {
ni := i.intervals[k]
// intervals are ordered
if v.Stop <= ni.Start {
return
} else if v.Start > ni.Start && v.Stop < ni.Stop {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
k = k - 1 // need another pass
} else if v.Start < ni.Stop {
i.intervals[k] = NewInterval(ni.Start, v.Start)
} else if v.Stop < ni.Stop {
i.intervals[k] = NewInterval(v.Stop, ni.Stop)
}
k++
}
}
}
func (i *IntervalSet) removeOne(v int) {
if i.intervals != nil {
for k := 0; k < len(i.intervals); k++ {
ki := i.intervals[k]
// intervals are ordered
if v < ki.Start {
return
} else if v == ki.Start && v == ki.Stop-1 {
// i.intervals.splice(k, 1)
i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
return
} else if v == ki.Start {
i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
return
} else if v == ki.Stop-1 {
i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
return
} else if v < ki.Stop-1 {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
return
}
}
}
}
func (i *IntervalSet) String() string {
return i.StringVerbose(nil, nil, false)
}
func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
if i.intervals == nil {
return "{}"
} else if literalNames != nil || symbolicNames != nil {
return i.toTokenString(literalNames, symbolicNames)
} else if elemsAreChar {
return i.toCharString()
}
return i.toIndexString()
}
func (i *IntervalSet) toCharString() string {
names := make([]string, 0, len(i.intervals))
var sb strings.Builder
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.Stop == v.Start+1 {
if v.Start == TokenEOF {
names = append(names, "<EOF>")
} else {
sb.WriteByte('\'')
sb.WriteRune(rune(v.Start))
sb.WriteByte('\'')
names = append(names, sb.String())
sb.Reset()
}
} else {
sb.WriteByte('\'')
sb.WriteRune(rune(v.Start))
sb.WriteString("'..'")
sb.WriteRune(rune(v.Stop - 1))
sb.WriteByte('\'')
names = append(names, sb.String())
sb.Reset()
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) toIndexString() string {
names := make([]string, 0)
for j := 0; j < len(i.intervals); j++ {
v := i.intervals[j]
if v.Stop == v.Start+1 {
if v.Start == TokenEOF {
names = append(names, "<EOF>")
} else {
names = append(names, strconv.Itoa(v.Start))
}
} else {
names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
names := make([]string, 0)
for _, v := range i.intervals {
for j := v.Start; j < v.Stop; j++ {
names = append(names, i.elementName(literalNames, symbolicNames, j))
}
}
if len(names) > 1 {
return "{" + strings.Join(names, ", ") + "}"
}
return names[0]
}
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
if a == TokenEOF {
return "<EOF>"
} else if a == TokenEpsilon {
return "<EPSILON>"
} else {
if a < len(literalNames) && literalNames[a] != "" {
return literalNames[a]
}
return symbolicNames[a]
}
}
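// Editorial usage sketch: rendering a token set with names from a generated
// recognizer (literalNames and symbolicNames are hypothetical inputs here):
//
//	s := NewIntervalSet()
//	s.addOne(5)
//	s.addRange(7, 9)
//	fmt.Println(s.StringVerbose(literalNames, symbolicNames, false))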


@ -0,0 +1,418 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
)
// A lexer is a recognizer that draws input symbols from a character stream.
// Lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.
///
type Lexer interface {
TokenSource
Recognizer
Emit() Token
SetChannel(int)
PushMode(int)
PopMode() int
SetType(int)
SetMode(int)
}
type BaseLexer struct {
*BaseRecognizer
Interpreter ILexerATNSimulator
TokenStartCharIndex int
TokenStartLine int
TokenStartColumn int
ActionType int
Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
input CharStream
factory TokenFactory
tokenFactorySourcePair *TokenSourceCharStreamPair
token Token
hitEOF bool
channel int
thetype int
modeStack IntStack
mode int
text string
}
func NewBaseLexer(input CharStream) *BaseLexer {
lexer := new(BaseLexer)
lexer.BaseRecognizer = NewBaseRecognizer()
lexer.input = input
lexer.factory = CommonTokenFactoryDEFAULT
lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
lexer.Virt = lexer
lexer.Interpreter = nil // child classes must populate it
// The goal of all lexer rules/methods is to create a token object.
// This is an instance variable, as multiple rules may collaborate to
// create a single token. NextToken will return this object after
// Matching lexer rule(s). If you subclass to allow multiple token
// emissions, then set this to the last token to be Matched or
// something non-nil so that the auto token emit mechanism will not
// emit another token.
lexer.token = nil
// What character index in the stream did the current token start at?
// Needed, for example, to get the text for current token. Set at
// the start of NextToken.
lexer.TokenStartCharIndex = -1
// The line on which the first character of the token resides
lexer.TokenStartLine = -1
// The character position of the first character within the line
lexer.TokenStartColumn = -1
// Once we see EOF on char stream, next token will be EOF.
// If you have DONE : EOF then you see DONE EOF.
lexer.hitEOF = false
// The channel number for the current token
lexer.channel = TokenDefaultChannel
// The token type for the current token
lexer.thetype = TokenInvalidType
lexer.modeStack = make([]int, 0)
lexer.mode = LexerDefaultMode
// You can set the text for the current token to override what is in
// the input char buffer. Use SetText() or set this instance var directly.
// /
lexer.text = ""
return lexer
}
const (
LexerDefaultMode = 0
LexerMore = -2
LexerSkip = -3
)
const (
LexerDefaultTokenChannel = TokenDefaultChannel
LexerHidden = TokenHiddenChannel
LexerMinCharValue = 0x0000
LexerMaxCharValue = 0x10FFFF
)
func (b *BaseLexer) reset() {
// reset all lexer state variables
if b.input != nil {
b.input.Seek(0) // rewind the input
}
b.token = nil
b.thetype = TokenInvalidType
b.channel = TokenDefaultChannel
b.TokenStartCharIndex = -1
b.TokenStartColumn = -1
b.TokenStartLine = -1
b.text = ""
b.hitEOF = false
b.mode = LexerDefaultMode
b.modeStack = make([]int, 0)
b.Interpreter.reset()
}
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
return b.Interpreter
}
func (b *BaseLexer) GetInputStream() CharStream {
return b.input
}
func (b *BaseLexer) GetSourceName() string {
return b.GrammarFileName
}
func (b *BaseLexer) SetChannel(v int) {
b.channel = v
}
func (b *BaseLexer) GetTokenFactory() TokenFactory {
return b.factory
}
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
b.factory = f
}
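// safeMatch runs the interpreter and converts a RecognitionException panic
// raised during simulation into a reported, recovered error, returning
// LexerSkip through the named return value instead of unwinding NextToken
// (editorial note).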
func (b *BaseLexer) safeMatch() (ret int) {
defer func() {
if e := recover(); e != nil {
if re, ok := e.(RecognitionException); ok {
b.notifyListeners(re) // Report error
b.Recover(re)
ret = LexerSkip // default
}
}
}()
return b.Interpreter.Match(b.input, b.mode)
}
// Return a token from this source, i.e., Match a token on the char stream.
func (b *BaseLexer) NextToken() Token {
if b.input == nil {
panic("NextToken requires a non-nil input stream.")
}
tokenStartMarker := b.input.Mark()
// previously in finally block
defer func() {
// make sure we release marker after Match or
// unbuffered char stream will keep buffering
b.input.Release(tokenStartMarker)
}()
for {
if b.hitEOF {
b.EmitEOF()
return b.token
}
b.token = nil
b.channel = TokenDefaultChannel
b.TokenStartCharIndex = b.input.Index()
b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
b.TokenStartLine = b.Interpreter.GetLine()
b.text = ""
continueOuter := false
for {
b.thetype = TokenInvalidType
ttype := LexerSkip
ttype = b.safeMatch()
if b.input.LA(1) == TokenEOF {
b.hitEOF = true
}
if b.thetype == TokenInvalidType {
b.thetype = ttype
}
if b.thetype == LexerSkip {
continueOuter = true
break
}
if b.thetype != LexerMore {
break
}
}
if continueOuter {
continue
}
if b.token == nil {
b.Virt.Emit()
}
return b.token
}
return nil
}
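// Editorial usage sketch (NewMyLexer is a hypothetical generated lexer that
// embeds BaseLexer and wires up Virt and Interpreter; assumes "fmt"):
//
//	lex := NewMyLexer(NewInputStream("ab ab"))
//	for t := lex.NextToken(); t.GetTokenType() != TokenEOF; t = lex.NextToken() {
//		fmt.Printf("%d %q\n", t.GetTokenType(), t.GetText())
//	}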
// Instruct the lexer to Skip creating a token for the current lexer rule
// and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
// /
func (b *BaseLexer) Skip() {
b.thetype = LexerSkip
}
func (b *BaseLexer) More() {
b.thetype = LexerMore
}
func (b *BaseLexer) SetMode(m int) {
b.mode = m
}
func (b *BaseLexer) PushMode(m int) {
if LexerATNSimulatorDebug {
fmt.Println("pushMode " + strconv.Itoa(m))
}
b.modeStack.Push(b.mode)
b.mode = m
}
func (b *BaseLexer) PopMode() int {
if len(b.modeStack) == 0 {
panic("Empty Stack")
}
if LexerATNSimulatorDebug {
fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
}
i, _ := b.modeStack.Pop()
b.mode = i
return b.mode
}
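// Editorial sketch of the mode stack (STRING_MODE is a hypothetical mode
// index from a generated lexer):
//
//	lex.PushMode(STRING_MODE) // opening quote: enter the island grammar
//	// ... string contents are lexed in STRING_MODE ...
//	lex.PopMode() // closing quote: restore the previous mode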
func (b *BaseLexer) inputStream() CharStream {
return b.input
}
// SetInputStream resets the lexer input stream and associated lexer state.
func (b *BaseLexer) SetInputStream(input CharStream) {
b.input = nil
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
b.reset()
b.input = input
b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
return b.tokenFactorySourcePair
}
// By default does not support multiple emits per NextToken invocation
// for efficiency reasons. Subclass and override this method, NextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as this implementation does).
// /
func (b *BaseLexer) EmitToken(token Token) {
b.token = token
}
// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom Token objects or provide a new factory.
// /
func (b *BaseLexer) Emit() Token {
t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
b.EmitToken(t)
return t
}
func (b *BaseLexer) EmitEOF() Token {
cpos := b.GetCharPositionInLine()
lpos := b.GetLine()
eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
b.EmitToken(eof)
return eof
}
func (b *BaseLexer) GetCharPositionInLine() int {
return b.Interpreter.GetCharPositionInLine()
}
func (b *BaseLexer) GetLine() int {
return b.Interpreter.GetLine()
}
func (b *BaseLexer) GetType() int {
return b.thetype
}
func (b *BaseLexer) SetType(t int) {
b.thetype = t
}
// What is the index of the current character of lookahead?
func (b *BaseLexer) GetCharIndex() int {
return b.input.Index()
}
// Return the text Matched so far for the current token, or any text override.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
}
return b.Interpreter.GetText(b.input)
}
// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
b.text = text
}
func (b *BaseLexer) GetATN() *ATN {
return b.Interpreter.ATN()
}
// Return a list of all Token objects in the input char stream.
// Forces load of all tokens. Does not include EOF token.
// /
func (b *BaseLexer) GetAllTokens() []Token {
vl := b.Virt
tokens := make([]Token, 0)
t := vl.NextToken()
for t.GetTokenType() != TokenEOF {
tokens = append(tokens, t)
t = vl.NextToken()
}
return tokens
}
func (b *BaseLexer) notifyListeners(e RecognitionException) {
start := b.TokenStartCharIndex
stop := b.input.Index()
text := b.input.GetTextFromInterval(NewInterval(start, stop))
msg := "token recognition error at: '" + text + "'"
listener := b.GetErrorListenerDispatch()
listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}
func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
if c == TokenEOF {
return "<EOF>"
} else if c == '\n' {
return "\\n"
} else if c == '\t' {
return "\\t"
} else if c == '\r' {
return "\\r"
} else {
return string(c)
}
}
func (b *BaseLexer) getCharErrorDisplay(c rune) string {
return "'" + b.getErrorDisplayForChar(c) + "'"
}
// Lexers can normally Match any char in its vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (b *BaseLexer) Recover(re RecognitionException) {
if b.input.LA(1) != TokenEOF {
if _, ok := re.(*LexerNoViableAltException); ok {
// Skip a char and try again
b.Interpreter.Consume(b.input)
} else {
// TODO: Do we lose character or line position information?
b.input.Consume()
}
}
}


@ -0,0 +1,430 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "strconv"
const (
LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
)
type LexerAction interface {
getActionType() int
getIsPositionDependent() bool
execute(lexer Lexer)
hash() int
equals(other LexerAction) bool
}
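// Editorial orientation: lexer commands in a grammar compile to these
// actions. For example
//
//	WS : [ \t\r\n]+ -> channel(HIDDEN) ;
//
// serializes a LexerChannelAction into the ATN, whose execute() calls
// lexer.SetChannel when the rule Matches, while -> skip reuses the shared
// LexerSkipActionINSTANCE defined below.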
type BaseLexerAction struct {
actionType int
isPositionDependent bool
}
func NewBaseLexerAction(action int) *BaseLexerAction {
la := new(BaseLexerAction)
la.actionType = action
la.isPositionDependent = false
return la
}
func (b *BaseLexerAction) execute(lexer Lexer) {
panic("Not implemented")
}
func (b *BaseLexerAction) getActionType() int {
return b.actionType
}
func (b *BaseLexerAction) getIsPositionDependent() bool {
return b.isPositionDependent
}
func (b *BaseLexerAction) hash() int {
return b.actionType
}
func (b *BaseLexerAction) equals(other LexerAction) bool {
return b == other
}
//
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// <p>The {@code Skip} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerSkipAction struct {
*BaseLexerAction
}
func NewLexerSkipAction() *LexerSkipAction {
la := new(LexerSkipAction)
la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
return la
}
// Provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
func (l *LexerSkipAction) String() string {
return "skip"
}
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
// with the assigned type.
type LexerTypeAction struct {
*BaseLexerAction
thetype int
}
func NewLexerTypeAction(thetype int) *LexerTypeAction {
l := new(LexerTypeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
l.thetype = thetype
return l
}
func (l *LexerTypeAction) execute(lexer Lexer) {
lexer.SetType(l.thetype)
}
func (l *LexerTypeAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.thetype)
return murmurFinish(h, 2)
}
func (l *LexerTypeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
return false
} else {
return l.thetype == other.(*LexerTypeAction).thetype
}
}
func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
// Implements the {@code pushMode} lexer action by calling
// {@link Lexer//pushMode} with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
mode int
}
func NewLexerPushModeAction(mode int) *LexerPushModeAction {
l := new(LexerPushModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
l.mode = mode
return l
}
// <p>This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerPushModeAction) execute(lexer Lexer) {
lexer.PushMode(l.mode)
}
func (l *LexerPushModeAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
func (l *LexerPushModeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
return false
} else {
return l.mode == other.(*LexerPushModeAction).mode
}
}
func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
// <p>The {@code popMode} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerPopModeAction struct {
*BaseLexerAction
}
func NewLexerPopModeAction() *LexerPopModeAction {
l := new(LexerPopModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
return l
}
var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (l *LexerPopModeAction) execute(lexer Lexer) {
lexer.PopMode()
}
func (l *LexerPopModeAction) String() string {
return "popMode"
}
// Implements the {@code more} lexer action by calling {@link Lexer//more}.
//
// <p>The {@code more} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerMoreAction struct {
*BaseLexerAction
}
func NewLexerMoreAction() *LexerMoreAction {
l := new(LexerMoreAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
return l
}
var LexerMoreActionINSTANCE = NewLexerMoreAction()
// <p>This action is implemented by calling {@link Lexer//more}.</p>
func (l *LexerMoreAction) execute(lexer Lexer) {
lexer.More()
}
func (l *LexerMoreAction) String() string {
return "more"
}
// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
mode int
}
func NewLexerModeAction(mode int) *LexerModeAction {
l := new(LexerModeAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
l.mode = mode
return l
}
// <p>This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerModeAction) execute(lexer Lexer) {
lexer.SetMode(l.mode)
}
func (l *LexerModeAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
func (l *LexerModeAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
return false
} else {
return l.mode == other.(*LexerModeAction).mode
}
}
func (l *LexerModeAction) String() string {
return "mode(" + strconv.Itoa(l.mode) + ")"
}
// Executes a custom lexer action by calling {@link Recognizer//action} with the
// rule and action indexes assigned to the custom action. The implementation of
// a custom action is added to the generated code for the lexer in an override
// of {@link Recognizer//action} when the grammar is compiled.
//
// <p>This class may represent embedded actions created with the <code>{...}</code>
// syntax in ANTLR 4, as well as actions created for lexer commands where the
// command argument could not be evaluated when the grammar was compiled.</p>
// Constructs a custom lexer action with the specified rule and action
// indexes.
//
// @param ruleIndex The rule index to use for calls to
// {@link Recognizer//action}.
// @param actionIndex The action index to use for calls to
// {@link Recognizer//action}.
type LexerCustomAction struct {
*BaseLexerAction
ruleIndex, actionIndex int
}
func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
l := new(LexerCustomAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
l.ruleIndex = ruleIndex
l.actionIndex = actionIndex
l.isPositionDependent = true
return l
}
// <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p>
func (l *LexerCustomAction) execute(lexer Lexer) {
lexer.Action(nil, l.ruleIndex, l.actionIndex)
}
func (l *LexerCustomAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.ruleIndex)
h = murmurUpdate(h, l.actionIndex)
return murmurFinish(h, 3)
}
func (l *LexerCustomAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
}
}
// Implements the {@code channel} lexer action by calling
// {@link Lexer//setChannel} with the assigned channel.
// Constructs a new {@code channel} action with the specified channel value.
// @param channel The channel value to pass to {@link Lexer//setChannel}.
type LexerChannelAction struct {
*BaseLexerAction
channel int
}
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
l.channel = channel
return l
}
// <p>This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.</p>
func (l *LexerChannelAction) execute(lexer Lexer) {
lexer.SetChannel(l.channel)
}
func (l *LexerChannelAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.channel)
return murmurFinish(h, 2)
}
func (l *LexerChannelAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
return false
} else {
return l.channel == other.(*LexerChannelAction).channel
}
}
func (l *LexerChannelAction) String() string {
return "channel(" + strconv.Itoa(l.channel) + ")"
}
// This implementation of {@link LexerAction} is used for tracking input offsets
// for position-dependent actions within a {@link LexerActionExecutor}.
//
// <p>This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>
// Constructs a new indexed custom action by associating a character offset
// with a {@link LexerAction}.
//
// <p>Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
//
// @param offset The offset into the input {@link CharStream}, relative to
// the token start index, at which the specified lexer action should be
// executed.
// @param action The lexer action to execute at a particular offset in the
// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
offset int
lexerAction LexerAction
isPositionDependent bool
}
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
l.offset = offset
l.lexerAction = lexerAction
l.isPositionDependent = true
return l
}
// <p>This method calls {@link //execute} on the result of {@link //getAction}
// using the provided {@code lexer}.</p>
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
// assume the input stream position was properly set by the calling code
l.lexerAction.execute(lexer)
}
func (l *LexerIndexedCustomAction) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.offset)
h = murmurUpdate(h, l.lexerAction.hash())
return murmurFinish(h, 2)
}
func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction.equals(other.(*LexerIndexedCustomAction).lexerAction)
}
}


@ -0,0 +1,173 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// Represents an executor for a sequence of lexer actions which were traversed during
// the Matching operation of a lexer rule (token).
//
// <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
// not cause bloating of the {@link DFA} created for the lexer.</p>
type LexerActionExecutor struct {
lexerActions []LexerAction
cachedHash int
}
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
if lexerActions == nil {
lexerActions = make([]LexerAction, 0)
}
l := new(LexerActionExecutor)
l.lexerActions = lexerActions
// Caches the result of {@link //hashCode} since the hash code is an element
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
l.cachedHash = murmurInit(57)
for _, a := range lexerActions {
l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
}
return l
}
// Creates a {@link LexerActionExecutor} which executes the actions for
// the input {@code lexerActionExecutor} followed by a specified
// {@code lexerAction}.
//
// @param lexerActionExecutor The executor for actions already traversed by
// the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions
// specified in {@code lexerActionExecutor}.
//
// @return A {@link LexerActionExecutor} for executing the combined actions
// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]LexerAction{lexerAction})
}
// copy-on-append (editorial fix): cached executors may share the underlying
// array, so appending in place could corrupt a previously built executor
lexerActions := append([]LexerAction{}, lexerActionExecutor.lexerActions...)
return NewLexerActionExecutor(append(lexerActions, lexerAction))
}
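// Editorial sketch: executors are treated as immutable, so composition
// always builds a new one.
//
//	var ex *LexerActionExecutor // nil behaves as an empty executor
//	ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
//	ex = LexerActionExecutorappend(ex, NewLexerChannelAction(1))
//	// ex.execute(lexer, input, startIndex) now runs both actions in order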
// Creates a {@link LexerActionExecutor} which encodes the current offset
// for position-dependent lexer actions.
//
// <p>Normally, when the executor encounters lexer actions where
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
// {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p>
//
// <p>Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of
// lexer actions which appear in the middle of tokens remains efficient due
// to sharing among tokens of the same length, regardless of their absolute
// position in the input stream.</p>
//
// <p>If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns {@code this}.</p>
//
// @param offset The current offset to assign to all position-dependent
// lexer actions which do not already have offsets assigned.
//
// @return A {@link LexerActionExecutor} which stores input stream offsets
// for all position-dependent lexer actions.
// /
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
var updatedLexerActions []LexerAction
for i := 0; i < len(l.lexerActions); i++ {
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
if l.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
updatedLexerActions = make([]LexerAction, 0)
for _, a := range l.lexerActions {
updatedLexerActions = append(updatedLexerActions, a)
}
}
updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
}
}
if updatedLexerActions == nil {
return l
}
return NewLexerActionExecutor(updatedLexerActions)
}
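// Editorial example: for a rule like  X : 'a' {action} 'b' ;  the embedded
// {action} is position-dependent and not at the rule end, so before the next
// Match transition it is wrapped as NewLexerIndexedCustomAction(offset, a),
// where offset is the distance from the token start (1 here). Executors whose
// position-dependent actions are all already indexed are returned unchanged.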
// Execute the actions encapsulated by this executor within the context of a
// particular {@link Lexer}.
//
// <p>This method calls {@link IntStream//seek} to set the position of the
// {@code input} {@link CharStream} prior to calling
// {@link LexerAction//execute} on a position-dependent action. Before the
// method returns, the input position will be restored to the same position
// it was in when the method was invoked.</p>
//
// @param lexer The lexer instance.
// @param input The input stream which is the source for the current token.
// When this method is called, the current {@link IntStream//index} for
// {@code input} should be the start of the following token, i.e. 1
// character past the end of the current token.
// @param startIndex The token start index. This value may be passed to
// {@link IntStream//seek} to set the {@code input} position to the beginning
// of the token.
// /
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
requiresSeek := false
stopIndex := input.Index()
defer func() {
if requiresSeek {
input.Seek(stopIndex)
}
}()
for i := 0; i < len(l.lexerActions); i++ {
lexerAction := l.lexerActions[i]
if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
offset := la.offset
input.Seek(startIndex + offset)
lexerAction = la.lexerAction
requiresSeek = (startIndex + offset) != stopIndex
} else if lexerAction.getIsPositionDependent() {
input.Seek(stopIndex)
requiresSeek = false
}
lexerAction.execute(lexer)
}
}
func (l *LexerActionExecutor) hash() int {
if l == nil {
return 61
}
return l.cachedHash
}
func (l *LexerActionExecutor) equals(other interface{}) bool {
if l == other {
return true
}
othert, ok := other.(*LexerActionExecutor)
if !ok {
return false
}
if othert == nil {
return false
}
if l.cachedHash != othert.cachedHash || len(l.lexerActions) != len(othert.lexerActions) {
return false
}
// compare element-wise (editorial fix): comparing &l.lexerActions with
// &othert.lexerActions only tested struct identity, which the l == other
// check above already covers
for j, a := range l.lexerActions {
if !a.equals(othert.lexerActions[j]) {
return false
}
}
return true
}


@ -0,0 +1,665 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
"strings"
)
var (
LexerATNSimulatorDebug = false
LexerATNSimulatorDFADebug = false
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
LexerATNSimulatorMatchCalls = 0
)
type ILexerATNSimulator interface {
IATNSimulator
reset()
Match(input CharStream, mode int) int
GetCharPositionInLine() int
GetLine() int
GetText(input CharStream) string
Consume(input CharStream)
}
type LexerATNSimulator struct {
*BaseATNSimulator
recog Lexer
predictionMode int
mergeCache DoubleDict
startIndex int
Line int
CharPositionInLine int
mode int
prevAccept *SimState
MatchCalls int
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
l := new(LexerATNSimulator)
l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
l.decisionToDFA = decisionToDFA
l.recog = recog
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In that case, we use the
// ATN-generated exception object.
l.startIndex = -1
// line number 1..n within the input
l.Line = 1
// The index of the character relative to the beginning of the line
// 0..n-1
l.CharPositionInLine = 0
l.mode = LexerDefaultMode
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
// done
return l
}
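// Editorial wiring sketch (simplified; mirrors what generated lexers do):
//
//	decisionToDFA := make([]*DFA, len(atn.DecisionToState))
//	for d, s := range atn.DecisionToState {
//		decisionToDFA[d] = NewDFA(s, d)
//	}
//	lex.Interpreter = NewLexerATNSimulator(lex, atn, decisionToDFA, NewPredictionContextCache())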
func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
l.CharPositionInLine = simulator.CharPositionInLine
l.Line = simulator.Line
l.mode = simulator.mode
l.startIndex = simulator.startIndex
}
func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
l.MatchCalls++
l.mode = mode
mark := input.Mark()
defer func() {
input.Release(mark)
}()
l.startIndex = input.Index()
l.prevAccept.reset()
dfa := l.decisionToDFA[mode]
if dfa.getS0() == nil {
return l.MatchATN(input)
}
return l.execATN(input, dfa.getS0())
}
func (l *LexerATNSimulator) reset() {
l.prevAccept.reset()
l.startIndex = -1
l.Line = 1
l.CharPositionInLine = 0
l.mode = LexerDefaultMode
}
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
if LexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
s0Closure := l.computeStartState(input, startState)
suppressEdge := s0Closure.hasSemanticContext
s0Closure.hasSemanticContext = false
next := l.addDFAState(s0Closure)
if !suppressEdge {
l.decisionToDFA[l.mode].setS0(next)
}
predict := l.execATN(input, next)
if LexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
}
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
if LexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
// allow zero-length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
if LexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
// As we move src->trg, src->trg, we keep track of the previous trg to
// avoid looking up the DFA state again, which is expensive.
// If the previous target was already part of the DFA, we might
// be able to avoid doing a reach operation upon t. If s!=nil,
// it means that semantic predicates didn't prevent us from
// creating a DFA state. Once we know s!=nil, we check to see if
// the DFA state has an edge already for t. If so, we can just reuse
// its configuration set; there's no point in re-computing it.
// This is kind of like doing DFA simulation within the ATN
// simulation because DFA simulation is really just a way to avoid
// computing reach/closure sets. Technically, once we know that
// we have a previously added DFA state, we could jump over to
// the DFA simulator. But, that would mean popping back and forth
// a lot and making things more complicated algorithmically.
// This optimization makes a lot of sense for loops within DFA.
// A character will take us back to an existing DFA state
// that already has lots of edges out of it. e.g., .* in comments.
target := l.getExistingTargetState(s, t)
if target == nil {
target = l.computeTargetState(input, s, t)
// print("Computed:" + str(target))
}
if target == ATNSimulatorError {
break
}
// If this is a consumable input element, make sure to consume before
// capturing the accept state so the input index, line, and char
// position accurately reflect the state of the interpreter at the
// end of the token.
if t != TokenEOF {
l.Consume(input)
}
if target.isAcceptState {
l.captureSimState(l.prevAccept, input, target)
if t == TokenEOF {
break
}
}
t = input.LA(1)
s = target // flip: the current DFA target becomes the new src/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
}
// Get an existing target state for an edge in the DFA. If the target state
// for the edge has not yet been computed or is otherwise not available,
// this method returns {@code nil}.
//
// @param s The current DFA state
// @param t The next input symbol
// @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
if s.getEdges() == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
if LexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t; don't throw out this knowledge, or it'd
// cause a failover from the DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
}
// if no accept and EOF is first char, return EOF
if t == TokenEOF && input.Index() == l.startIndex {
return TokenEOF
}
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
// this is used to Skip processing for configs which have a lower priority
// than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
for _, cfg := range closure.GetItems() {
currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
continue
}
if LexerATNSimulatorDebug {
fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
treatEOFAsEpsilon := (t == TokenEOF)
config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for this alt have a lower priority
// than the one that just reached an accept state.
SkipAlt = cfg.GetAlt()
}
}
}
}
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
if LexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
input.Seek(index)
l.Line = line
l.CharPositionInLine = charPos
if lexerActionExecutor != nil && l.recog != nil {
lexerActionExecutor.execute(l.recog, input, startIndex)
}
}
func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
if trans.Matches(t, 0, LexerMaxCharValue) {
return trans.getTarget()
}
return nil
}
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
l.closure(input, cfg, configs, false, false, false)
}
return configs
}
// Since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
if LexerATNSimulatorDebug {
fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
if LexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
fmt.Printf("closure at rule stop %s\n", config)
}
}
if config.context == nil || config.context.hasEmptyPath() {
if config.context == nil || config.context.isEmpty() {
configs.Add(config, nil)
return true
}
configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
currentAltReachedAcceptState = true
}
if config.context != nil && !config.context.isEmpty() {
for i := 0; i < config.context.length(); i++ {
if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
newContext := config.context.GetParent(i) // "pop" return state
returnState := l.atn.states[config.context.getReturnState(i)]
cfg := NewLexerATNConfig2(config, returnState, newContext)
currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
}
}
}
return currentAltReachedAcceptState
}
// optimization
if !config.state.GetEpsilonOnlyTransitions() {
if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
configs.Add(config, nil)
}
}
for j := 0; j < len(config.state.GetTransitions()); j++ {
trans := config.state.GetTransitions()[j]
cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
if cfg != nil {
currentAltReachedAcceptState = l.closure(input, cfg, configs,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
}
}
return currentAltReachedAcceptState
}
// side-effect: can alter configs.hasSemanticContext
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
var cfg *LexerATNConfig
if trans.getSerializationType() == TransitionRULE {
rt := trans.(*RuleTransition)
newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
} else if trans.getSerializationType() == TransitionPRECEDENCE {
panic("Precedence predicates are not supported in lexers.")
} else if trans.getSerializationType() == TransitionPREDICATE {
// Track traversing semantic predicates. If we traverse,
// we cannot add a DFA state for l "reach" computation
// because the DFA would not test the predicate again in the
// future. Rather than creating collections of semantic predicates
// like v3 and testing them on prediction, v4 will test them on the
// fly all the time using the ATN not the DFA. This is slower but
// semantically it's not used that often. One of the key elements to
// this predicate mechanism is not adding DFA states that see
// predicates immediately afterwards in the ATN. For example,
// a : ID {p1}? | ID {p2}?
// should create the start state for rule 'a' (to save start state
// competition), but should not create target of ID state. The
// collection of ATN states the following ID references includes
// states reached by traversing predicates. Since this is when we
// test them, we cannot cache the DFA state target of ID.
pt := trans.(*PredicateTransition)
if LexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
configs.SetHasSemanticContext(true)
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if trans.getSerializationType() == TransitionACTION {
if config.context == nil || config.context.hasEmptyPath() {
// execute actions anywhere in the start rule for a token.
//
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
// isEmpty() is false. In that case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
// getEpsilonTarget to return two configurations, so
// additional modifications are needed before we can support
// the split operation.
lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
} else {
// ignore actions in referenced rules
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
} else if trans.getSerializationType() == TransitionEPSILON {
cfg = NewLexerATNConfig4(config, trans.getTarget())
} else if trans.getSerializationType() == TransitionATOM ||
trans.getSerializationType() == TransitionRANGE ||
trans.getSerializationType() == TransitionSET {
if treatEOFAsEpsilon {
if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
}
}
return cfg
}
// Evaluate a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
// call to {@link //consume}).</p>
//
// @param input The input stream.
// @param ruleIndex The rule containing the predicate.
// @param predIndex The index of the predicate within the rule.
// @param speculative {@code true} if the current index in {@code input} is
// one character before the predicate's location.
//
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
// /
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
return true
}
if !speculative {
return l.recog.Sempred(nil, ruleIndex, predIndex)
}
savedcolumn := l.CharPositionInLine
savedLine := l.Line
index := input.Index()
marker := input.Mark()
defer func() {
l.CharPositionInLine = savedcolumn
l.Line = savedLine
input.Seek(index)
input.Release(marker)
}()
l.Consume(input)
return l.recog.Sempred(nil, ruleIndex, predIndex)
}
func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
settings.index = input.Index()
settings.line = l.Line
settings.column = l.CharPositionInLine
settings.dfaState = dfaState
}
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// Leading up to this call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes l edge
// dependent on the specific input sequence, so the static edge in the
// DFA should be omitted. The target DFAState is still created since
// execATN has the ability to reSynchronize with the DFA state cache
// following the predicate evaluation step.
//
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
// /
suppressEdge := cfgs.HasSemanticContext()
cfgs.SetHasSemanticContext(false)
to = l.addDFAState(cfgs)
if suppressEdge {
return to
}
}
// add the edge
if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
// Only track edges within the DFA bounds
return to
}
if LexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
if from.getEdges() == nil {
// make room for tokens 1..n and -1 masquerading as index 0
from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
}
from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
return to
}
// Add a new DFA state if there isn't one with this set of
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
proposed := NewDFAState(-1, configs)
var firstConfigWithRuleStopState ATNConfig
for _, cfg := range configs.GetItems() {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
firstConfigWithRuleStopState = cfg
break
}
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
hash := proposed.hash()
dfa := l.decisionToDFA[l.mode]
existing, ok := dfa.getState(hash)
if ok {
return existing
}
newState := proposed
newState.stateNumber = dfa.numStates()
configs.SetReadOnly(true)
newState.configs = configs
dfa.setState(hash, newState)
return newState
}
func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
}
func (l *LexerATNSimulator) Consume(input CharStream) {
curChar := input.LA(1)
if curChar == int('\n') {
l.Line++
l.CharPositionInLine = 0
} else {
l.CharPositionInLine++
}
input.Consume()
}
func (l *LexerATNSimulator) GetCharPositionInLine() int {
return l.CharPositionInLine
}
func (l *LexerATNSimulator) GetLine() int {
return l.Line
}
func (l *LexerATNSimulator) GetTokenName(tt int) string {
if tt == -1 {
return "EOF"
}
var sb strings.Builder
sb.Grow(6)
sb.WriteByte('\'')
sb.WriteRune(rune(tt))
sb.WriteByte('\'')
return sb.String()
}
func resetSimState(sim *SimState) {
sim.index = -1
sim.line = 0
sim.column = -1
sim.dfaState = nil
}
type SimState struct {
index int
line int
column int
dfaState *DFAState
}
func NewSimState() *SimState {
s := new(SimState)
resetSimState(s)
return s
}
func (s *SimState) reset() {
resetSimState(s)
}


@ -0,0 +1,212 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type LL1Analyzer struct {
atn *ATN
}
func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
la := new(LL1Analyzer)
la.atn = atn
return la
}
//* Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
///
const (
LL1AnalyzerHitPred = TokenInvalidType
)
//*
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
if s == nil {
return nil
}
count := len(s.GetTransitions())
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
lookBusy := NewArray2DHashSet(nil, nil)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for this alternative if we found nothing
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
look[alt] = nil
}
}
return look
}
//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
///
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
seeThruPreds := true // ignore preds get all lookahead
var lookContext PredictionContext
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
la.look1(s, stopState, lookContext, r, NewArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
return r
}
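// Editorial usage sketch (roughly how the runtime builds "expected token"
// sets for error reporting; stateNumber and parserCtx are assumptions):
//
//	la := NewLL1Analyzer(atn)
//	follow := la.Look(atn.states[stateNumber], nil, parserCtx)
//	fmt.Println(follow) // e.g. "{10, 13..17}" via IntervalSet.String()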
//*
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
// {@code true} and {@code stopState} or the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state.
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx The outer context, or {@code nil} if the outer context should
// not be used.
// @param look The result lookahead set.
// @param lookBusy A set used for preventing epsilon closures in the ATN
// from causing a stack overflow. Outside code should pass
// {@code NewSet<ATNConfig>} for this argument.
// @param calledRuleStack A set used for preventing left recursion in the
// ATN from causing a stack overflow. Outside code should pass
// {@code NewBitSet()} for this argument.
// @param seeThruPreds {@code true} to treat semantic predicates as
// implicitly {@code true} and "see through them", otherwise {@code false}
// to treat semantic predicates as opaque and add {@link //HitPred} to the
// result if one is encountered.
// @param addEOF Add {@link Token//EOF} to the result if the end of the
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
c := NewBaseATNConfig6(s, 0, ctx)
if lookBusy.Contains(c) {
return
}
lookBusy.Add(c)
if s == stopState {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
}
_, ok := s.(*RuleStopState)
if ok {
if ctx == nil {
look.addOne(TokenEpsilon)
return
} else if ctx.isEmpty() && addEOF {
look.addOne(TokenEOF)
return
}
if ctx != BasePredictionContextEMPTY {
removed := calledRuleStack.contains(s.GetRuleIndex())
defer func() {
if removed {
calledRuleStack.add(s.GetRuleIndex())
}
}()
calledRuleStack.remove(s.GetRuleIndex())
// run thru all possible stack tops in ctx
for i := 0; i < ctx.length(); i++ {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
}
return
}
}
n := len(s.GetTransitions())
for i := 0; i < n; i++ {
t := s.GetTransitions()[i]
if t1, ok := t.(*RuleTransition); ok {
if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
continue
}
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
} else if t2, ok := t.(AbstractPredicateTransition); ok {
if seeThruPreds {
la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else {
look.addOne(LL1AnalyzerHitPred)
}
} else if t.getIsEpsilon() {
la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
} else if _, ok := t.(*WildcardTransition); ok {
look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
} else {
set := t.getLabel()
if set != nil {
if _, ok := t.(*NotSetTransition); ok {
set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
}
look.addSet(set)
}
}
}
}
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
// ctx has already been extended with t1's follow state by the caller in
// look1, so it must not be wrapped with the follow state a second time here.
defer func() {
calledRuleStack.remove(t1.getTarget().GetRuleIndex())
}()
calledRuleStack.add(t1.getTarget().GetRuleIndex())
la.look1(t1.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}


@@ -0,0 +1,718 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
)
type Parser interface {
Recognizer
GetInterpreter() *ParserATNSimulator
GetTokenStream() TokenStream
GetTokenFactory() TokenFactory
GetParserRuleContext() ParserRuleContext
SetParserRuleContext(ParserRuleContext)
Consume() Token
GetParseListeners() []ParseTreeListener
GetErrorHandler() ErrorStrategy
SetErrorHandler(ErrorStrategy)
GetInputStream() IntStream
GetCurrentToken() Token
GetExpectedTokens() *IntervalSet
NotifyErrorListeners(string, Token, RecognitionException)
IsExpectedToken(int) bool
GetPrecedence() int
GetRuleInvocationStack(ParserRuleContext) []string
}
type BaseParser struct {
*BaseRecognizer
Interpreter *ParserATNSimulator
BuildParseTrees bool
input TokenStream
errHandler ErrorStrategy
precedenceStack IntStack
ctx ParserRuleContext
tracer *TraceListener
parseListeners []ParseTreeListener
_SyntaxErrors int
}
// This is all the parsing support code; essentially most of it is error
// recovery stuff.
func NewBaseParser(input TokenStream) *BaseParser {
p := new(BaseParser)
p.BaseRecognizer = NewBaseRecognizer()
// The input stream.
p.input = nil
// The error handling strategy for the parser. The default value is a new
// instance of {@link DefaultErrorStrategy}.
p.errHandler = NewDefaultErrorStrategy()
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
// The {@link ParserRuleContext} object for the currently executing rule.
// This is always non-nil during the parsing process.
p.ctx = nil
// Specifies whether or not the parser should construct a parse tree during
// the parsing process. The default value is {@code true}.
p.BuildParseTrees = true
// When {@link //setTrace}{@code (true)} is called, a reference to the
// {@link TraceListener} is stored here so it can be easily removed in a
// later call to {@link //setTrace}{@code (false)}. The listener itself is
// implemented as a parser listener so this field is not directly used by
// other parser methods.
p.tracer = nil
// The list of {@link ParseTreeListener} listeners registered to receive
// events during the parse.
p.parseListeners = nil
// The number of syntax errors Reported during parsing. This value is
// incremented each time {@link //NotifyErrorListeners} is called.
p._SyntaxErrors = 0
p.SetInputStream(input)
return p
}
// This field maps from the serialized ATN string to the deserialized
// {@link ATN} with bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
//
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
func (p *BaseParser) reset() {
if p.input != nil {
p.input.Seek(0)
}
p.errHandler.reset(p)
p.ctx = nil
p._SyntaxErrors = 0
p.SetTrace(nil)
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
if p.Interpreter != nil {
p.Interpreter.reset()
}
}
func (p *BaseParser) GetErrorHandler() ErrorStrategy {
return p.errHandler
}
func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
p.errHandler = e
}
// Match current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// mismatched symbol
func (p *BaseParser) Match(ttype int) Token {
t := p.GetCurrentToken()
if t.GetTokenType() == ttype {
p.errHandler.ReportMatch(p)
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a new token during single token
// insertion
// if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
return t
}
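// For example, a generated rule body consumes an expected token via Match
// (sketch; MyParserID is a hypothetical generated token-type constant):
//
// <pre>
// tok := p.Match(MyParserID) // returns the ID token, or recovers inline
// fmt.Println(tok.GetText())
// </pre>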
// Match current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched
// symbol
func (p *BaseParser) MatchWildcard() Token {
t := p.GetCurrentToken()
if t.GetTokenType() > 0 {
p.errHandler.ReportMatch(p)
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a new token during single token
// insertion
// if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
return t
}
func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
return p.ctx
}
func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
p.ctx = v
}
func (p *BaseParser) GetParseListeners() []ParseTreeListener {
if p.parseListeners == nil {
return make([]ParseTreeListener, 0)
}
return p.parseListeners
}
// Registers {@code listener} to receive events during the parsing process.
//
// <p>To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parse is complete. In addition, calls to certain
// rule entry methods may be omitted.</p>
//
// <p>With the following specific exceptions, calls to listener events are
// <em>deterministic</em>, i.e. for identical input the calls to listener
// methods will be the same.</p>
//
// <ul>
// <li>Alterations to the grammar used to generate code may change the
// behavior of the listener calls.</li>
// <li>Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.</li>
// <li>Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.</li>
// </ul>
//
// @param listener the listener to add
//
// @panics nilPointerException if {@code listener} is {@code nil}
//
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
}
if p.parseListeners == nil {
p.parseListeners = make([]ParseTreeListener, 0)
}
p.parseListeners = append(p.parseListeners, listener)
}
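// For example, to count rule entries while the parse is running (sketch;
// assumes the runtime's no-op BaseParseTreeListener helper):
//
// <pre>
// type ruleCounter struct {
//     *BaseParseTreeListener
//     n int
// }
//
// func (r *ruleCounter) EnterEveryRule(ctx ParserRuleContext) { r.n++ }
//
// p.AddParseListener(&ruleCounter{BaseParseTreeListener: &BaseParseTreeListener{}})
// </pre>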
//
// Remove {@code listener} from the list of parse listeners.
//
// <p>If {@code listener} is {@code nil} or has not been added as a parse
// listener, p.method does nothing.</p>
// @param listener the listener to remove
//
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
idx := -1
for i, v := range p.parseListeners {
if v == listener {
idx = i
break
}
}
if idx == -1 {
return
}
// remove the listener from the slice
p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
if len(p.parseListeners) == 0 {
p.parseListeners = nil
}
}
}
// Remove all parse listeners.
func (p *BaseParser) removeParseListeners() {
p.parseListeners = nil
}
// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
if p.parseListeners != nil {
ctx := p.ctx
for _, listener := range p.parseListeners {
listener.EnterEveryRule(ctx)
ctx.EnterRule(listener)
}
}
}
//
// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
//
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
ctx := p.ctx
l := len(p.parseListeners) - 1
for i := range p.parseListeners {
listener := p.parseListeners[l-i]
ctx.ExitRule(listener)
listener.ExitEveryRule(ctx)
}
}
}
func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
return p.Interpreter
}
func (p *BaseParser) GetATN() *ATN {
return p.Interpreter.atn
}
func (p *BaseParser) GetTokenFactory() TokenFactory {
return p.input.GetTokenSource().GetTokenFactory()
}
// Tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
p.input.GetTokenSource().setTokenFactory(factory)
}
// The ATN with bypass alternatives is expensive to create so we create it
// lazily.
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
//
func (p *BaseParser) GetATNWithBypassAlts() {
// TODO
panic("Not implemented!")
// serializedAtn := p.getSerializedATN()
// if (serializedAtn == nil) {
// panic("The current parser does not support an ATN with bypass alternatives.")
// }
// result := p.bypassAltsAtnCache[serializedAtn]
// if (result == nil) {
// deserializationOptions := NewATNDeserializationOptions(nil)
// deserializationOptions.generateRuleBypassTransitions = true
// result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
// p.bypassAltsAtnCache[serializedAtn] = result
// }
// return result
}
// The preferred method of getting a tree pattern. For example, here's a
// sample use:
//
// <pre>
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("&lt;ID&gt;+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.Get("ID")
// </pre>
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
panic("NewParseTreePatternMatcher not implemented!")
//
// if (lexer == nil) {
// if (p.GetTokenStream() != nil) {
// tokenSource := p.GetTokenStream().GetTokenSource()
// if _, ok := tokenSource.(ILexer); ok {
// lexer = tokenSource
// }
// }
// }
// if (lexer == nil) {
// panic("Parser can't discover a lexer to use")
// }
// m := NewParseTreePatternMatcher(lexer, p)
// return m.compile(pattern, patternRuleIndex)
}
func (p *BaseParser) GetInputStream() IntStream {
return p.GetTokenStream()
}
func (p *BaseParser) SetInputStream(input TokenStream) {
p.SetTokenStream(input)
}
func (p *BaseParser) GetTokenStream() TokenStream {
return p.input
}
// Set the token stream and reset the parser.//
func (p *BaseParser) SetTokenStream(input TokenStream) {
p.input = nil
p.reset()
p.input = input
}
// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
//
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
}
func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
if offendingToken == nil {
offendingToken = p.GetCurrentToken()
}
p._SyntaxErrors++
line := offendingToken.GetLine()
column := offendingToken.GetColumn()
listener := p.GetErrorListenerDispatch()
listener.SyntaxError(p, offendingToken, line, column, msg, err)
}
func (p *BaseParser) Consume() Token {
o := p.GetCurrentToken()
if o.GetTokenType() != TokenEOF {
p.GetInputStream().Consume()
}
hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
if p.BuildParseTrees || hasListener {
if p.errHandler.inErrorRecoveryMode(p) {
node := p.ctx.AddErrorNode(o)
if p.parseListeners != nil {
for _, l := range p.parseListeners {
l.VisitErrorNode(node)
}
}
} else {
node := p.ctx.AddTokenNode(o)
if p.parseListeners != nil {
for _, l := range p.parseListeners {
l.VisitTerminal(node)
}
}
}
// node.invokingState = p.state
}
return o
}
func (p *BaseParser) addContextToParseTree() {
// add current context to parent if we have a parent
if p.ctx.GetParent() != nil {
p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
}
}
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
p.SetState(state)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
if p.BuildParseTrees {
p.addContextToParseTree()
}
if p.parseListeners != nil {
p.TriggerEnterRuleEvent()
}
}
func (p *BaseParser) ExitRule() {
p.ctx.SetStop(p.input.LT(-1))
// trigger event on ctx, before it reverts to parent
if p.parseListeners != nil {
p.TriggerExitRuleEvent()
}
p.SetState(p.ctx.GetInvokingState())
if p.ctx.GetParent() != nil {
p.ctx = p.ctx.GetParent().(ParserRuleContext)
} else {
p.ctx = nil
}
}
func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
localctx.SetAltNumber(altNum)
// if we have a new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if p.BuildParseTrees && p.ctx != localctx {
if p.ctx.GetParent() != nil {
p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
}
}
p.ctx = localctx
}
// Get the precedence level for the top-most precedence rule.
//
// @return The precedence level for the top-most precedence rule, or -1 if
// the parser context is not nested within a precedence rule.
func (p *BaseParser) GetPrecedence() int {
if len(p.precedenceStack) == 0 {
return -1
}
return p.precedenceStack[len(p.precedenceStack)-1]
}
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
p.SetState(state)
p.precedenceStack.Push(precedence)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
if p.parseListeners != nil {
p.TriggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
}
}
//
// Like {@link //EnterRule} but for recursive rules.
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
previous := p.ctx
previous.SetParent(localctx)
previous.SetInvokingState(state)
previous.SetStop(p.input.LT(-1))
p.ctx = localctx
p.ctx.SetStart(previous.GetStart())
if p.BuildParseTrees {
p.ctx.AddChild(previous)
}
if p.parseListeners != nil {
p.TriggerEnterRuleEvent() // simulates rule entry for
// left-recursive rules
}
}
func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
p.precedenceStack.Pop()
p.ctx.SetStop(p.input.LT(-1))
retCtx := p.ctx // save current ctx (return value)
// unroll so ctx is as it was before call to recursive method
if p.parseListeners != nil {
for p.ctx != parentCtx {
p.TriggerExitRuleEvent()
p.ctx = p.ctx.GetParent().(ParserRuleContext)
}
} else {
p.ctx = parentCtx
}
// hook into tree
retCtx.SetParent(parentCtx)
if p.BuildParseTrees && parentCtx != nil {
// add return ctx into invoking rule's tree
parentCtx.AddChild(retCtx)
}
}
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
ctx := p.ctx
for ctx != nil {
if ctx.GetRuleIndex() == ruleIndex {
return ctx
}
// stop at the root instead of type-asserting a nil parent, which would panic
vp := ctx.GetParent()
if vp == nil {
break
}
ctx = vp.(ParserRuleContext)
}
return nil
}
func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
func (p *BaseParser) inContext(context ParserRuleContext) bool {
// TODO: useful in parser?
return false
}
//
// Checks whether or not {@code symbol} can follow the current state in the
// ATN. The behavior of p.method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
// <pre>
// return getExpectedTokens().contains(symbol)
// </pre>
//
// @param symbol the symbol type to check
// @return {@code true} if {@code symbol} can follow the current state in
// the ATN, otherwise {@code false}.
func (p *BaseParser) IsExpectedToken(symbol int) bool {
atn := p.Interpreter.atn
ctx := p.ctx
s := atn.states[p.state]
following := atn.NextTokens(s, nil)
if following.contains(symbol) {
return true
}
if !following.contains(TokenEpsilon) {
return false
}
for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
invokingState := atn.states[ctx.GetInvokingState()]
rt := invokingState.GetTransitions()[0]
following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
if following.contains(symbol) {
return true
}
ctx = ctx.GetParent().(ParserRuleContext)
}
if following.contains(TokenEpsilon) && symbol == TokenEOF {
return true
}
return false
}
// Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
//
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
atn := p.Interpreter.atn
s := atn.states[p.state]
return atn.NextTokens(s, nil)
}
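// For example, a custom error strategy might report what was expected at
// the point of failure (sketch):
//
// <pre>
// expected := p.GetExpectedTokens()
// p.NotifyErrorListeners("expecting one of "+expected.String(), nil, nil)
// </pre>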
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
func (p *BaseParser) GetRuleIndex(ruleName string) int {
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
if ok {
return ruleIndex
}
return -1
}
// Return List&lt;String&gt; of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
//
// This is very useful for error messages.
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
if c == nil {
c = p.ctx
}
stack := make([]string, 0)
for c != nil {
// compute what follows who invoked us
ruleIndex := c.GetRuleIndex()
if ruleIndex < 0 {
stack = append(stack, "n/a")
} else {
stack = append(stack, p.GetRuleNames()[ruleIndex])
}
vp := c.GetParent()
if vp == nil {
break
}
c = vp.(ParserRuleContext)
}
return stack
}
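// For example, when diagnosing a failed parse one might print the stack
// (sketch):
//
// <pre>
// stack := p.GetRuleInvocationStack(nil)
// fmt.Println("rule stack: " + strings.Join(stack, " <- "))
// </pre>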
// For debugging and other purposes.//
func (p *BaseParser) GetDFAStrings() string {
return fmt.Sprint(p.Interpreter.decisionToDFA)
}
// For debugging and other purposes.//
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
if dfa.numStates() > 0 {
if seenOne {
fmt.Println()
}
fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
seenOne = true
}
}
}
func (p *BaseParser) GetSourceName() string {
return p.GrammarFileName
}
// During a parse it is sometimes useful to listen in on the rule entry and exit
// events as well as token Matches. This is for quick and dirty debugging.
//
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
p.tracer = nil
} else {
if p.tracer != nil {
p.RemoveParseListener(p.tracer)
}
p.tracer = NewTraceListener(p)
p.AddParseListener(p.tracer)
}
}
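// For example (sketch; the argument only acts as an on/off switch, since
// SetTrace builds its own TraceListener internally):
//
// <pre>
// p.SetTrace(&TraceListener{}) // start tracing rule entry/exit and token Matches
// p.SetTrace(nil)              // stop tracing
// </pre>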

(File diff suppressed because it is too large.)


@@ -0,0 +1,362 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"reflect"
"strconv"
)
type ParserRuleContext interface {
RuleContext
SetException(RecognitionException)
AddTokenNode(token Token) *TerminalNodeImpl
AddErrorNode(badToken Token) *ErrorNodeImpl
EnterRule(listener ParseTreeListener)
ExitRule(listener ParseTreeListener)
SetStart(Token)
GetStart() Token
SetStop(Token)
GetStop() Token
AddChild(child RuleContext) RuleContext
RemoveLastChild()
}
type BaseParserRuleContext struct {
*BaseRuleContext
start, stop Token
exception RecognitionException
children []Tree
}
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
prc.RuleIndex = -1
// If we are debugging or building a parse tree for a Visitor,
// we need to track all of the tokens and rule invocations associated
// with this rule's context. This is empty when parsing without tree
// construction, because we don't need to track the details about
// how we parse this rule.
prc.children = nil
prc.start = nil
prc.stop = nil
// The exception that forced prc rule to return. If the rule successfully
// completed, prc is {@code nil}.
prc.exception = nil
return prc
}
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
prc.exception = e
}
func (prc *BaseParserRuleContext) GetChildren() []Tree {
return prc.children
}
func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
// from RuleContext
prc.parentCtx = ctx.parentCtx
prc.invokingState = ctx.invokingState
prc.children = nil
prc.start = ctx.start
prc.stop = ctx.stop
}
func (prc *BaseParserRuleContext) GetText() string {
if prc.GetChildCount() == 0 {
return ""
}
var s string
for _, child := range prc.children {
s += child.(ParseTree).GetText()
}
return s
}
// Double dispatch methods for listeners
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}
func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}
// Does not set the parent link; other add methods do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
}
if child == nil {
panic("Child may not be null")
}
prc.children = append(prc.children, child)
return child
}
func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
if prc.children == nil {
prc.children = make([]Tree, 0)
}
if child == nil {
panic("Child may not be null")
}
prc.children = append(prc.children, child)
return child
}
// Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have a label, we will need to remove the
// generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
}
}
func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
node := NewTerminalNodeImpl(token)
prc.addTerminalNodeChild(node)
node.parentCtx = prc
return node
}
func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
node := NewErrorNodeImpl(badToken)
prc.addTerminalNodeChild(node)
node.parentCtx = prc
return node
}
func (prc *BaseParserRuleContext) GetChild(i int) Tree {
if prc.children != nil && len(prc.children) > i {
return prc.children[i]
}
return nil
}
func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
if childType == nil {
return prc.GetChild(i).(RuleContext)
}
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if reflect.TypeOf(child) == childType {
if i == 0 {
return child.(RuleContext)
}
i--
}
}
return nil
}
func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
return TreesStringTree(prc, ruleNames, recog)
}
func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
return prc
}
func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
return visitor.VisitChildren(prc)
}
func (prc *BaseParserRuleContext) SetStart(t Token) {
prc.start = t
}
func (prc *BaseParserRuleContext) GetStart() Token {
return prc.start
}
func (prc *BaseParserRuleContext) SetStop(t Token) {
prc.stop = t
}
func (prc *BaseParserRuleContext) GetStop() Token {
return prc.stop
}
func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if c2, ok := child.(TerminalNode); ok {
if c2.GetSymbol().GetTokenType() == ttype {
if i == 0 {
return c2
}
i--
}
}
}
return nil
}
func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
if prc.children == nil {
return make([]TerminalNode, 0)
}
tokens := make([]TerminalNode, 0)
for j := 0; j < len(prc.children); j++ {
child := prc.children[j]
if tchild, ok := child.(TerminalNode); ok {
if tchild.GetSymbol().GetTokenType() == ttype {
tokens = append(tokens, tchild)
}
}
}
return tokens
}
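// For example, a generated context accessor collects terminals by token type
// (sketch; MyParserID is a hypothetical generated constant):
//
// <pre>
// ids := prc.GetTokens(MyParserID)      // every ID terminal among the children
// first := prc.GetToken(MyParserID, 0)  // just the first one, or nil
// </pre>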
func (prc *BaseParserRuleContext) GetPayload() interface{} {
return prc
}
func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
if prc.children == nil || i < 0 || i >= len(prc.children) {
return nil
}
j := -1 // what element have we found with ctxType?
for _, o := range prc.children {
childType := reflect.TypeOf(o)
if childType.Implements(ctxType) {
j++
if j == i {
return o.(RuleContext)
}
}
}
return nil
}
// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
// check for convertibility
func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
return prc.getChild(ctxType, i)
}
func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
if prc.children == nil {
return make([]RuleContext, 0)
}
contexts := make([]RuleContext, 0)
for _, child := range prc.children {
childType := reflect.TypeOf(child)
if childType.ConvertibleTo(ctxType) {
contexts = append(contexts, child.(RuleContext))
}
}
return contexts
}
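// For example, a generated accessor might fetch all child expression
// contexts (sketch; ExprContext is a hypothetical generated type):
//
// <pre>
// exprType := reflect.TypeOf((*ExprContext)(nil))
// exprs := prc.GetTypedRuleContexts(exprType)
// </pre>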
func (prc *BaseParserRuleContext) GetChildCount() int {
if prc.children == nil {
return 0
}
return len(prc.children)
}
func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
}
// need to manage circular dependencies, so export now
// Print out a whole tree, not just a node, in LISP format
// (root child1 .. childN). Print just a node if b is a leaf.
//
func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
var p ParserRuleContext = prc
s := "["
for p != nil && p != stop {
if ruleNames == nil {
if !p.IsEmpty() {
s += strconv.Itoa(p.GetInvokingState())
}
} else {
ri := p.GetRuleIndex()
var ruleName string
if ri >= 0 && ri < len(ruleNames) {
ruleName = ruleNames[ri]
} else {
ruleName = strconv.Itoa(ri)
}
s += ruleName
}
if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
s += " "
}
pi := p.GetParent()
if pi != nil {
p = pi.(ParserRuleContext)
} else {
p = nil
}
}
s += "]"
return s
}
var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
ParserRuleContext
}
type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
prc.RuleIndex = ruleIndex
return prc
}


@@ -0,0 +1,751 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"strconv"
)
// Represents {@code $} in local context prediction, which means wildcard:
// {@code * + x = *}.
// /
const (
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)
// Represents {@code $} in an array in full context mode, when {@code $}
// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
// {@code $} = {@link //EmptyReturnState}.
// /
var (
BasePredictionContextglobalNodeCount = 1
BasePredictionContextid = BasePredictionContextglobalNodeCount
)
type PredictionContext interface {
hash() int
GetParent(int) PredictionContext
getReturnState(int) int
equals(PredictionContext) bool
length() int
isEmpty() bool
hasEmptyPath() bool
String() string
}
type BasePredictionContext struct {
cachedHash int
}
func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
pc := new(BasePredictionContext)
pc.cachedHash = cachedHash
return pc
}
func (b *BasePredictionContext) isEmpty() bool {
return false
}
func calculateHash(parent PredictionContext, returnState int) int {
h := murmurInit(1)
h = murmurUpdate(h, parent.hash())
h = murmurUpdate(h, returnState)
return murmurFinish(h, 2)
}
var _emptyPredictionContextHash int
func init() {
_emptyPredictionContextHash = murmurInit(1)
_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
}
func calculateEmptyHash() int {
return _emptyPredictionContextHash
}
// Used to cache {@link BasePredictionContext} objects. It is used for the
// shared context cache associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.
type PredictionContextCache struct {
cache map[PredictionContext]PredictionContext
}
func NewPredictionContextCache() *PredictionContextCache {
t := new(PredictionContextCache)
t.cache = make(map[PredictionContext]PredictionContext)
return t
}
// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect shared cache from unsafe thread access.
//
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
if ctx == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY
}
existing := p.cache[ctx]
if existing != nil {
return existing
}
p.cache[ctx] = ctx
return ctx
}
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
return p.cache[ctx]
}
func (p *PredictionContextCache) length() int {
return len(p.cache)
}
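// For example, a simulator shares one cache while building up DFA states
// (sketch):
//
// <pre>
// cache := NewPredictionContextCache()
// ctx = cache.add(ctx) // returns the canonical, shared instance
// </pre>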
type SingletonPredictionContext interface {
PredictionContext
}
type BaseSingletonPredictionContext struct {
*BasePredictionContext
parentCtx PredictionContext
returnState int
}
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
var cachedHash int
if parent != nil {
cachedHash = calculateHash(parent, returnState)
} else {
cachedHash = calculateEmptyHash()
}
s := new(BaseSingletonPredictionContext)
s.BasePredictionContext = NewBasePredictionContext(cachedHash)
s.parentCtx = parent
s.returnState = returnState
return s
}
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
if returnState == BasePredictionContextEmptyReturnState && parent == nil {
// someone can pass in the bits of an array ctx that mean $
return BasePredictionContextEMPTY
}
return NewBaseSingletonPredictionContext(parent, returnState)
}
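// For example, pushing return states 5 and then 9 onto the empty context
// builds a two-element chain (sketch):
//
// <pre>
// ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 5)
// ctx = SingletonBasePredictionContextCreate(ctx, 9)
// fmt.Println(ctx.String()) // "9 5 $"
// </pre>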
func (b *BaseSingletonPredictionContext) length() int {
return 1
}
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
return b.parentCtx
}
func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
return b.returnState
}
func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
return b.returnState == BasePredictionContextEmptyReturnState
}
func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
if b == other {
return true
} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
return false
} else if b.hash() != other.hash() {
return false // can't be same if hash is different
}
otherP := other.(*BaseSingletonPredictionContext)
if b.returnState != other.getReturnState(0) {
return false
} else if b.parentCtx == nil {
return otherP.parentCtx == nil
}
return b.parentCtx.equals(otherP.parentCtx)
}
func (b *BaseSingletonPredictionContext) hash() int {
return b.cachedHash
}
func (b *BaseSingletonPredictionContext) String() string {
var up string
if b.parentCtx == nil {
up = ""
} else {
up = b.parentCtx.String()
}
if len(up) == 0 {
if b.returnState == BasePredictionContextEmptyReturnState {
return "$"
}
return strconv.Itoa(b.returnState)
}
return strconv.Itoa(b.returnState) + " " + up
}
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
type EmptyPredictionContext struct {
*BaseSingletonPredictionContext
}
func NewEmptyPredictionContext() *EmptyPredictionContext {
p := new(EmptyPredictionContext)
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
return p
}
func (e *EmptyPredictionContext) isEmpty() bool {
return true
}
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
return nil
}
func (e *EmptyPredictionContext) getReturnState(index int) int {
return e.returnState
}
func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
return e == other
}
func (e *EmptyPredictionContext) String() string {
return "$"
}
type ArrayPredictionContext struct {
*BasePredictionContext
parents []PredictionContext
returnStates []int
}
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
// Parent can be nil only if full ctx mode and we make an array
// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
// nil parent and
// returnState == {@link //EmptyReturnState}.
hash := murmurInit(1)
for _, parent := range parents {
hash = murmurUpdate(hash, parent.hash())
}
for _, returnState := range returnStates {
hash = murmurUpdate(hash, returnState)
}
hash = murmurFinish(hash, len(parents)<<1)
c := new(ArrayPredictionContext)
c.BasePredictionContext = NewBasePredictionContext(hash)
c.parents = parents
c.returnStates = returnStates
return c
}
func (a *ArrayPredictionContext) GetReturnStates() []int {
return a.returnStates
}
func (a *ArrayPredictionContext) hasEmptyPath() bool {
return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}
func (a *ArrayPredictionContext) isEmpty() bool {
// since EmptyReturnState can only appear in the last position, we
// don't need to verify that size==1
return a.returnStates[0] == BasePredictionContextEmptyReturnState
}
func (a *ArrayPredictionContext) length() int {
return len(a.returnStates)
}
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
return a.parents[index]
}
func (a *ArrayPredictionContext) getReturnState(index int) int {
return a.returnStates[index]
}
func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
otherP, ok := other.(*ArrayPredictionContext)
if !ok || a.cachedHash != other.hash() || len(a.returnStates) != len(otherP.returnStates) {
return false // can't be same if hash or shape differs
}
// compare contents element-wise; pointer identity of the slices is not
// sufficient to decide equality
for i := range a.returnStates {
if a.returnStates[i] != otherP.returnStates[i] {
return false
}
ap, bp := a.parents[i], otherP.parents[i]
if (ap == nil) != (bp == nil) || (ap != nil && !ap.equals(bp)) {
return false
}
}
return true
}
func (a *ArrayPredictionContext) hash() int {
return a.BasePredictionContext.cachedHash
}
func (a *ArrayPredictionContext) String() string {
if a.isEmpty() {
return "[]"
}
s := "["
for i := 0; i < len(a.returnStates); i++ {
if i > 0 {
s = s + ", "
}
if a.returnStates[i] == BasePredictionContextEmptyReturnState {
s = s + "$"
continue
}
s = s + strconv.Itoa(a.returnStates[i])
if a.parents[i] != nil {
s = s + " " + a.parents[i].String()
} else {
s = s + "nil"
}
}
return s + "]"
}
// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
// /
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
if outerContext == nil {
outerContext = RuleContextEmpty
}
// if we are in RuleContext of start rule, s, then BasePredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
return BasePredictionContextEMPTY
}
// If we have a parent, convert it to a BasePredictionContext graph
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
state := a.states[outerContext.GetInvokingState()]
transition := state.GetTransitions()[0]
return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}
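// For example, the LL(1) analyzer converts the parser's current rule context
// this way before computing lookahead (sketch):
//
// <pre>
// pc := predictionContextFromRuleContext(p.GetATN(), p.GetParserRuleContext())
// </pre>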
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
// share same graph if both same
if a == b {
return a
}
ac, ok1 := a.(*BaseSingletonPredictionContext)
bc, ok2 := b.(*BaseSingletonPredictionContext)
if ok1 && ok2 {
return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
}
// At least one of a or b is array
// If one is $ and rootIsWildcard, return $ as wildcard.
if rootIsWildcard {
if _, ok := a.(*EmptyPredictionContext); ok {
return a
}
if _, ok := b.(*EmptyPredictionContext); ok {
return b
}
}
// convert singleton so both are arrays to normalize
if _, ok := a.(*BaseSingletonPredictionContext); ok {
a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
}
if _, ok := b.(*BaseSingletonPredictionContext); ok {
b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
}
return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}
//
// Merge two {@link SingletonBasePredictionContext} instances.
//
// <p>Stack tops equal, parents merge is same; return left graph.<br>
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both element in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
previous := mergeCache.Get(a.hash(), b.hash())
if previous != nil {
return previous.(PredictionContext)
}
previous = mergeCache.Get(b.hash(), a.hash())
if previous != nil {
return previous.(PredictionContext)
}
}
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), rootMerge)
}
return rootMerge
}
if a.returnState == b.returnState {
parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
// if parent is same as existing a or b parent or reduced to a parent,
// return it
if parent == a.parentCtx {
return a // ax + bx = ax, if a=b
}
if parent == b.parentCtx {
return b // ax + bx = bx, if a=b
}
// else: ax + ay = a'[x,y]
// merge parents x and y, giving array node with x,y then remainders
// of those graphs. dup a, a' points at merged array.
// New joined parent so create new singleton pointing to it, a'.
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), spc)
}
return spc
}
// a != b payloads differ
// see if we can collapse parents due to $+x parents if local ctx
var singleParent PredictionContext
if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
// bx =
// [a,b]x
singleParent = a.parentCtx
}
if singleParent != nil { // parents are same
// sort payloads and use same parent
payloads := []int{a.returnState, b.returnState}
if a.returnState > b.returnState {
payloads[0] = b.returnState
payloads[1] = a.returnState
}
parents := []PredictionContext{singleParent, singleParent}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), apc)
}
return apc
}
// parents differ and can't merge them. Just pack together
// into array can't merge.
// ax + by = [ax,by]
payloads := []int{a.returnState, b.returnState}
parents := []PredictionContext{a.parentCtx, b.parentCtx}
if a.returnState > b.returnState { // sort by payload
payloads[0] = b.returnState
payloads[1] = a.returnState
parents = []PredictionContext{b.parentCtx, a.parentCtx}
}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), apc)
}
return apc
}
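// For example, merging {@code 7 $} with {@code 9 $} (same parent, different
// return states) packs both payloads under the shared parent (sketch):
//
// <pre>
// a := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
// b := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 9)
// m := merge(a, b, true, nil)
// fmt.Println(m.String()) // "[7 $, 9 $]"
// </pre>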
//
// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
// {@code //EMPTY} return left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// /
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
if rootIsWildcard {
if a == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // * + b = *
}
if b == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // a + * = *
}
} else {
if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY // $ + $ = $
} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
parents := []PredictionContext{b.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
parents := []PredictionContext{a.GetParent(-1), nil}
return NewArrayPredictionContext(parents, payloads)
}
}
return nil
}
//
// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
previous := mergeCache.Get(a.hash(), b.hash())
if previous != nil {
return previous.(PredictionContext)
}
previous = mergeCache.Get(b.hash(), a.hash())
if previous != nil {
return previous.(PredictionContext)
}
}
// merge sorted payloads a + b => M
i := 0 // walks a
j := 0 // walks b
k := 0 // walks target M array
mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
// walk and merge to yield mergedParents, mergedReturnStates
for i < len(a.returnStates) && j < len(b.returnStates) {
aParent := a.parents[i]
bParent := b.parents[j]
if a.returnStates[i] == b.returnStates[j] {
// same payload (stack tops are equal), must yield merged singleton
payload := a.returnStates[i]
// $+$ = $
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
// ->
// ax
if bothDollars || axAX {
mergedParents[k] = aParent // choose left
mergedReturnStates[k] = payload
} else { // ax+ay -> a'[x,y]
mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
mergedParents[k] = mergedParent
mergedReturnStates[k] = payload
}
i++ // hop over left one as usual
j++ // but also Skip one in right side since we merge
} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
mergedParents[k] = aParent
mergedReturnStates[k] = a.returnStates[i]
i++
} else { // b > a, copy b[j] to M
mergedParents[k] = bParent
mergedReturnStates[k] = b.returnStates[j]
j++
}
k++
}
// copy over any payloads remaining in either array
if i < len(a.returnStates) {
for p := i; p < len(a.returnStates); p++ {
mergedParents[k] = a.parents[p]
mergedReturnStates[k] = a.returnStates[p]
k++
}
} else {
for p := j; p < len(b.returnStates); p++ {
mergedParents[k] = b.parents[p]
mergedReturnStates[k] = b.returnStates[p]
k++
}
}
// trim merged if we combined a few that had same stack tops
if k < len(mergedParents) { // write index < last position trim
if k == 1 { // for just one merged element, return singleton top
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), pc)
}
return pc
}
mergedParents = mergedParents[0:k]
mergedReturnStates = mergedReturnStates[0:k]
}
M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
if M == a {
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), a)
}
return a
}
if M == b {
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), b)
}
return b
}
combineCommonParents(mergedParents)
if mergeCache != nil {
mergeCache.set(a.hash(), b.hash(), M)
}
return M
}
//
// Make pass over all <em>M</em> {@code parents} merge any {@code equals()}
// ones.
// /
func combineCommonParents(parents []PredictionContext) {
uniqueParents := make(map[PredictionContext]PredictionContext)
for p := 0; p < len(parents); p++ {
parent := parents[p]
if uniqueParents[parent] == nil {
uniqueParents[parent] = parent
}
}
for q := 0; q < len(parents); q++ {
parents[q] = uniqueParents[parents[q]]
}
}
func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
if context.isEmpty() {
return context
}
existing := visited[context]
if existing != nil {
return existing
}
existing = contextCache.Get(context)
if existing != nil {
visited[context] = existing
return existing
}
changed := false
parents := make([]PredictionContext, context.length())
for i := 0; i < len(parents); i++ {
parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
if changed || parent != context.GetParent(i) {
if !changed {
parents = make([]PredictionContext, context.length())
for j := 0; j < context.length(); j++ {
parents[j] = context.GetParent(j)
}
changed = true
}
parents[i] = parent
}
}
if !changed {
contextCache.add(context)
visited[context] = context
return context
}
var updated PredictionContext
if len(parents) == 0 {
updated = BasePredictionContextEMPTY
} else if len(parents) == 1 {
updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
} else {
updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
}
contextCache.add(updated)
visited[updated] = updated
visited[context] = updated
return updated
}


@@ -0,0 +1,553 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.
const (
//
// The SLL(*) prediction mode. This prediction mode ignores the current
// parser context when making predictions. This is the fastest prediction
// mode, and provides correct results for many grammars. This prediction
// mode is more powerful than the prediction mode provided by ANTLR 3, but
// may result in syntax errors for grammar and input combinations which are
// not SLL.
//
// <p>
// When using this prediction mode, the parser will either return a correct
// parse tree (i.e. the same parse tree that would be returned with the
// {@link //LL} prediction mode), or it will Report a syntax error. If a
// syntax error is encountered when using the {@link //SLL} prediction mode,
// it may be due to either an actual syntax error in the input or indicate
// that the particular combination of grammar and input requires the more
// powerful {@link //LL} prediction abilities to complete successfully.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeSLL = 0
//
// The LL(*) prediction mode. This prediction mode allows the current parser
// context to be used for resolving SLL conflicts that occur during
// prediction. This is the fastest prediction mode that guarantees correct
// parse results for all combinations of grammars with syntactically correct
// inputs.
//
// <p>
// When using this prediction mode, the parser will make correct decisions
// for all syntactically-correct grammar and input combinations. However, in
// cases where the grammar is truly ambiguous this prediction mode might not
// Report a precise answer for <em>exactly which</em> alternatives are
// ambiguous.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLL = 1
//
// The LL(*) prediction mode with exact ambiguity detection. In addition to
// the correctness guarantees provided by the {@link //LL} prediction mode,
// this prediction mode instructs the prediction algorithm to determine the
// complete and exact set of ambiguous alternatives for every ambiguous
// decision encountered while parsing.
//
// <p>
// This prediction mode may be used for diagnosing ambiguities during
// grammar development. Due to the performance overhead of calculating sets
// of ambiguous alternatives, this prediction mode should be avoided when
// the exact results are not necessary.</p>
//
// <p>
// This prediction mode does not provide any guarantees for prediction
// behavior for syntactically-incorrect inputs.</p>
//
PredictionModeLLExactAmbigDetection = 2
)
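// For example, a driver can try the fast SLL mode first and re-parse with
// full LL only if SLL reports a syntax error (sketch of the common two-stage
// strategy; assumes the simulator exposes SetPredictionMode):
//
// <pre>
// p.GetInterpreter().SetPredictionMode(PredictionModeSLL)
// p.SetErrorHandler(NewBailErrorStrategy())
// // ... parse; if it fails, rewind the token stream and retry with:
// p.GetInterpreter().SetPredictionMode(PredictionModeLL)
// </pre>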
//
// Computes the SLL prediction termination condition.
//
// <p>
// This method computes the SLL prediction termination condition for both of
// the following cases.</p>
//
// <ul>
// <li>The usual SLL+LL fallback upon SLL conflict</li>
// <li>Pure SLL without LL fallback</li>
// </ul>
//
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
//
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.</p>
//
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative (e.g.
// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
// configuration, SLL could continue with the hopes that more lookahead will
// resolve via one of those non-conflicting configurations.</p>
//
// <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.</p>
//
// <p><strong>HEURISTIC</strong></p>
//
// <p>As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):</p>
//
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
//
// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative two has another way to continue,
// via {@code [6|2|[]]}.</p>
//
// <p>It also lets us continue for this rule:</p>
//
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
//
// <p>After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.</p>
//
// <p><strong>PURE SLL PARSING</strong></p>
//
// <p>To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.</p>
//
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
//
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
// states because they need to create the DFA cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.</p>
//
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
// semantic predicate contexts so we might see two configurations like the
// following.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
//
// <p>Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
// For example, we test {@code (x+x')==x''} when looking for conflicts in
// the following configurations.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
//
// <p>If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
//
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
// configs meet this condition, then none of the configurations is able
// to match additional input, so we terminate prediction.
//
if PredictionModeallConfigsInRuleStopStates(configs) {
return true
}
// pure SLL mode parsing
if mode == PredictionModeSLL {
// Don't bother with combining configs from different semantic
// contexts if we can fail over to full LL; it costs more time
// since we'll often fail over anyway.
if configs.HasSemanticContext() {
// dup configs, tossing out semantic predicates
dup := NewBaseATNConfigSet(false)
for _, c := range configs.GetItems() {
// NewBaseATNConfig({semanticContext:}, c)
c = NewBaseATNConfig2(c, SemanticContextNone)
dup.Add(c, nil)
}
configs = dup
}
// now we have combined contexts for configs with dissimilar preds
}
// pure SLL or combined SLL+LL mode parsing
altsets := PredictionModegetConflictingAltSubsets(configs)
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}
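//
// Illustrative sketch (not part of the runtime): how a driver could use the
// predicate above. adaptivePredictSLL and adaptivePredictLL are
// hypothetical stand-ins for the ATN simulator's real prediction entry
// points.
//
// func predictWithFallback(mode, decision int, configs ATNConfigSet) int {
//	if !PredictionModehasSLLConflictTerminatingPrediction(mode, configs) {
//		// some non-conflicting configuration remains: keep going in SLL
//		return adaptivePredictSLL(decision) // hypothetical
//	}
//	// only conflicting subsets remain: more lookahead cannot produce a
//	// unique SLL prediction, so retry the decision with full LL
//	return adaptivePredictLL(decision) // hypothetical
// }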
// Checks if any configuration in {@code configs} is in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if any configuration in {@code configs} is in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
for _, c := range configs.GetItems() {
if _, ok := c.GetState().(*RuleStopState); ok {
return true
}
}
return false
}
// Checks if all configurations in {@code configs} are in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if all configurations in {@code configs} are in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
for _, c := range configs.GetItems() {
if _, ok := c.GetState().(*RuleStopState); !ok {
return false
}
}
return true
}
//
// Full LL prediction termination.
//
// <p>Can we stop looking ahead during ATN simulation or is there some
// uncertainty as to which alternative we will ultimately pick, after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more lookahead to decide which of those
// alternatives we should predict.</p>
//
// <p>The basic idea is to split the set of configurations {@code C} into
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
// non-conflicting configurations. Two configurations conflict if they have
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
//
// <p>Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:</p>
//
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
//
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
//
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
// {@code s} and {@code ctx}.</p>
//
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// more lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.</p>
//
// <p>The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.</p>
//
// <p>No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.</p>
//
// <p><strong>CONFLICTING CONFIGS</strong></p>
//
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
// when {@code i!=j} but {@code x=x'}. Because we merge all
// {@code (s, i, _)} configurations together, that means that there are at
// most {@code n} configurations associated with state {@code s} for
// {@code n} possible alternatives in the decision. The merged stacks
// complicate the comparison of configuration contexts {@code x} and
// {@code x'}. Sam checks to see if one is a subset of the other by calling
// merge and checking to see if the merged result is either {@code x} or
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
// is the superset, then {@code i} is the only possible prediction since the
// others resolve to {@code min(i)} as well. However, if {@code x} is
// associated with {@code j>i} then at least one stack configuration for
// {@code j} is not in conflict with alternative {@code i}. The algorithm
// should keep going, looking for more lookahead due to the uncertainty.</p>
//
// <p>For simplicity, I'm doing an equality check between {@code x} and
// {@code x'} that lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity but also because that is the test you need to detect the
// alternatives that are actually in conflict.</p>
//
// <p><strong>CONTINUE/STOP RULE</strong></p>
//
// <p>Continue if union of resolved alternative sets from non-conflicting and
// conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.</p>
//
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
// alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.</p>
//
// <p><strong>CASES</strong></p>
//
// <ul>
//
// <li>no conflicts and more than 1 alternative in set =&gt; continue</li>
//
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1,3}} =&gt; continue
// </li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} =&gt; stop and predict 1</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {1}} = {@code {1}} =&gt; stop and predict 1, can announce
// ambiguity {@code {1,2}}</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {2}} = {@code {1,2}} =&gt; continue</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {3}} = {@code {1,3}} =&gt; continue</li>
//
// </ul>
//
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
//
// <p>If all states report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set.</p>
//
// <p><code>|A_<em>i</em>|&gt;1</code> and
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
//
// <p>In other words, we continue examining lookahead until all {@code A_i}
// have more than one alternative and all {@code A_i} are the same. If
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
// because the resolved set is {@code {1}}. To determine what the real
// ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
return PredictionModegetSingleViableAlt(altsets)
}
//
// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} &gt; 1, otherwise {@code false}
//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
return !PredictionModehasNonConflictingAltSet(altsets)
}
//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if alts.length() == 1 {
return true
}
}
return false
}
//
// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} &gt; 1, otherwise {@code false}
//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if alts.length() > 1 {
return true
}
}
return false
}
//
// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
var first *BitSet
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
if first == nil {
first = alts
} else if !alts.equals(first) {
// compare set contents, not pointers: distinct BitSets with
// the same members are equal subsets
return false
}
}
return true
}
//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
all := PredictionModeGetAlts(altsets)
if all.length() == 1 {
return all.minValue()
}
return ATNInvalidAltNumber
}
// Gets the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each {@link BitSet}
// in {@code altsets}.
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
all := NewBitSet()
for _, alts := range altsets {
all.or(alts)
}
return all
}
//
// This func gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
configToAlts := make(map[int]*BitSet)
for _, c := range configs.GetItems() {
key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
alts, ok := configToAlts[key]
if !ok {
alts = NewBitSet()
configToAlts[key] = alts
}
alts.add(c.GetAlt())
}
values := make([]*BitSet, 0, 10)
for _, v := range configToAlts {
values = append(values, v)
}
return values
}
//
// Get a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
// <pre>
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
m := NewAltDict()
for _, c := range configs.GetItems() {
alts := m.Get(c.GetState().String())
if alts == nil {
alts = NewBitSet()
m.put(c.GetState().String(), alts)
}
alts.(*BitSet).add(c.GetAlt())
}
return m
}
func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
values := PredictionModeGetStateToAltMap(configs).values()
for i := 0; i < len(values); i++ {
if values[i].(*BitSet).length() == 1 {
return true
}
}
return false
}
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
result := ATNInvalidAltNumber
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
minAlt := alts.minValue()
if result == ATNInvalidAltNumber {
result = minAlt
} else if result != minAlt { // more than 1 viable alt
return ATNInvalidAltNumber
}
}
return result
}
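// Illustrative sketch (not part of the runtime): the CONTINUE/STOP rule and
// the CASES list documented above, replayed on plain BitSets instead of
// real ATN configuration sets.
func examplePredictionTermination() {
// (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y): both alt subsets are
// {1,2}; every subset conflicts and all resolve to min alt 1, so full LL
// can stop and predict 1.
a := NewBitSet()
a.add(1)
a.add(2)
b := NewBitSet()
b.add(1)
b.add(2)
stop := []*BitSet{a, b}
_ = PredictionModeallSubsetsConflict(stop) // true
_ = PredictionModegetSingleViableAlt(stop) // 1
// (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y): subsets {1,2} and {2,3}
// resolve to different minimums (1 vs 2), so prediction must continue.
c := NewBitSet()
c.add(2)
c.add(3)
_ = PredictionModegetSingleViableAlt([]*BitSet{a, c}) // ATNInvalidAltNumber
}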


@ -0,0 +1,217 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strings"
"strconv"
)
type Recognizer interface {
GetLiteralNames() []string
GetSymbolicNames() []string
GetRuleNames() []string
Sempred(RuleContext, int, int) bool
Precpred(RuleContext, int) bool
GetState() int
SetState(int)
Action(RuleContext, int, int)
AddErrorListener(ErrorListener)
RemoveErrorListeners()
GetATN() *ATN
GetErrorListenerDispatch() ErrorListener
}
type BaseRecognizer struct {
listeners []ErrorListener
state int
RuleNames []string
LiteralNames []string
SymbolicNames []string
GrammarFileName string
}
func NewBaseRecognizer() *BaseRecognizer {
rec := new(BaseRecognizer)
rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
rec.state = -1
return rec
}
var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
runtimeVersion := "4.9.3"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
}
func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
panic("action not implemented on Recognizer!")
}
func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
b.listeners = append(b.listeners, listener)
}
func (b *BaseRecognizer) RemoveErrorListeners() {
b.listeners = make([]ErrorListener, 0)
}
func (b *BaseRecognizer) GetRuleNames() []string {
return b.RuleNames
}
func (b *BaseRecognizer) GetTokenNames() []string {
return b.LiteralNames
}
func (b *BaseRecognizer) GetSymbolicNames() []string {
return b.SymbolicNames
}
func (b *BaseRecognizer) GetLiteralNames() []string {
return b.LiteralNames
}
func (b *BaseRecognizer) GetState() int {
return b.state
}
func (b *BaseRecognizer) SetState(v int) {
b.state = v
}
//func (b *Recognizer) GetTokenTypeMap() {
// var tokenNames = b.GetTokenNames()
// if (tokenNames==nil) {
// panic("The current recognizer does not provide a list of token names.")
// }
// var result = tokenTypeMapCache[tokenNames]
// if(result==nil) {
// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
// result.EOF = TokenEOF
// tokenTypeMapCache[tokenNames] = result
// }
// return result
//}
// Get a map from rule names to rule indexes.
//
// <p>Used for XPath and tree pattern compilation.</p>
//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
// var ruleNames = b.GetRuleNames()
// if (ruleNames==nil) {
// panic("The current recognizer does not provide a list of rule names.")
// }
//
// var result = ruleIndexMapCache[ruleNames]
// if(result==nil) {
// result = ruleNames.reduce(function(o, k, i) { o[k] = i })
// ruleIndexMapCache[ruleNames] = result
// }
// return result
}
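// A minimal sketch (not part of the runtime) of what GetRuleIndexMap could
// compute once implemented: each rule name maps to its index in
// GetRuleNames(). The per-recognizer caching hinted at by the commented-out
// code above is omitted here.
func ruleIndexMapOf(ruleNames []string) map[string]int {
result := make(map[string]int, len(ruleNames))
for i, name := range ruleNames {
result[name] = i
}
return result
}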
func (b *BaseRecognizer) GetTokenType(tokenName string) int {
panic("Method not defined!")
// var ttype = b.GetTokenTypeMap()[tokenName]
// if (ttype !=nil) {
// return ttype
// } else {
// return TokenInvalidType
// }
}
//func (b *Recognizer) GetTokenTypeMap() map[string]int {
// Vocabulary vocabulary = getVocabulary()
//
// Synchronized (tokenTypeMapCache) {
// Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
// if (result == null) {
// result = new HashMap<String, Integer>()
// for (int i = 0; i < GetATN().maxTokenType; i++) {
// String literalName = vocabulary.getLiteralName(i)
// if (literalName != null) {
// result.put(literalName, i)
// }
//
// String symbolicName = vocabulary.GetSymbolicName(i)
// if (symbolicName != null) {
// result.put(symbolicName, i)
// }
// }
//
// result.put("EOF", Token.EOF)
// result = Collections.unmodifiableMap(result)
// tokenTypeMapCache.put(vocabulary, result)
// }
//
// return result
// }
//}
// What is the error header? Normally line/character position information.
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
line := e.GetOffendingToken().GetLine()
column := e.GetOffendingToken().GetColumn()
return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}
// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new token type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return "<no token>"
}
s := t.GetText()
if s == "" {
if t.GetTokenType() == TokenEOF {
s = "<EOF>"
} else {
s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
}
}
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
return "'" + s + "'"
}
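// Illustrative sketch (not part of the runtime): the escaping performed by
// GetTokenErrorDisplay, shown on a detached token. With the text below the
// result is 'a\nb' (the newline rendered as backslash-n inside quotes).
func exampleTokenErrorDisplay(b *BaseRecognizer) string {
pair := &TokenSourceCharStreamPair{}
t := NewCommonToken(pair, TokenMinUserTokenType, TokenDefaultChannel, 0, 2)
t.SetText("a\nb")
return b.GetTokenErrorDisplay(t)
}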
func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
return NewProxyErrorListener(b.listeners)
}
// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
return true
}
func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
return true
}


@ -0,0 +1,114 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack. We actually carry no
// information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the ATN submachine that invoked this context. Contrast this with the s
// pointer inside ParserRuleContext that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the subclass
// ParserRuleContext.
//
// @see ParserRuleContext
//
type RuleContext interface {
RuleNode
GetInvokingState() int
SetInvokingState(int)
GetRuleIndex() int
IsEmpty() bool
GetAltNumber() int
SetAltNumber(altNumber int)
String([]string, RuleContext) string
}
type BaseRuleContext struct {
parentCtx RuleContext
invokingState int
RuleIndex int
}
func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
rn := new(BaseRuleContext)
// What context invoked this rule?
rn.parentCtx = parent
// What state invoked the rule associated with this context?
// The "return address" is the followState of invokingState.
// If parent is nil, this will be -1.
if parent == nil {
rn.invokingState = -1
} else {
rn.invokingState = invokingState
}
return rn
}
func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
return b
}
func (b *BaseRuleContext) SetParent(v Tree) {
if v == nil {
b.parentCtx = nil
} else {
b.parentCtx = v.(RuleContext)
}
}
func (b *BaseRuleContext) GetInvokingState() int {
return b.invokingState
}
func (b *BaseRuleContext) SetInvokingState(t int) {
b.invokingState = t
}
func (b *BaseRuleContext) GetRuleIndex() int {
return b.RuleIndex
}
func (b *BaseRuleContext) GetAltNumber() int {
return ATNInvalidAltNumber
}
func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
// A context is empty if there is no invoking state, meaning nobody has
// called the current context.
func (b *BaseRuleContext) IsEmpty() bool {
return b.invokingState == -1
}
// GetParent returns the context that invoked this one, or nil if this
// context is the root of the invocation tree.
func (b *BaseRuleContext) GetParent() Tree {
return b.parentCtx
}
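// Illustrative sketch (not part of the runtime): the parent links form a
// stack of rule invocations, so walking them gives the depth of the
// current invocation in the invocation tree.
func contextDepth(ctx RuleContext) int {
n := 0
for ctx != nil {
n++
parent := ctx.GetParent()
if parent == nil {
break
}
// parents of a rule context are themselves rule contexts
ctx = parent.(RuleContext)
}
return n
}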


@ -0,0 +1,466 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
)
// A tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
// <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
// {@link SemanticContext} within the scope of this outer class.</p>
//
type SemanticContext interface {
comparable
evaluate(parser Recognizer, outerContext RuleContext) bool
evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
hash() int
String() string
}
func SemanticContextandContext(a, b SemanticContext) SemanticContext {
if a == nil || a == SemanticContextNone {
return b
}
if b == nil || b == SemanticContextNone {
return a
}
result := NewAND(a, b)
if len(result.opnds) == 1 {
return result.opnds[0]
}
return result
}
func SemanticContextorContext(a, b SemanticContext) SemanticContext {
if a == nil {
return b
}
if b == nil {
return a
}
if a == SemanticContextNone || b == SemanticContextNone {
return SemanticContextNone
}
result := NewOR(a, b)
if len(result.opnds) == 1 {
return result.opnds[0]
}
return result
}
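// Illustrative sketch (not part of the runtime): SemanticContextNone acts
// as the identity for andContext and as the annihilator for orContext, so
// combining a real predicate with "no predicate" collapses as shown below.
func exampleCombineContexts() (SemanticContext, SemanticContext, SemanticContext) {
p1 := NewPredicate(0, 0, false)
p2 := NewPredicate(0, 1, false)
and := SemanticContextandContext(p1, p2) // an *AND over p1 and p2
keep := SemanticContextandContext(p1, SemanticContextNone) // p1: None is the AND identity
none := SemanticContextorContext(p1, SemanticContextNone) // None: anything OR true is true
return and, keep, none
}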
type Predicate struct {
ruleIndex int
predIndex int
isCtxDependent bool
}
func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
p := new(Predicate)
p.ruleIndex = ruleIndex
p.predIndex = predIndex
p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
return p
}
//The default {@link SemanticContext}, which is semantically equivalent to
//a predicate of the form {@code {true}?}.
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
}
func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
var localctx RuleContext
if p.isCtxDependent {
localctx = outerContext
}
return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}
func (p *Predicate) equals(other interface{}) bool {
if p == other {
return true
} else if _, ok := other.(*Predicate); !ok {
return false
} else {
return p.ruleIndex == other.(*Predicate).ruleIndex &&
p.predIndex == other.(*Predicate).predIndex &&
p.isCtxDependent == other.(*Predicate).isCtxDependent
}
}
func (p *Predicate) hash() int {
h := murmurInit(0)
h = murmurUpdate(h, p.ruleIndex)
h = murmurUpdate(h, p.predIndex)
if p.isCtxDependent {
h = murmurUpdate(h, 1)
} else {
h = murmurUpdate(h, 0)
}
return murmurFinish(h, 3)
}
func (p *Predicate) String() string {
return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
}
type PrecedencePredicate struct {
precedence int
}
func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
p := new(PrecedencePredicate)
p.precedence = precedence
return p
}
func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
return parser.Precpred(outerContext, p.precedence)
}
func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
if parser.Precpred(outerContext, p.precedence) {
return SemanticContextNone
}
return nil
}
func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
return p.precedence - other.precedence
}
func (p *PrecedencePredicate) equals(other interface{}) bool {
if p == other {
return true
} else if _, ok := other.(*PrecedencePredicate); !ok {
return false
} else {
return p.precedence == other.(*PrecedencePredicate).precedence
}
}
func (p *PrecedencePredicate) hash() int {
h := uint32(1)
h = 31*h + uint32(p.precedence)
return int(h)
}
func (p *PrecedencePredicate) String() string {
return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}
func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
result := make([]*PrecedencePredicate, 0)
set.Each(func(v interface{}) bool {
if c2, ok := v.(*PrecedencePredicate); ok {
result = append(result, c2)
}
return true
})
return result
}
// A semantic context which is true whenever none of the contained contexts
// is false.
type AND struct {
opnds []SemanticContext
}
func NewAND(a, b SemanticContext) *AND {
operands := NewArray2DHashSet(nil, nil)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Add(o)
}
} else {
operands.Add(a)
}
if ba, ok := b.(*AND); ok {
for _, o := range ba.opnds {
operands.Add(o)
}
} else {
operands.Add(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
// interested in the transition with the lowest precedence
var reduced *PrecedencePredicate
for _, p := range precedencePredicates {
if reduced == nil || p.precedence < reduced.precedence {
reduced = p
}
}
operands.Add(reduced)
}
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
for i, v := range vs {
opnds[i] = v.(SemanticContext)
}
and := new(AND)
and.opnds = opnds
return and
}
func (a *AND) equals(other interface{}) bool {
if a == other {
return true
}
aa, ok := other.(*AND)
if !ok || len(a.opnds) != len(aa.opnds) {
// not an AND, or operand counts differ
return false
}
for i, v := range aa.opnds {
if !a.opnds[i].equals(v) {
return false
}
}
return true
}
//
// {@inheritDoc}
//
// <p>
// The evaluation of predicates by this context is short-circuiting, but
// unordered.</p>
//
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(a.opnds); i++ {
if !a.opnds[i].evaluate(parser, outerContext) {
return false
}
}
return true
}
func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
differs := false
operands := make([]SemanticContext, 0)
for i := 0; i < len(a.opnds); i++ {
context := a.opnds[i]
evaluated := context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if evaluated == nil {
// The AND context is false if any element is false
return nil
} else if evaluated != SemanticContextNone {
// Reduce the result by Skipping true elements
operands = append(operands, evaluated)
}
}
if !differs {
return a
}
if len(operands) == 0 {
// all elements were true, so the AND context is true
return SemanticContextNone
}
var result SemanticContext
for _, o := range operands {
if result == nil {
result = o
} else {
result = SemanticContextandContext(result, o)
}
}
return result
}
func (a *AND) hash() int {
h := murmurInit(37) // Init with a value different from OR
for _, op := range a.opnds {
h = murmurUpdate(h, op.hash())
}
return murmurFinish(h, len(a.opnds))
}
func (a *AND) String() string {
s := ""
for _, o := range a.opnds {
s += "&& " + fmt.Sprint(o)
}
if len(s) > 3 {
return s[3:] // drop the leading "&& " separator
}
return s
}
//
// A semantic context which is true whenever at least one of the contained
// contexts is true.
//
type OR struct {
opnds []SemanticContext
}
func NewOR(a, b SemanticContext) *OR {
operands := NewArray2DHashSet(nil, nil)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Add(o)
}
} else {
operands.Add(a)
}
if ba, ok := b.(*OR); ok {
for _, o := range ba.opnds {
operands.Add(o)
}
} else {
operands.Add(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
// interested in the transition with the highest precedence
var reduced *PrecedencePredicate
for _, p := range precedencePredicates {
if reduced == nil || p.precedence > reduced.precedence {
reduced = p
}
}
operands.Add(reduced)
}
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
for i, v := range vs {
opnds[i] = v.(SemanticContext)
}
o := new(OR)
o.opnds = opnds
return o
}
func (o *OR) equals(other interface{}) bool {
if o == other {
return true
}
oo, ok := other.(*OR)
if !ok || len(o.opnds) != len(oo.opnds) {
// not an OR, or operand counts differ
return false
}
for i, v := range oo.opnds {
if !o.opnds[i].equals(v) {
return false
}
}
return true
}
// <p>
// The evaluation of predicates by this context is short-circuiting, but
// unordered.</p>
//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(o.opnds); i++ {
if o.opnds[i].evaluate(parser, outerContext) {
return true
}
}
return false
}
func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
differs := false
operands := make([]SemanticContext, 0)
for i := 0; i < len(o.opnds); i++ {
context := o.opnds[i]
evaluated := context.evalPrecedence(parser, outerContext)
differs = differs || (evaluated != context)
if evaluated == SemanticContextNone {
// The OR context is true if any element is true
return SemanticContextNone
} else if evaluated != nil {
// Reduce the result by Skipping false elements
operands = append(operands, evaluated)
}
}
if !differs {
return o
}
if len(operands) == 0 {
// all elements were false, so the OR context is false
return nil
}
var result SemanticContext
for _, o := range operands {
if result == nil {
result = o
} else {
result = SemanticContextorContext(result, o)
}
}
return result
}
func (o *OR) hash() int {
h := murmurInit(41) // Init with a value different from AND
for _, op := range o.opnds {
h = murmurUpdate(h, op.hash())
}
return murmurFinish(h, len(o.opnds))
}
func (o *OR) String() string {
s := ""
for _, opnd := range o.opnds {
s += "|| " + fmt.Sprint(opnd)
}
if len(s) > 3 {
return s[3:] // drop the leading "|| " separator
}
return s
}


@ -0,0 +1,210 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"strconv"
"strings"
)
type TokenSourceCharStreamPair struct {
tokenSource TokenSource
charStream CharStream
}
// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.
type Token interface {
GetSource() *TokenSourceCharStreamPair
GetTokenType() int
GetChannel() int
GetStart() int
GetStop() int
GetLine() int
GetColumn() int
GetText() string
SetText(s string)
GetTokenIndex() int
SetTokenIndex(v int)
GetTokenSource() TokenSource
GetInputStream() CharStream
}
type BaseToken struct {
source *TokenSourceCharStreamPair
tokenType int // token type of the token
channel int // The parser ignores everything not on DEFAULT_CHANNEL
start int // optional return -1 if not implemented.
stop int // optional return -1 if not implemented.
tokenIndex int // from 0..n-1 of the token object in the input stream
line int // line=1..n of the 1st character
column int // beginning of the line at which it occurs, 0..n-1
text string // text of the token.
readOnly bool
}
const (
TokenInvalidType = 0
// During lookahead operations, this "token" signifies we hit rule end ATN state
// and did not follow it despite needing to.
TokenEpsilon = -2
TokenMinUserTokenType = 1
TokenEOF = -1
// All tokens go to the parser (unless Skip() is called in that rule)
// on a particular "channel". The parser tunes to a particular channel
// so that whitespace etc... can go to the parser on a "hidden" channel.
TokenDefaultChannel = 0
// Anything on different channel than DEFAULT_CHANNEL is not parsed
// by parser.
TokenHiddenChannel = 1
)
func (b *BaseToken) GetChannel() int {
return b.channel
}
func (b *BaseToken) GetStart() int {
return b.start
}
func (b *BaseToken) GetStop() int {
return b.stop
}
func (b *BaseToken) GetLine() int {
return b.line
}
func (b *BaseToken) GetColumn() int {
return b.column
}
func (b *BaseToken) GetTokenType() int {
return b.tokenType
}
func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
return b.source
}
func (b *BaseToken) GetTokenIndex() int {
return b.tokenIndex
}
func (b *BaseToken) SetTokenIndex(v int) {
b.tokenIndex = v
}
func (b *BaseToken) GetTokenSource() TokenSource {
return b.source.tokenSource
}
func (b *BaseToken) GetInputStream() CharStream {
return b.source.charStream
}
type CommonToken struct {
*BaseToken
}
func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
t := new(CommonToken)
t.BaseToken = new(BaseToken)
t.source = source
t.tokenType = tokenType
t.channel = channel
t.start = start
t.stop = stop
t.tokenIndex = -1
if t.source.tokenSource != nil {
t.line = source.tokenSource.GetLine()
t.column = source.tokenSource.GetCharPositionInLine()
} else {
t.column = -1
}
return t
}
// An empty {@link Pair} which is used as the default value of
// {@link //source} for tokens that do not have a source.
//CommonToken.EMPTY_SOURCE = [ nil, nil ]
// Constructs a new {@link CommonToken} as a copy of another {@link Token}.
//
// <p>
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//GetInputStream}.</p>
//
// @param oldToken The token to copy.
//
func (c *CommonToken) clone() *CommonToken {
t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
t.tokenIndex = c.GetTokenIndex()
t.line = c.GetLine()
t.column = c.GetColumn()
t.text = c.GetText()
return t
}
func (c *CommonToken) GetText() string {
if c.text != "" {
return c.text
}
input := c.GetInputStream()
if input == nil {
return ""
}
n := input.Size()
if c.start < n && c.stop < n {
return input.GetTextFromInterval(NewInterval(c.start, c.stop))
}
return "<EOF>"
}
func (c *CommonToken) SetText(text string) {
c.text = text
}
func (c *CommonToken) String() string {
txt := c.GetText()
if txt != "" {
txt = strings.Replace(txt, "\n", "\\n", -1)
txt = strings.Replace(txt, "\r", "\\r", -1)
txt = strings.Replace(txt, "\t", "\\t", -1)
} else {
txt = "<no text>"
}
var ch string
if c.channel > 0 {
ch = ",channel=" + strconv.Itoa(c.channel)
} else {
ch = ""
}
return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}
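// Illustrative sketch (not part of the runtime): a detached CommonToken
// (no token source or char stream) rendered with String(). With the values
// below the result is "[@-1,0:4='hello',<1>,0:-1]".
func exampleCommonToken() string {
pair := &TokenSourceCharStreamPair{}
t := NewCommonToken(pair, TokenMinUserTokenType, TokenDefaultChannel, 0, 4)
t.SetText("hello")
return t.String()
}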


@ -0,0 +1,17 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type TokenSource interface {
NextToken() Token
Skip()
More()
GetLine() int
GetCharPositionInLine() int
GetInputStream() CharStream
GetSourceName() string
setTokenFactory(factory TokenFactory)
GetTokenFactory() TokenFactory
}


@ -0,0 +1,20 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
type TokenStream interface {
IntStream
LT(k int) Token
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
GetTextFromInterval(*Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}


@ -0,0 +1,649 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"bytes"
"fmt"
)
//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.
// <p>
// You can insert stuff, replace, and delete chunks. Note that the operations
// are done lazily--only if you convert the buffer to a {@link String} with
// {@link TokenStream#getText()}. This is very efficient because you are not
// moving data around all the time. As the buffer of tokens is converted to
// strings, the {@link #getText()} method(s) scan the input token stream and
// check to see if there is an operation at the current index. If so, the
// operation is done and then normal {@link String} rendering continues on the
// buffer. This is like having multiple Turing machine instruction streams
// (programs) operating on a single input tape. :)</p>
// <p>
// This rewriter makes no modifications to the token stream. It does not ask the
// stream to fill itself up nor does it advance the input cursor. The token
// stream {@link TokenStream#index()} will return the same value before and
// after any {@link #getText()} call.</p>
// <p>
// The rewriter only works on tokens that you have in the buffer and ignores the
// current input cursor. If you are buffering tokens on-demand, calling
// {@link #getText()} halfway through the input will only do rewrites for those
// tokens in the first half of the file.</p>
// <p>
// Since the operations are done lazily at {@link #getText}-time, operations do
// not screw up the token index values. That is, an insert operation at token
// index {@code i} does not change the index values for tokens
// {@code i}+1..n-1.</p>
// <p>
// Because operations never actually alter the buffer, you may always get the
// original token stream back without undoing anything. Since the instructions
// are queued up, you can easily simulate transactions and roll back any changes
// if there is an error just by removing instructions. For example,</p>
// <pre>
// CharStream input = new ANTLRFileStream("input");
// TLexer lex = new TLexer(input);
// CommonTokenStream tokens = new CommonTokenStream(lex);
// T parser = new T(tokens);
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
// parser.startRule();
// </pre>
// <p>
// Then in the rules, you can execute (assuming rewriter is visible):</p>
// <pre>
// Token t,u;
// ...
// rewriter.insertAfter(t, "text to put after t");
// rewriter.insertAfter(u, "text after u");
// System.out.println(rewriter.getText());
// </pre>
// <p>
// You can also have multiple "instruction streams" and get multiple rewrites
// from a single pass over the input. Just name the instruction streams and use
// that name again when printing the buffer. This could be useful for generating
// a C file and also its header file--all from the same buffer:</p>
// <pre>
// rewriter.insertAfter("pass1", t, "text to put after t");}
// rewriter.insertAfter("pass2", u, "text after u");}
// System.out.println(rewriter.getText("pass1"));
// System.out.println(rewriter.getText("pass2"));
// </pre>
// <p>
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.</p>
const (
Default_Program_Name = "default"
Program_Init_Size = 100
Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
type RewriteOperation interface {
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
Execute(buffer *bytes.Buffer) int
String() string
GetInstructionIndex() int
GetIndex() int
GetText() string
GetOpName() string
GetTokens() TokenStream
SetInstructionIndex(val int)
SetIndex(int)
SetText(string)
SetOpName(string)
SetTokens(TokenStream)
}
type BaseRewriteOperation struct {
//Current index of rewrites list
instruction_index int
//Token buffer index
index int
//Substitution text
text string
//Actual operation name
op_name string
//Pointer to token steam
tokens TokenStream
}
func (op *BaseRewriteOperation)GetInstructionIndex() int{
return op.instruction_index
}
func (op *BaseRewriteOperation)GetIndex() int{
return op.index
}
func (op *BaseRewriteOperation)GetText() string{
return op.text
}
func (op *BaseRewriteOperation)GetOpName() string{
return op.op_name
}
func (op *BaseRewriteOperation)GetTokens() TokenStream{
return op.tokens
}
func (op *BaseRewriteOperation)SetInstructionIndex(val int){
op.instruction_index = val
}
func (op *BaseRewriteOperation)SetIndex(val int) {
op.index = val
}
func (op *BaseRewriteOperation)SetText(val string){
op.text = val
}
func (op *BaseRewriteOperation)SetOpName(val string){
op.op_name = val
}
func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
op.tokens = val
}
func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
return op.index
}
func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
op.op_name,
op.index, // token buffer index
op.text,
)
}
type InsertBeforeOp struct {
BaseRewriteOperation
}
func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
index:index,
text:text,
op_name:"InsertBeforeOp",
tokens:stream,
}}
}
func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
buffer.WriteString(op.text)
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
return op.index+1
}
func (op *InsertBeforeOp) String() string {
return op.BaseRewriteOperation.String()
}
// Distinguish between insert after/before to do the "insert afters"
// first and then the "insert befores" at same index. Implementation
// of "insert after" is "insert before index+1".
type InsertAfterOp struct {
BaseRewriteOperation
}
func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
index:index+1, // insert after token i means insert before token i+1
text:text,
op_name:"InsertAfterOp",
tokens:stream,
}}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
return op.index+1
}
func (op *InsertAfterOp) String() string {
return op.BaseRewriteOperation.String()
}
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct{
BaseRewriteOperation
LastIndex int
}
func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
return &ReplaceOp{
BaseRewriteOperation:BaseRewriteOperation{
index:from,
text:text,
op_name:"ReplaceOp",
tokens:stream,
},
LastIndex:to,
}
}
func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
if op.text != ""{
buffer.WriteString(op.text)
}
return op.LastIndex +1
}
func (op *ReplaceOp) String() string {
if op.text == "" {
return fmt.Sprintf("<DeleteOp@%d..%d>",
op.index, op.LastIndex)
}
return fmt.Sprintf("<ReplaceOp@%d..%d:\"%s\">",
op.index, op.LastIndex, op.text)
}
type TokenStreamRewriter struct {
//Our source stream
tokens TokenStream
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) &rarr; rewrite (List)
programs map[string][]RewriteOperation
last_rewrite_token_indexes map[string]int
}
func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
return &TokenStreamRewriter{
tokens: tokens,
programs: map[string][]RewriteOperation{
Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
},
last_rewrite_token_indexes: map[string]int{},
}
}
func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
return tsr.tokens
}
// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
is, ok := tsr.programs[program_name]
if ok{
tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
tsr.Rollback(Default_Program_Name, instruction_index)
}
//Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
tsr.DeleteProgram(Default_Program_Name)
}
func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
tsr.InsertAfter(Default_Program_Name, index, text)
}
func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
tsr.InsertBefore(Default_Program_Name, index, text)
}
func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
tsr.Replace(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
tsr.ReplaceDefault(index, index, text)
}
func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
tsr.ReplaceTokenDefault(index, index, text)
}
func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
tsr.Replace(program_name, from, to, "" )
}
func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
tsr.Delete(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
tsr.DeleteDefault(index,index)
}
func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
tsr.ReplaceToken(program_name, from, to, "")
}
func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
tsr.DeleteToken(Default_Program_Name, from, to)
}
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
i, ok := tsr.last_rewrite_token_indexes[program_name]
if !ok{
return -1
}
return i
}
func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
tsr.last_rewrite_token_indexes[program_name] = i
}
func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
is := tsr.GetProgram(name)
is = append(is, op)
tsr.programs[name] = is
}
func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
is, ok := tsr.programs[name]
if !ok{
is = tsr.InitializeProgram(name)
}
return is
}
// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter)GetTextDefault() string{
return tsr.GetText(
Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
rewrites := tsr.programs[program_name]
start := interval.Start
stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
start = max(start,0)
if rewrites == nil || len(rewrites) == 0{
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
// First, optimize instruction stream
indexToOp := reduceToSingleOperationPerIndex(rewrites)
// Walk buffer, executing instructions and emitting tokens
for i:=start; i<=stop && i<tsr.tokens.Size();{
op := indexToOp[i]
delete(indexToOp, i)// remove so any left have index size-1
t := tsr.tokens.Get(i)
if op == nil{
// no operation at that index, just dump token
if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
i++ // move to next token
}else {
i = op.Execute(&buf)// execute operation and skip
}
}
// include stuff after end if it's last index in buffer
// So, if they did an insertAfter(lastValidIndex, "foo"), include
// foo if end==lastValidIndex.
if stop == tsr.tokens.Size()-1{
// Scan any remaining operations after last token
// should be included (they will be inserts).
for _, op := range indexToOp{
if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
}
}
return buf.String()
}
// We need to combine operations and report invalid operations (like
// overlapping replaces that are not completely nested). Inserts to
// same index need to be combined etc... Here are the cases:
//
// I.i.u I.j.v leave alone, nonoverlapping
// I.i.u I.i.v combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y delete first R
// R.i-j.u R.i-j.v delete first R
// R.i-j.u R.x-y.v | x-y in i-j ERROR
// R.i-j.u R.x-y.v | boundaries overlap ERROR
//
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
// we're not deleting i)
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
// R.x-y.v I.i.u | i in x-y ERROR
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
//
// 1. wipe out any insertions before op within that range.
// 2. Drop any replace op before that is contained completely within
// that range.
// 3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
//
// 1. for any inserts to same index, combine even if not adjacent.
// 2. for any prior replace with same left boundary, combine this
// insert with replace and delete this replace.
// 3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to index &rarr; op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the replace range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// Return a map from token index to operation.
//
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
// WALK REPLACES
for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
if op == nil {
continue
}
rop, ok := op.(*ReplaceOp)
if !ok {
continue
}
// Wipe prior inserts within range
for j := 0; j < i && j < len(rewrites); j++ {
if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
if iop.index == rop.index {
// E.g., insert before 2, delete 2..2; update replace
// text to include insert before, kill insert
rewrites[iop.instruction_index] = nil
if rop.text != "" {
rop.text = iop.text + rop.text
} else {
rop.text = iop.text
}
} else if iop.index > rop.index && iop.index <= rop.LastIndex {
// delete insert as it's a no-op.
rewrites[iop.instruction_index] = nil
}
}
}
// Drop any prior replaces contained within
for j := 0; j < i && j < len(rewrites); j++ {
if prevop, ok := rewrites[j].(*ReplaceOp); ok {
if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
// delete replace as it's a no-op.
rewrites[prevop.instruction_index] = nil
continue
}
// throw exception unless disjoint or identical
disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap, combine to min(left)..max(right)
if prevop.text == "" && rop.text == "" && !disjoint {
rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
println("new rop" + rop.String()) // TODO: remove console write, taken from Java version
} else if !disjoint {
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
}
}
}
// WALK INSERTS
for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
if op == nil {
continue
}
// hack to replicate inheritance in composition
_, iok := rewrites[i].(*InsertBeforeOp)
_, aok := rewrites[i].(*InsertAfterOp)
if !iok && !aok {
continue
}
iop := rewrites[i]
// combine current insert with prior if any at same index
// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
for j := 0; j < i && j < len(rewrites); j++ {
if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
if nextIop.index == iop.GetIndex() {
iop.SetText(nextIop.text + iop.GetText())
rewrites[j] = nil
}
}
if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
if prevIop.index == iop.GetIndex() {
iop.SetText(iop.GetText() + prevIop.text)
rewrites[prevIop.instruction_index] = nil
}
}
}
// look for replaces where iop.index is in range; error
for j := 0; j < i && j < len(rewrites); j++ {
if rop, ok := rewrites[j].(*ReplaceOp); ok {
if iop.GetIndex() == rop.index {
rop.text = iop.GetText() + rop.text
rewrites[i] = nil
continue
}
if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
}
}
}
}
m := map[int]RewriteOperation{}
for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
if op == nil {
continue
}
if _, ok := m[op.GetIndex()]; ok {
panic("should only be one op per index")
}
m[op.GetIndex()] = op
}
return m
}
/*
Quick fix for Go's lack of overloads
*/
func max(a, b int) int {
if a > b {
return a
}
return b
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
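
The reduction rules spelled out in the comment above are easiest to see on a concrete op list. The following standalone sketch uses toy types rather than the runtime's RewriteOperation, and reproduces just two of the rules: an insert-before at a replace's left boundary folds into the replace, and two overlapping deletes merge to min(left)..max(right).

```go
package main

import "fmt"

type toyOp struct {
	insert      bool // true for I.index.text, false for R.index-last.text
	index, last int
	text        string
}

func reduce(ops []*toyOp) []*toyOp {
	for i, op := range ops {
		if op == nil || op.insert {
			continue // only replaces trigger reductions in this sketch
		}
		for j := 0; j < i; j++ {
			prev := ops[j]
			if prev == nil {
				continue
			}
			switch {
			case prev.insert && prev.index == op.index:
				op.text = prev.text + op.text // fold the insert into the replace
				ops[j] = nil
			case !prev.insert && prev.text == "" && op.text == "" &&
				prev.last >= op.index && prev.index <= op.last:
				// merge overlapping deletes to min(left)..max(right)
				if prev.index < op.index {
					op.index = prev.index
				}
				if prev.last > op.last {
					op.last = prev.last
				}
				ops[j] = nil
			}
		}
	}
	var out []*toyOp
	for _, op := range ops {
		if op != nil {
			out = append(out, op)
		}
	}
	return out
}

func main() {
	// I.2."x" followed by R.2-4."y" reduces to R.2-4."xy".
	a := reduce([]*toyOp{{insert: true, index: 2, text: "x"}, {index: 2, last: 4, text: "y"}})
	// D.1-3 followed by D.2-5 reduces to D.1-5.
	b := reduce([]*toyOp{{index: 1, last: 3}, {index: 2, last: 5}})
	fmt.Printf("R.%d-%d.%q  D.%d-%d\n", a[0].index, a[0].last, a[0].text, b[0].index, b[0].last)
}
```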

View File

@@ -0,0 +1,32 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "fmt"
type TraceListener struct {
parser *BaseParser
}
func NewTraceListener(parser *BaseParser) *TraceListener {
tl := new(TraceListener)
tl.parser = parser
return tl
}
func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}
func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
func (t *TraceListener) VisitTerminal(node TerminalNode) {
fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}
func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}

View File

@@ -0,0 +1,428 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"fmt"
"strconv"
"strings"
)
// atom, set, epsilon, action, predicate, rule transitions.
//
// <p>This is a one way link. It emanates from a state (usually via a list of
// transitions) and has a target state.</p>
//
// <p>Since we never have to change the ATN transitions once we construct it,
// we can fix these transitions as specific classes. The DFA transitions on
// the other hand need to update the labels as it adds transitions to the
// states. We'll use the term Edge for the DFA to distinguish them from
// ATN transitions.</p>
type Transition interface {
getTarget() ATNState
setTarget(ATNState)
getIsEpsilon() bool
getLabel() *IntervalSet
getSerializationType() int
Matches(int, int, int) bool
}
type BaseTransition struct {
target ATNState
isEpsilon bool
label int
intervalSet *IntervalSet
serializationType int
}
func NewBaseTransition(target ATNState) *BaseTransition {
if target == nil {
panic("target cannot be nil.")
}
t := new(BaseTransition)
t.target = target
// Are we epsilon, action, sempred?
t.isEpsilon = false
t.intervalSet = nil
return t
}
func (t *BaseTransition) getTarget() ATNState {
return t.target
}
func (t *BaseTransition) setTarget(s ATNState) {
t.target = s
}
func (t *BaseTransition) getIsEpsilon() bool {
return t.isEpsilon
}
func (t *BaseTransition) getLabel() *IntervalSet {
return t.intervalSet
}
func (t *BaseTransition) getSerializationType() int {
return t.serializationType
}
func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
panic("Not implemented")
}
const (
TransitionEPSILON = 1
TransitionRANGE = 2
TransitionRULE = 3
TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
TransitionATOM = 5
TransitionACTION = 6
TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
TransitionNOTSET = 8
TransitionWILDCARD = 9
TransitionPRECEDENCE = 10
)
var TransitionserializationNames = []string{
"INVALID",
"EPSILON",
"RANGE",
"RULE",
"PREDICATE",
"ATOM",
"ACTION",
"SET",
"NOT_SET",
"WILDCARD",
"PRECEDENCE",
}
//var TransitionserializationTypes struct {
// EpsilonTransition int
// RangeTransition int
// RuleTransition int
// PredicateTransition int
// AtomTransition int
// ActionTransition int
// SetTransition int
// NotSetTransition int
// WildcardTransition int
// PrecedencePredicateTransition int
//}{
// TransitionEPSILON,
// TransitionRANGE,
// TransitionRULE,
// TransitionPREDICATE,
// TransitionATOM,
// TransitionACTION,
// TransitionSET,
// TransitionNOTSET,
// TransitionWILDCARD,
// TransitionPRECEDENCE
//}
// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
*BaseTransition
}
func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
t := new(AtomTransition)
t.BaseTransition = NewBaseTransition(target)
t.label = intervalSet // The token type or character value; or, signifies a special label.
t.intervalSet = t.makeLabel()
t.serializationType = TransitionATOM
return t
}
func (t *AtomTransition) makeLabel() *IntervalSet {
s := NewIntervalSet()
s.addOne(t.label)
return s
}
func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.label == symbol
}
func (t *AtomTransition) String() string {
return strconv.Itoa(t.label)
}
type RuleTransition struct {
*BaseTransition
followState ATNState
ruleIndex, precedence int
}
func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
t := new(RuleTransition)
t.BaseTransition = NewBaseTransition(ruleStart)
t.ruleIndex = ruleIndex
t.precedence = precedence
t.followState = followState
t.serializationType = TransitionRULE
t.isEpsilon = true
return t
}
func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
type EpsilonTransition struct {
*BaseTransition
outermostPrecedenceReturn int
}
func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
t := new(EpsilonTransition)
t.BaseTransition = NewBaseTransition(target)
t.serializationType = TransitionEPSILON
t.isEpsilon = true
t.outermostPrecedenceReturn = outermostPrecedenceReturn
return t
}
func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
func (t *EpsilonTransition) String() string {
return "epsilon"
}
type RangeTransition struct {
*BaseTransition
start, stop int
}
func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
t := new(RangeTransition)
t.BaseTransition = NewBaseTransition(target)
t.serializationType = TransitionRANGE
t.start = start
t.stop = stop
t.intervalSet = t.makeLabel()
return t
}
func (t *RangeTransition) makeLabel() *IntervalSet {
s := NewIntervalSet()
s.addRange(t.start, t.stop)
return s
}
func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= t.start && symbol <= t.stop
}
func (t *RangeTransition) String() string {
var sb strings.Builder
sb.WriteByte('\'')
sb.WriteRune(rune(t.start))
sb.WriteString("'..'")
sb.WriteRune(rune(t.stop))
sb.WriteByte('\'')
return sb.String()
}
type AbstractPredicateTransition interface {
Transition
IAbstractPredicateTransitionFoo()
}
type BaseAbstractPredicateTransition struct {
*BaseTransition
}
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
t := new(BaseAbstractPredicateTransition)
t.BaseTransition = NewBaseTransition(target)
return t
}
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
type PredicateTransition struct {
*BaseAbstractPredicateTransition
isCtxDependent bool
ruleIndex, predIndex int
}
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
t := new(PredicateTransition)
t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
t.serializationType = TransitionPREDICATE
t.ruleIndex = ruleIndex
t.predIndex = predIndex
t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
t.isEpsilon = true
return t
}
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
func (t *PredicateTransition) getPredicate() *Predicate {
return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
}
func (t *PredicateTransition) String() string {
return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
}
type ActionTransition struct {
*BaseTransition
isCtxDependent bool
ruleIndex, actionIndex, predIndex int
}
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
t := new(ActionTransition)
t.BaseTransition = NewBaseTransition(target)
t.serializationType = TransitionACTION
t.ruleIndex = ruleIndex
t.actionIndex = actionIndex
t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
t.isEpsilon = true
return t
}
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
func (t *ActionTransition) String() string {
return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
}
type SetTransition struct {
*BaseTransition
}
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
t := new(SetTransition)
t.BaseTransition = NewBaseTransition(target)
t.serializationType = TransitionSET
if set != nil {
t.intervalSet = set
} else {
t.intervalSet = NewIntervalSet()
t.intervalSet.addOne(TokenInvalidType)
}
return t
}
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return t.intervalSet.contains(symbol)
}
func (t *SetTransition) String() string {
return t.intervalSet.String()
}
type NotSetTransition struct {
*SetTransition
}
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
t := new(NotSetTransition)
t.SetTransition = NewSetTransition(target, set)
t.serializationType = TransitionNOTSET
return t
}
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
}
func (t *NotSetTransition) String() string {
return "~" + t.intervalSet.String()
}
type WildcardTransition struct {
*BaseTransition
}
func NewWildcardTransition(target ATNState) *WildcardTransition {
t := new(WildcardTransition)
t.BaseTransition = NewBaseTransition(target)
t.serializationType = TransitionWILDCARD
return t
}
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
}
func (t *WildcardTransition) String() string {
return "."
}
type PrecedencePredicateTransition struct {
*BaseAbstractPredicateTransition
precedence int
}
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
t := new(PrecedencePredicateTransition)
t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
t.serializationType = TransitionPRECEDENCE
t.precedence = precedence
t.isEpsilon = true
return t
}
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
return false
}
func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
return NewPrecedencePredicate(t.precedence)
}
func (t *PrecedencePredicateTransition) String() string {
return fmt.Sprint(t.precedence) + " >= _p"
}
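
Every transition type answers the same Matches(symbol, minVocabSymbol, maxVocabSymbol) question, so the differences are easiest to see side by side. A minimal sketch comparing atom, range, and wildcard matching; the only assumption beyond this file is that NewBaseATNState (from the runtime's atn_state.go, not shown in this diff) can supply a target state:

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// Assumption: NewBaseATNState gives us a throwaway target state for the edges.
	target := antlr.NewBaseATNState()

	atom := antlr.NewAtomTransition(target, 'x') // matches exactly one symbol
	rng := antlr.NewRangeTransition(target, 'a', 'z')
	wild := antlr.NewWildcardTransition(target)

	fmt.Println(atom.Matches('x', 0, 0xFFFF))  // true
	fmt.Println(atom.Matches('y', 0, 0xFFFF))  // false
	fmt.Println(rng.Matches('m', 0, 0xFFFF))   // true: bounds are inclusive
	fmt.Println(wild.Matches('m', 'a', 'z'))   // true: inside the vocabulary bounds
	fmt.Println(wild.Matches(0x200, 'a', 'z')) // false: outside them
}
```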

256
vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go generated vendored Normal file
View File

@@ -0,0 +1,256 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
// The basic notion of a tree has a parent, a payload, and a list of children.
// It is the most abstract interface for all the trees used by ANTLR.
var TreeInvalidInterval = NewInterval(-1, -2)
type Tree interface {
GetParent() Tree
SetParent(Tree)
GetPayload() interface{}
GetChild(i int) Tree
GetChildCount() int
GetChildren() []Tree
}
type SyntaxTree interface {
Tree
GetSourceInterval() *Interval
}
type ParseTree interface {
SyntaxTree
Accept(Visitor ParseTreeVisitor) interface{}
GetText() string
ToStringTree([]string, Recognizer) string
}
type RuleNode interface {
ParseTree
GetRuleContext() RuleContext
GetBaseRuleContext() *BaseRuleContext
}
type TerminalNode interface {
ParseTree
GetSymbol() Token
}
type ErrorNode interface {
TerminalNode
errorNode()
}
type ParseTreeVisitor interface {
Visit(tree ParseTree) interface{}
VisitChildren(node RuleNode) interface{}
VisitTerminal(node TerminalNode) interface{}
VisitErrorNode(node ErrorNode) interface{}
}
type BaseParseTreeVisitor struct{}
var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
// TODO
//func (this ParseTreeVisitor) Visit(ctx) {
// if (Utils.isArray(ctx)) {
// self := this
// return ctx.map(function(child) { return VisitAtom(self, child)})
// } else {
// return VisitAtom(this, ctx)
// }
//}
//
//func VisitAtom(Visitor, ctx) {
// if (ctx.parser == nil) { //is terminal
// return
// }
//
// name := ctx.parser.ruleNames[ctx.ruleIndex]
// funcName := "Visit" + Utils.titleCase(name)
//
// return Visitor[funcName](ctx)
//}
type ParseTreeListener interface {
VisitTerminal(node TerminalNode)
VisitErrorNode(node ErrorNode)
EnterEveryRule(ctx ParserRuleContext)
ExitEveryRule(ctx ParserRuleContext)
}
type BaseParseTreeListener struct{}
var _ ParseTreeListener = &BaseParseTreeListener{}
func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
type TerminalNodeImpl struct {
parentCtx RuleContext
symbol Token
}
var _ TerminalNode = &TerminalNodeImpl{}
func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
tn := new(TerminalNodeImpl)
tn.parentCtx = nil
tn.symbol = symbol
return tn
}
func (t *TerminalNodeImpl) GetChild(i int) Tree {
return nil
}
func (t *TerminalNodeImpl) GetChildren() []Tree {
return nil
}
func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
panic("Cannot set children on terminal node")
}
func (t *TerminalNodeImpl) GetSymbol() Token {
return t.symbol
}
func (t *TerminalNodeImpl) GetParent() Tree {
return t.parentCtx
}
func (t *TerminalNodeImpl) SetParent(tree Tree) {
t.parentCtx = tree.(RuleContext)
}
func (t *TerminalNodeImpl) GetPayload() interface{} {
return t.symbol
}
func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
if t.symbol == nil {
return TreeInvalidInterval
}
tokenIndex := t.symbol.GetTokenIndex()
return NewInterval(tokenIndex, tokenIndex)
}
func (t *TerminalNodeImpl) GetChildCount() int {
return 0
}
func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
return v.VisitTerminal(t)
}
func (t *TerminalNodeImpl) GetText() string {
return t.symbol.GetText()
}
func (t *TerminalNodeImpl) String() string {
if t.symbol.GetTokenType() == TokenEOF {
return "<EOF>"
}
return t.symbol.GetText()
}
func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
return t.String()
}
// Represents a token that was consumed during reSynchronization
// rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion
// and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions.
type ErrorNodeImpl struct {
*TerminalNodeImpl
}
var _ ErrorNode = &ErrorNodeImpl{}
func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
en := new(ErrorNodeImpl)
en.TerminalNodeImpl = NewTerminalNodeImpl(token)
return en
}
func (e *ErrorNodeImpl) errorNode() {}
func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
return v.VisitErrorNode(e)
}
type ParseTreeWalker struct {
}
func NewParseTreeWalker() *ParseTreeWalker {
return new(ParseTreeWalker)
}
// Performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, EnterRule is called before
// recursively walking down into child nodes, then
// ExitRule is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
switch tt := t.(type) {
case ErrorNode:
listener.VisitErrorNode(tt)
case TerminalNode:
listener.VisitTerminal(tt)
default:
p.EnterRule(listener, t.(RuleNode))
for i := 0; i < t.GetChildCount(); i++ {
child := t.GetChild(i)
p.Walk(listener, child)
}
p.ExitRule(listener, t.(RuleNode))
}
}
//
// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// then by triggering the event specific to the given parse tree node
//
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
listener.EnterEveryRule(ctx)
ctx.EnterRule(listener)
}
// Exits a grammar rule by first triggering the event specific to the given parse tree node
// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
//
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
ctx := r.GetRuleContext().(ParserRuleContext)
ctx.ExitRule(listener)
listener.ExitEveryRule(ctx)
}
var ParseTreeWalkerDefault = NewParseTreeWalker()
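
In practice a listener embeds BaseParseTreeListener and overrides only the callbacks it needs; Walk does the rest. A minimal sketch, with a hypothetical ruleCounter type (obtaining a parse tree requires a generated parser, which is grammar-specific and omitted here):

```go
package walkdemo

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// ruleCounter overrides only the callbacks it cares about; the embedded
// BaseParseTreeListener supplies no-ops for the rest.
type ruleCounter struct {
	*antlr.BaseParseTreeListener
	rules, tokens int
}

func (c *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { c.rules++ }
func (c *ruleCounter) VisitTerminal(node antlr.TerminalNode)      { c.tokens++ }

// countNodes walks any parse tree produced by a generated parser.
func countNodes(tree antlr.Tree) {
	c := &ruleCounter{BaseParseTreeListener: &antlr.BaseParseTreeListener{}}
	antlr.ParseTreeWalkerDefault.Walk(c, tree)
	fmt.Printf("%d rule contexts, %d tokens\n", c.rules, c.tokens)
}
```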

View File

@@ -0,0 +1,137 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
// node payloads to get the text for the nodes. Detect
// parse trees and extract data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
if recog != nil {
ruleNames = recog.GetRuleNames()
}
s := TreesGetNodeText(tree, ruleNames, nil)
s = EscapeWhitespace(s, false)
c := tree.GetChildCount()
if c == 0 {
return s
}
res := "(" + s + " "
if c > 0 {
s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
res += s
}
for i := 1; i < c; i++ {
s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
res += (" " + s)
}
res += ")"
return res
}
func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
if recog != nil {
ruleNames = recog.GetRuleNames()
}
if ruleNames != nil {
switch t2 := t.(type) {
case RuleNode:
t3 := t2.GetRuleContext()
altNumber := t3.GetAltNumber()
if altNumber != ATNInvalidAltNumber {
return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
}
return ruleNames[t3.GetRuleIndex()]
case ErrorNode:
return fmt.Sprint(t2)
case TerminalNode:
if t2.GetSymbol() != nil {
return t2.GetSymbol().GetText()
}
}
}
// no recog for rule names
payload := t.GetPayload()
if p2, ok := payload.(Token); ok {
return p2.GetText()
}
return fmt.Sprint(t.GetPayload())
}
// Return ordered list of all children of this node
func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
list = append(list, t.GetChild(i))
}
return list
}
// Return a list of all ancestors of this node. The first node of
// list is the root and the last is the parent of this node.
//
func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
for t != nil {
f := []Tree{t}
ancestors = append(f, ancestors...)
t = t.GetParent()
}
return ancestors
}
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
nodes := make([]ParseTree, 0)
treesFindAllNodes(t, index, findTokens, &nodes)
return nodes
}
func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
// check this node (the root) first
t2, ok := t.(TerminalNode)
t3, ok2 := t.(ParserRuleContext)
if findTokens && ok {
if t2.GetSymbol().GetTokenType() == index {
*nodes = append(*nodes, t2)
}
} else if !findTokens && ok2 {
if t3.GetRuleIndex() == index {
*nodes = append(*nodes, t3)
}
}
// check children
for i := 0; i < t.GetChildCount(); i++ {
treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
}
}
func TreesDescendants(t ParseTree) []ParseTree {
nodes := []ParseTree{t}
for i := 0; i < t.GetChildCount(); i++ {
nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
}
return nodes
}

View File

@@ -0,0 +1,344 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"bytes"
"errors"
"fmt"
"math/bits"
"strconv"
"strings"
)
func intMin(a, b int) int {
if a < b {
return a
}
return b
}
func intMax(a, b int) int {
if a > b {
return a
}
return b
}
// A simple integer stack
type IntStack []int
var ErrEmptyStack = errors.New("Stack is empty")
func (s *IntStack) Pop() (int, error) {
l := len(*s) - 1
if l < 0 {
return 0, ErrEmptyStack
}
v := (*s)[l]
*s = (*s)[0:l]
return v, nil
}
func (s *IntStack) Push(e int) {
*s = append(*s, e)
}
func standardEqualsFunction(a interface{}, b interface{}) bool {
ac, oka := a.(comparable)
bc, okb := b.(comparable)
if !oka || !okb {
panic("Not Comparable")
}
return ac.equals(bc)
}
func standardHashFunction(a interface{}) int {
if h, ok := a.(hasher); ok {
return h.hash()
}
panic("Not Hasher")
}
type hasher interface {
hash() int
}
const bitsPerWord = 64
func indexForBit(bit int) int {
return bit / bitsPerWord
}
func wordForBit(data []uint64, bit int) uint64 {
idx := indexForBit(bit)
if idx >= len(data) {
return 0
}
return data[idx]
}
func maskForBit(bit int) uint64 {
return uint64(1) << (bit % bitsPerWord)
}
func wordsNeeded(bit int) int {
return indexForBit(bit) + 1
}
type BitSet struct {
data []uint64
}
func NewBitSet() *BitSet {
return &BitSet{}
}
func (b *BitSet) add(value int) {
idx := indexForBit(value)
if idx >= len(b.data) {
size := wordsNeeded(value)
data := make([]uint64, size)
copy(data, b.data)
b.data = data
}
b.data[idx] |= maskForBit(value)
}
func (b *BitSet) clear(index int) {
idx := indexForBit(index)
if idx >= len(b.data) {
return
}
b.data[idx] &= ^maskForBit(index)
}
func (b *BitSet) or(set *BitSet) {
size := intMax(b.minLen(), set.minLen())
if size > len(b.data) {
data := make([]uint64, size)
copy(data, b.data)
b.data = data
}
// Iterate only over the words actually present in set; iterating up to
// size would read past the end of set.data whenever b is the longer set.
for i := 0; i < set.minLen(); i++ {
b.data[i] |= set.data[i]
}
}
func (b *BitSet) remove(value int) {
b.clear(value)
}
func (b *BitSet) contains(value int) bool {
idx := indexForBit(value)
if idx >= len(b.data) {
return false
}
return (b.data[idx] & maskForBit(value)) != 0
}
func (b *BitSet) minValue() int {
for i, v := range b.data {
if v == 0 {
continue
}
return i*bitsPerWord + bits.TrailingZeros64(v)
}
return 2147483647
}
func (b *BitSet) equals(other interface{}) bool {
otherBitSet, ok := other.(*BitSet)
if !ok {
return false
}
if b == otherBitSet {
return true
}
if len(b.data) != len(otherBitSet.data) {
return false
}
for k := range b.data {
if b.data[k] != otherBitSet.data[k] {
return false
}
}
return true
}
func (b *BitSet) minLen() int {
for i := len(b.data); i > 0; i-- {
if b.data[i-1] != 0 {
return i
}
}
return 0
}
func (b *BitSet) length() int {
cnt := 0
for _, val := range b.data {
cnt += bits.OnesCount64(val)
}
return cnt
}
func (b *BitSet) String() string {
vals := make([]string, 0, b.length())
for i, v := range b.data {
for v != 0 {
n := bits.TrailingZeros64(v)
vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
v &= ^(uint64(1) << n)
}
}
return "{" + strings.Join(vals, ", ") + "}"
}
type AltDict struct {
data map[string]interface{}
}
func NewAltDict() *AltDict {
d := new(AltDict)
d.data = make(map[string]interface{})
return d
}
func (a *AltDict) Get(key string) interface{} {
key = "k-" + key
return a.data[key]
}
func (a *AltDict) put(key string, value interface{}) {
key = "k-" + key
a.data[key] = value
}
func (a *AltDict) values() []interface{} {
vs := make([]interface{}, len(a.data))
i := 0
for _, v := range a.data {
vs[i] = v
i++
}
return vs
}
type DoubleDict struct {
data map[int]map[int]interface{}
}
func NewDoubleDict() *DoubleDict {
dd := new(DoubleDict)
dd.data = make(map[int]map[int]interface{})
return dd
}
func (d *DoubleDict) Get(a, b int) interface{} {
data := d.data[a]
if data == nil {
return nil
}
return data[b]
}
func (d *DoubleDict) set(a, b int, o interface{}) {
data := d.data[a]
if data == nil {
data = make(map[int]interface{})
d.data[a] = data
}
data[b] = o
}
func EscapeWhitespace(s string, escapeSpaces bool) string {
s = strings.Replace(s, "\t", "\\t", -1)
s = strings.Replace(s, "\n", "\\n", -1)
s = strings.Replace(s, "\r", "\\r", -1)
if escapeSpaces {
s = strings.Replace(s, " ", "\u00B7", -1)
}
return s
}
func TerminalNodeToStringArray(sa []TerminalNode) []string {
st := make([]string, len(sa))
for i, s := range sa {
st[i] = fmt.Sprintf("%v", s)
}
return st
}
func PrintArrayJavaStyle(sa []string) string {
var buffer bytes.Buffer
buffer.WriteString("[")
for i, s := range sa {
buffer.WriteString(s)
if i != len(sa)-1 {
buffer.WriteString(", ")
}
}
buffer.WriteString("]")
return buffer.String()
}
// murmur hash
func murmurInit(seed int) int {
return seed
}
func murmurUpdate(h int, value int) int {
const c1 uint32 = 0xCC9E2D51
const c2 uint32 = 0x1B873593
const r1 uint32 = 15
const r2 uint32 = 13
const m uint32 = 5
const n uint32 = 0xE6546B64
k := uint32(value)
k *= c1
k = (k << r1) | (k >> (32 - r1))
k *= c2
hash := uint32(h) ^ k
hash = (hash << r2) | (hash >> (32 - r2))
hash = hash*m + n
return int(hash)
}
func murmurFinish(h int, numberOfWords int) int {
var hash = uint32(h)
hash ^= uint32(numberOfWords) << 2
hash ^= hash >> 16
hash *= 0x85ebca6b
hash ^= hash >> 13
hash *= 0xc2b2ae35
hash ^= hash >> 16
return int(hash)
}

View File

@@ -0,0 +1,237 @@
package antlr
import "math"
const (
_initalCapacity = 16
_initalBucketCapacity = 8
_loadFactor = 0.75
)
var _ Set = (*Array2DHashSet)(nil)
type Set interface {
Add(value interface{}) (added interface{})
Len() int
Get(value interface{}) (found interface{})
Contains(value interface{}) bool
Values() []interface{}
Each(f func(interface{}) bool)
}
type Array2DHashSet struct {
buckets [][]interface{}
hashcodeFunction func(interface{}) int
equalsFunction func(interface{}, interface{}) bool
n int // How many elements in set
threshold int // when to expand
currentPrime int // jump by 4 primes each expand or whatever
initialBucketCapacity int
}
func (as *Array2DHashSet) Each(f func(interface{}) bool) {
if as.Len() < 1 {
return
}
for _, bucket := range as.buckets {
for _, o := range bucket {
if o == nil {
break
}
if !f(o) {
return
}
}
}
}
func (as *Array2DHashSet) Values() []interface{} {
if as.Len() < 1 {
return nil
}
values := make([]interface{}, 0, as.Len())
as.Each(func(i interface{}) bool {
values = append(values, i)
return true
})
return values
}
func (as *Array2DHashSet) Contains(value interface{}) bool {
return as.Get(value) != nil
}
func (as *Array2DHashSet) Add(value interface{}) interface{} {
if as.n > as.threshold {
as.expand()
}
return as.innerAdd(value)
}
func (as *Array2DHashSet) expand() {
old := as.buckets
as.currentPrime += 4
var (
newCapacity = len(as.buckets) << 1
newTable = as.createBuckets(newCapacity)
newBucketLengths = make([]int, len(newTable))
)
as.buckets = newTable
as.threshold = int(float64(newCapacity) * _loadFactor)
for _, bucket := range old {
if bucket == nil {
continue
}
for _, o := range bucket {
if o == nil {
break
}
b := as.getBuckets(o)
bucketLength := newBucketLengths[b]
var newBucket []interface{}
if bucketLength == 0 {
// new bucket
newBucket = as.createBucket(as.initialBucketCapacity)
newTable[b] = newBucket
} else {
newBucket = newTable[b]
if bucketLength == len(newBucket) {
// expand
newBucketCopy := make([]interface{}, len(newBucket)<<1)
copy(newBucketCopy[:bucketLength], newBucket)
newBucket = newBucketCopy
newTable[b] = newBucket
}
}
newBucket[bucketLength] = o
newBucketLengths[b]++
}
}
}
func (as *Array2DHashSet) Len() int {
return as.n
}
func (as *Array2DHashSet) Get(o interface{}) interface{} {
if o == nil {
return nil
}
b := as.getBuckets(o)
bucket := as.buckets[b]
if bucket == nil { // no bucket
return nil
}
for _, e := range bucket {
if e == nil {
return nil // empty slot; not there
}
if as.equalsFunction(e, o) {
return e
}
}
return nil
}
func (as *Array2DHashSet) innerAdd(o interface{}) interface{} {
b := as.getBuckets(o)
bucket := as.buckets[b]
// new bucket
if bucket == nil {
bucket = as.createBucket(as.initialBucketCapacity)
bucket[0] = o
as.buckets[b] = bucket
as.n++
return o
}
// look for it in bucket
for i := 0; i < len(bucket); i++ {
existing := bucket[i]
if existing == nil { // empty slot; not there, add.
bucket[i] = o
as.n++
return o
}
if as.equalsFunction(existing, o) { // found existing, quit
return existing
}
}
// full bucket, expand and add to end
oldLength := len(bucket)
bucketCopy := make([]interface{}, oldLength<<1)
copy(bucketCopy[:oldLength], bucket)
bucket = bucketCopy
as.buckets[b] = bucket
bucket[oldLength] = o
as.n++
return o
}
func (as *Array2DHashSet) getBuckets(value interface{}) int {
hash := as.hashcodeFunction(value)
return hash & (len(as.buckets) - 1)
}
func (as *Array2DHashSet) createBuckets(cap int) [][]interface{} {
return make([][]interface{}, cap)
}
func (as *Array2DHashSet) createBucket(cap int) []interface{} {
return make([]interface{}, cap)
}
func NewArray2DHashSetWithCap(
hashcodeFunction func(interface{}) int,
equalsFunction func(interface{}, interface{}) bool,
initCap int,
initBucketCap int,
) *Array2DHashSet {
if hashcodeFunction == nil {
hashcodeFunction = standardHashFunction
}
if equalsFunction == nil {
equalsFunction = standardEqualsFunction
}
ret := &Array2DHashSet{
hashcodeFunction: hashcodeFunction,
equalsFunction: equalsFunction,
n: 0,
threshold: int(math.Floor(_initalCapacity * _loadFactor)),
currentPrime: 1,
initialBucketCapacity: initBucketCap,
}
ret.buckets = ret.createBuckets(initCap)
return ret
}
func NewArray2DHashSet(
hashcodeFunction func(interface{}) int,
equalsFunction func(interface{}, interface{}) bool,
) *Array2DHashSet {
return NewArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}

View File

@@ -0,0 +1,79 @@
lexer grammar CESQLLexer;
// NOTE:
// This grammar is case-sensitive, although CESQL keywords are case-insensitive.
// In order to implement case-insensitivity, check out
// https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach
// Skip tab, carriage return and newlines
SPACE: [ \t\r\n]+ -> skip;
// Fragments for Literal primitives
fragment ID_LITERAL: [a-zA-Z0-9]+;
fragment DQUOTA_STRING: '"' ( '\\'. | '""' | ~('"'| '\\') )* '"';
fragment SQUOTA_STRING: '\'' ('\\'. | '\'\'' | ~('\'' | '\\'))* '\'';
fragment INT_DIGIT: [0-9];
fragment FN_LITERAL: [A-Z] [A-Z_]*;
// Constructors symbols
LR_BRACKET: '(';
RR_BRACKET: ')';
COMMA: ',';
SINGLE_QUOTE_SYMB: '\'';
DOUBLE_QUOTE_SYMB: '"';
fragment QUOTE_SYMB
: SINGLE_QUOTE_SYMB | DOUBLE_QUOTE_SYMB
;
// Operators
// - Logic
AND: 'AND';
OR: 'OR';
XOR: 'XOR';
NOT: 'NOT';
// - Arithmetics
STAR: '*';
DIVIDE: '/';
MODULE: '%';
PLUS: '+';
MINUS: '-';
// - Comparison
EQUAL: '=';
NOT_EQUAL: '!=';
GREATER: '>';
GREATER_OR_EQUAL: '>=';
LESS: '<';
LESS_GREATER: '<>';
LESS_OR_EQUAL: '<=';
// Like, exists, in
LIKE: 'LIKE';
EXISTS: 'EXISTS';
IN: 'IN';
// Booleans
TRUE: 'TRUE';
FALSE: 'FALSE';
// Literals
DQUOTED_STRING_LITERAL: DQUOTA_STRING;
SQUOTED_STRING_LITERAL: SQUOTA_STRING;
INTEGER_LITERAL: INT_DIGIT+;
// Identifiers
IDENTIFIER: [a-zA-Z]+;
IDENTIFIER_WITH_NUMBER: [a-zA-Z0-9]+;
FUNCTION_IDENTIFIER_WITH_UNDERSCORE: [A-Z] [A-Z_]*;

View File

@@ -0,0 +1,62 @@
grammar CESQLParser;
import CESQLLexer;
// Entrypoint
cesql: expression EOF;
// Structure of operations, function invocations and expression
expression
: functionIdentifier functionParameterList #functionInvocationExpression
// unary operators are the highest priority
| NOT expression #unaryLogicExpression
| MINUS expression #unaryNumericExpression
// LIKE, EXISTS and IN take precedence over all the other binary operators
| expression NOT? LIKE stringLiteral #likeExpression
| EXISTS identifier #existsExpression
| expression NOT? IN setExpression #inExpression
// Numeric operations
| expression (STAR | DIVIDE | MODULE) expression #binaryMultiplicativeExpression
| expression (PLUS | MINUS) expression #binaryAdditiveExpression
// Comparison operations
| expression (EQUAL | NOT_EQUAL | LESS_GREATER | GREATER_OR_EQUAL | LESS_OR_EQUAL | LESS | GREATER) expression #binaryComparisonExpression
// Logic operations
|<assoc=right> expression (AND | OR | XOR) expression #binaryLogicExpression
// Subexpressions and atoms
| LR_BRACKET expression RR_BRACKET #subExpression
| atom #atomExpression
;
atom
: booleanLiteral #booleanAtom
| integerLiteral #integerAtom
| stringLiteral #stringAtom
| identifier #identifierAtom
;
// Identifiers
identifier
: (IDENTIFIER | IDENTIFIER_WITH_NUMBER)
;
functionIdentifier
: (IDENTIFIER | FUNCTION_IDENTIFIER_WITH_UNDERSCORE)
;
// Literals
booleanLiteral: (TRUE | FALSE);
stringLiteral: (DQUOTED_STRING_LITERAL | SQUOTED_STRING_LITERAL);
integerLiteral: INTEGER_LITERAL;
// Functions
functionParameterList
: LR_BRACKET ( expression ( COMMA expression )* )? RR_BRACKET
;
// Sets
setExpression
: LR_BRACKET expression ( COMMA expression )* RR_BRACKET // Empty sets are not allowed
;

201
vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

0
vendor/github.com/cloudevents/sdk-go/sql/v2/Makefile generated vendored Normal file
View File

32
vendor/github.com/cloudevents/sdk-go/sql/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,32 @@
# CloudEvents Expression Language Go implementation
CloudEvents Expression Language implementation.
Note: this package is a work in progress, APIs might break in future releases.
## User guide
To start using it:
```go
import cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
// Parse the expression
expression, err := cesqlparser.Parse("subject = 'Hello world'")
// Res can be either int32, bool or string
res, err := expression.Evaluate(event)
```
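The fragment above compiles once an event is in hand; a complete version might look like the following sketch (the event setters come from sdk-go v2's Event API):
```go
package main

import (
	"fmt"

	cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()
	e.SetID("1")
	e.SetSource("example/source")
	e.SetType("example.type")
	e.SetSubject("Hello world")

	expression, err := cesqlparser.Parse("subject = 'Hello world'")
	if err != nil {
		panic(err)
	}

	res, err := expression.Evaluate(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(res) // true
}
```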
## Development guide
To regenerate the parser, make sure you have [ANTLR4 installed](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) and then run:
```shell
antlr4 -Dlanguage=Go -package gen -o gen -visitor -no-listener CESQLParser.g4
```
Then you need to run this sed command as a workaround until this ANTLR [issue](https://github.com/antlr/antlr4/issues/2433) is resolved. Without this, building for 32bit platforms will throw an int overflow error:
```shell
sed -i 's/(1<</(int64(1)<</g' gen/cesqlparser_parser.go
```

View File

@@ -0,0 +1,17 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package v2
import cloudevents "github.com/cloudevents/sdk-go/v2"
// Expression represents a parsed CloudEvents SQL Expression.
type Expression interface {
// Evaluate the expression using the provided input type.
// The return value can be either int32, bool or string.
// The evaluation fails as soon as an error arises.
Evaluate(event cloudevents.Event) (interface{}, error)
}

View File

@@ -0,0 +1,17 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import cesql "github.com/cloudevents/sdk-go/sql/v2"
type baseUnaryExpression struct {
child cesql.Expression
}
type baseBinaryExpression struct {
left cesql.Expression
right cesql.Expression
}

View File

@@ -0,0 +1,56 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type equalExpression struct {
baseBinaryExpression
equal bool
}
func (s equalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
leftVal, err := s.left.Evaluate(event)
if err != nil {
return nil, err
}
rightVal, err := s.right.Evaluate(event)
if err != nil {
return nil, err
}
leftVal, err = utils.Cast(leftVal, cesql.TypeFromVal(rightVal))
if err != nil {
return nil, err
}
return (leftVal == rightVal) == s.equal, nil
}
func NewEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return equalExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
equal: true,
}
}
func NewNotEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return equalExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
equal: false,
}
}

View File

@@ -0,0 +1,24 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type existsExpression struct {
identifier string
}
func (l existsExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
return utils.ContainsAttribute(event, l.identifier), nil
}
func NewExistsExpression(identifier string) cesql.Expression {
return existsExpression{identifier: identifier}
}

View File

@@ -0,0 +1,57 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
"fmt"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/runtime"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type functionInvocationExpression struct {
name string
argumentsExpression []cesql.Expression
}
func (expr functionInvocationExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
fn := runtime.ResolveFunction(expr.name, len(expr.argumentsExpression))
if fn == nil {
return nil, fmt.Errorf("cannot resolve function %s", expr.name)
}
args := make([]interface{}, len(expr.argumentsExpression))
for i, expr := range expr.argumentsExpression {
arg, err := expr.Evaluate(event)
if err != nil {
return nil, err
}
argType := fn.ArgType(i)
if argType == nil {
return nil, fmt.Errorf("cannot resolve arg type at index %d", i)
}
arg, err = utils.Cast(arg, *argType)
if err != nil {
return nil, err
}
args[i] = arg
}
return fn.Run(event, args)
}
func NewFunctionInvocationExpression(name string, argumentsExpression []cesql.Expression) cesql.Expression {
return functionInvocationExpression{
name: name,
argumentsExpression: argumentsExpression,
}
}

View File

@@ -0,0 +1,31 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
"fmt"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type identifierExpression struct {
identifier string
}
func (l identifierExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
value := utils.GetAttribute(event, l.identifier)
if value == nil {
return nil, fmt.Errorf("missing attribute '%s'", l.identifier)
}
return value, nil
}
func NewIdentifierExpression(identifier string) cesql.Expression {
return identifierExpression{identifier: identifier}
}

View File

@@ -0,0 +1,46 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type inExpression struct {
leftExpression cesql.Expression
setExpression []cesql.Expression
}
func (l inExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
leftValue, err := l.leftExpression.Evaluate(event)
if err != nil {
return nil, err
}
for _, rightExpression := range l.setExpression {
rightValue, err := rightExpression.Evaluate(event)
if err != nil {
return nil, err
}
rightValue, err = utils.Cast(rightValue, cesql.TypeFromVal(leftValue))
if err != nil {
return nil, err
}
if leftValue == rightValue {
return true, nil
}
}
return false, nil
}
func NewInExpression(leftExpression cesql.Expression, setExpression []cesql.Expression) cesql.Expression {
return inExpression{leftExpression, setExpression}
}

View File

@@ -0,0 +1,89 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type integerComparisonExpression struct {
baseBinaryExpression
fn func(x, y int32) bool
}
func (s integerComparisonExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
leftVal, err := s.left.Evaluate(event)
if err != nil {
return nil, err
}
rightVal, err := s.right.Evaluate(event)
if err != nil {
return nil, err
}
leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
if err != nil {
return nil, err
}
rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
if err != nil {
return nil, err
}
return s.fn(leftVal.(int32), rightVal.(int32)), nil
}
func NewLessExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return integerComparisonExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) bool {
return x < y
},
}
}
func NewLessOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return integerComparisonExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) bool {
return x <= y
},
}
}
func NewGreaterExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return integerComparisonExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) bool {
return x > y
},
}
}
func NewGreaterOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return integerComparisonExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) bool {
return x >= y
},
}
}

View File

@@ -0,0 +1,96 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
"regexp"
"strings"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type likeExpression struct {
baseUnaryExpression
pattern *regexp.Regexp
}
func (l likeExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
val, err := l.child.Evaluate(event)
if err != nil {
return nil, err
}
val, err = utils.Cast(val, cesql.StringType)
if err != nil {
return nil, err
}
return l.pattern.MatchString(val.(string)), nil
}
func NewLikeExpression(child cesql.Expression, pattern string) (cesql.Expression, error) {
// Converting to regex is not the most performant impl, but it works
p, err := convertLikePatternToRegex(pattern)
if err != nil {
return nil, err
}
return likeExpression{
baseUnaryExpression: baseUnaryExpression{
child: child,
},
pattern: p,
}, nil
}
func convertLikePatternToRegex(pattern string) (*regexp.Regexp, error) {
var chunks []string
chunks = append(chunks, "^")
var chunk strings.Builder
for i := 0; i < len(pattern); i++ {
if pattern[i] == '\\' && i < len(pattern)-1 {
if pattern[i+1] == '%' {
// \% case
chunk.WriteRune('%')
chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
chunk.Reset()
i++
continue
} else if pattern[i+1] == '_' {
// \_ case
chunk.WriteRune('_')
chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
chunk.Reset()
i++
continue
}
} else if pattern[i] == '_' {
// replace with .
chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
chunk.Reset()
chunks = append(chunks, ".")
} else if pattern[i] == '%' {
// replace with .*
chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
chunk.Reset()
chunks = append(chunks, ".*")
} else {
chunk.WriteByte(pattern[i])
}
}
if chunk.Len() != 0 {
chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
}
chunks = append(chunks, "$")
return regexp.Compile(strings.Join(chunks, ""))
}
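
convertLikePatternToRegex quotes literal runs with \Q...\E and rewrites the SQL wildcards into their regex equivalents ('%' to '.*', '_' to '.'), while escaped wildcards stay literal. A test-style sketch of the resulting behavior; the example function is hypothetical and sits inside package expression because the converter is unexported:

```go
package expression

import "fmt"

// Example_likeConversion is a sketch: '%' becomes '.*', '_' becomes '.',
// and an escaped '\_' stays a literal underscore.
func Example_likeConversion() {
	re, err := convertLikePatternToRegex(`error\_%.log`)
	if err != nil {
		panic(err)
	}
	fmt.Println(re.MatchString("error_2021.log")) // '\_' matched literally, '%' matched "2021"
	fmt.Println(re.MatchString("errorX2021.log")) // 'X' breaks the quoted literal run
	// Output:
	// true
	// false
}
```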

View File

@@ -0,0 +1,23 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type literalExpression struct {
value interface{}
}
func (l literalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
return l.value, nil
}
func NewLiteralExpression(value interface{}) cesql.Expression {
return literalExpression{value: value}
}

View File

@@ -0,0 +1,77 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type logicExpression struct {
baseBinaryExpression
fn func(x, y bool) bool
}
func (s logicExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
leftVal, err := s.left.Evaluate(event)
if err != nil {
return nil, err
}
rightVal, err := s.right.Evaluate(event)
if err != nil {
return nil, err
}
leftVal, err = utils.Cast(leftVal, cesql.BooleanType)
if err != nil {
return nil, err
}
rightVal, err = utils.Cast(rightVal, cesql.BooleanType)
if err != nil {
return nil, err
}
return s.fn(leftVal.(bool), rightVal.(bool)), nil
}
func NewAndExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return logicExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y bool) bool {
return x && y
},
}
}
func NewOrExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return logicExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y bool) bool {
return x || y
},
}
}
func NewXorExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return logicExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y bool) bool {
return x != y
},
}
}

View File

@@ -0,0 +1,109 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
"errors"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type mathExpression struct {
baseBinaryExpression
fn func(x, y int32) (int32, error)
}
func (s mathExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
leftVal, err := s.left.Evaluate(event)
if err != nil {
return nil, err
}
rightVal, err := s.right.Evaluate(event)
if err != nil {
return nil, err
}
leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
if err != nil {
return nil, err
}
rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
if err != nil {
return nil, err
}
return s.fn(leftVal.(int32), rightVal.(int32))
}
func NewSumExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return mathExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) (int32, error) {
return x + y, nil
},
}
}
func NewDifferenceExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return mathExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) (int32, error) {
return x - y, nil
},
}
}
func NewMultiplicationExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return mathExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) (int32, error) {
return x * y, nil
},
}
}
func NewModuleExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return mathExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) (int32, error) {
if y == 0 {
return 0, errors.New("math error: division by zero")
}
return x % y, nil
},
}
}
func NewDivisionExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
return mathExpression{
baseBinaryExpression: baseBinaryExpression{
left: left,
right: right,
},
fn: func(x, y int32) (int32, error) {
if y == 0 {
return 0, errors.New("math error: division by zero")
}
return x / y, nil
},
}
}
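
A short sketch of the error path above: division (and modulo) by zero surfaces as an evaluation error rather than a panic.

package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/expression"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	expr := expression.NewDivisionExpression(
		expression.NewLiteralExpression(int32(10)),
		expression.NewLiteralExpression(int32(0)),
	)
	_, err := expr.Evaluate(cloudevents.NewEvent())
	fmt.Println(err) // math error: division by zero
}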

@@ -0,0 +1,32 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type negateExpression baseUnaryExpression
func (l negateExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
val, err := l.child.Evaluate(event)
if err != nil {
return nil, err
}
val, err = utils.Cast(val, cesql.IntegerType)
if err != nil {
return nil, err
}
return -(val.(int32)), nil
}
func NewNegateExpression(child cesql.Expression) cesql.Expression {
return negateExpression{child: child}
}

@@ -0,0 +1,32 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package expression
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type notExpression baseUnaryExpression
func (l notExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
val, err := l.child.Evaluate(event)
if err != nil {
return nil, err
}
val, err = utils.Cast(val, cesql.BooleanType)
if err != nil {
return nil, err
}
return !(val.(bool)), nil
}
func NewNotExpression(child cesql.Expression) cesql.Expression {
return notExpression{child: child}
}

@@ -0,0 +1,17 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package v2
import cloudevents "github.com/cloudevents/sdk-go/v2"
type Function interface {
Name() string
Arity() int
IsVariadic() bool
ArgType(index int) *Type
Run(event cloudevents.Event, arguments []interface{}) (interface{}, error)
}
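
To make the contract concrete, here is a hedged sketch of a user-defined implementation, written as if it lived in this package so it can reference Type and StringType directly; the REVERSE function itself is hypothetical and not part of the diff.

type reverseFunction struct{}

func (reverseFunction) Name() string     { return "REVERSE" } // hypothetical example function
func (reverseFunction) Arity() int       { return 1 }         // one fixed argument
func (reverseFunction) IsVariadic() bool { return false }
func (reverseFunction) ArgType(index int) *Type {
	t := StringType // the single fixed argument is a string
	return &t
}
func (reverseFunction) Run(event cloudevents.Event, args []interface{}) (interface{}, error) {
	runes := []rune(args[0].(string))
	for l, r := 0, len(runes)-1; l < r; l, r = l+1, r-1 {
		runes[l], runes[r] = runes[r], runes[l]
	}
	return string(runes), nil
}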

@@ -0,0 +1,57 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package function
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/utils"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
var IntFunction function = function{
name: "INT",
fixedArgs: []cesql.Type{cesql.AnyType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return utils.Cast(i[0], cesql.IntegerType)
},
}
var BoolFunction function = function{
name: "BOOL",
fixedArgs: []cesql.Type{cesql.AnyType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return utils.Cast(i[0], cesql.BooleanType)
},
}
var StringFunction function = function{
name: "STRING",
fixedArgs: []cesql.Type{cesql.AnyType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return utils.Cast(i[0], cesql.StringType)
},
}
var IsIntFunction function = function{
name: "IS_INT",
fixedArgs: []cesql.Type{cesql.AnyType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return utils.CanCast(i[0], cesql.IntegerType), nil
},
}
var IsBoolFunction function = function{
name: "IS_BOOL",
fixedArgs: []cesql.Type{cesql.AnyType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return utils.CanCast(i[0], cesql.BooleanType), nil
},
}

@@ -0,0 +1,41 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package function
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type function struct {
name string
fixedArgs []cesql.Type
variadicArgs *cesql.Type
fn func(cloudevents.Event, []interface{}) (interface{}, error)
}
func (f function) Name() string {
return f.name
}
func (f function) Arity() int {
return len(f.fixedArgs)
}
func (f function) IsVariadic() bool {
return f.variadicArgs != nil
}
func (f function) ArgType(index int) *cesql.Type {
if index < len(f.fixedArgs) {
return &f.fixedArgs[index]
}
return f.variadicArgs
}
func (f function) Run(event cloudevents.Event, arguments []interface{}) (interface{}, error) {
return f.fn(event, arguments)
}
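
A test-style, in-package sketch (the function struct is unexported, so this assumes it sits next to the code above, with "fmt" imported) of how ArgType resolves fixed versus variadic argument types, using a CONCAT_WS-like shape:

func ExampleArgTypeResolution() {
	f := function{
		fixedArgs:    []cesql.Type{cesql.StringType},  // the separator argument
		variadicArgs: cesql.TypePtr(cesql.StringType), // any further string arguments
	}
	fmt.Println(*f.ArgType(0) == cesql.StringType) // true: index 0 hits the fixed args
	fmt.Println(f.ArgType(5) == f.variadicArgs)    // true: past the end falls back to the variadic type
}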

@@ -0,0 +1,24 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package function
import (
cesql "github.com/cloudevents/sdk-go/sql/v2"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
var AbsFunction function = function{
name: "ABS",
fixedArgs: []cesql.Type{cesql.IntegerType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
x := i[0].(int32)
if x < 0 {
return -x, nil
}
return x, nil
},
}

@@ -0,0 +1,177 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package function
import (
"fmt"
"strings"
cesql "github.com/cloudevents/sdk-go/sql/v2"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
var LengthFunction function = function{
name: "LENGTH",
fixedArgs: []cesql.Type{cesql.StringType},
variadicArgs: nil,
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return int32(len(i[0].(string))), nil
},
}
var ConcatFunction function = function{
name: "CONCAT",
variadicArgs: cesql.TypePtr(cesql.StringType),
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
var sb strings.Builder
for _, v := range i {
sb.WriteString(v.(string))
}
return sb.String(), nil
},
}
var ConcatWSFunction function = function{
name: "CONCAT_WS",
fixedArgs: []cesql.Type{cesql.StringType},
variadicArgs: cesql.TypePtr(cesql.StringType),
fn: func(event cloudevents.Event, args []interface{}) (interface{}, error) {
if len(args) == 1 {
return "", nil
}
separator := args[0].(string)
var sb strings.Builder
for i := 1; i < len(args)-1; i++ {
sb.WriteString(args[i].(string))
sb.WriteString(separator)
}
sb.WriteString(args[len(args)-1].(string))
return sb.String(), nil
},
}
var LowerFunction function = function{
name: "LOWER",
fixedArgs: []cesql.Type{cesql.StringType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return strings.ToLower(i[0].(string)), nil
},
}
var UpperFunction function = function{
name: "UPPER",
fixedArgs: []cesql.Type{cesql.StringType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return strings.ToUpper(i[0].(string)), nil
},
}
var TrimFunction function = function{
name: "TRIM",
fixedArgs: []cesql.Type{cesql.StringType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
return strings.TrimSpace(i[0].(string)), nil
},
}
var LeftFunction function = function{
name: "LEFT",
fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
str := i[0].(string)
y := int(i[1].(int32))
if y > len(str) {
return str, nil
}
if y < 0 {
return nil, fmt.Errorf("LEFT y argument is < 0: %d", y)
}
return str[0:y], nil
},
}
var RightFunction function = function{
name: "RIGHT",
fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
str := i[0].(string)
y := int(i[1].(int32))
if y > len(str) {
return str, nil
}
if y < 0 {
return nil, fmt.Errorf("RIGHT y argument is < 0: %d", y)
}
return str[len(str)-y:], nil
},
}
var SubstringFunction function = function{
name: "SUBSTRING",
fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
str := i[0].(string)
pos := int(i[1].(int32))
if pos == 0 {
return "", nil
}
if pos < -len(str) || pos > len(str) {
return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
}
var beginning int
if pos < 0 {
beginning = len(str) + pos
} else {
beginning = pos - 1
}
return str[beginning:], nil
},
}
var SubstringWithLengthFunction function = function{
name: "SUBSTRING",
fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType, cesql.IntegerType},
fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
str := i[0].(string)
pos := int(i[1].(int32))
length := int(i[2].(int32))
if pos == 0 {
return "", nil
}
if pos < -len(str) || pos > len(str) {
return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
}
var beginning int
if pos < 0 {
beginning = len(str) + pos
} else {
beginning = pos - 1
}
var end int
if beginning+length > len(str) {
end = len(str)
} else {
end = beginning + length
}
return str[beginning:end], nil
},
}
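
The SUBSTRING position handling above follows SQL conventions: pos is 1-based, a negative pos counts back from the end, and pos == 0 yields the empty string. A small sketch calling Run directly (bypassing the runtime's argument casting, so arguments are passed with their exact declared types):

package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/function"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	event := cloudevents.NewEvent() // unused by SUBSTRING, required by the signature
	out, _ := function.SubstringFunction.Run(event, []interface{}{"cloudevents", int32(6)})
	fmt.Println(out) // "events": pos is 1-based
	out, _ = function.SubstringFunction.Run(event, []interface{}{"cloudevents", int32(-6)})
	fmt.Println(out) // "events": -6 counts back from the end
}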

@@ -0,0 +1,87 @@
token literal names:
null
null
'('
')'
','
'\''
'"'
'AND'
'OR'
'XOR'
'NOT'
'*'
'/'
'%'
'+'
'-'
'='
'!='
'>'
'>='
'<'
'<>'
'<='
'LIKE'
'EXISTS'
'IN'
'TRUE'
'FALSE'
null
null
null
null
null
null
token symbolic names:
null
SPACE
LR_BRACKET
RR_BRACKET
COMMA
SINGLE_QUOTE_SYMB
DOUBLE_QUOTE_SYMB
AND
OR
XOR
NOT
STAR
DIVIDE
MODULE
PLUS
MINUS
EQUAL
NOT_EQUAL
GREATER
GREATER_OR_EQUAL
LESS
LESS_GREATER
LESS_OR_EQUAL
LIKE
EXISTS
IN
TRUE
FALSE
DQUOTED_STRING_LITERAL
SQUOTED_STRING_LITERAL
INTEGER_LITERAL
IDENTIFIER
IDENTIFIER_WITH_NUMBER
FUNCTION_IDENTIFIER_WITH_UNDERSCORE
rule names:
cesql
expression
atom
identifier
functionIdentifier
booleanLiteral
stringLiteral
integerLiteral
functionParameterList
setExpression
atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 35, 112, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 41, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 57, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 63, 10, 3, 3, 3, 3, 3, 7, 3, 67, 10, 3, 12, 3, 14, 3, 70, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 76, 10, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7, 10, 92, 10, 10, 12, 10, 14, 10, 95, 11, 10, 5, 10, 97, 10, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 105, 10, 11, 12, 11, 14, 11, 108, 11, 11, 3, 11, 3, 11, 3, 11, 2, 3, 4, 12, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 2, 10, 3, 2, 13, 15, 3, 2, 16, 17, 3, 2, 18, 24, 3, 2, 9, 11, 3, 2, 33, 34, 4, 2, 33, 33, 35, 35, 3, 2, 28, 29, 3, 2, 30, 31, 2, 120, 2, 22, 3, 2, 2, 2, 4, 40, 3, 2, 2, 2, 6, 75, 3, 2, 2, 2, 8, 77, 3, 2, 2, 2, 10, 79, 3, 2, 2, 2, 12, 81, 3, 2, 2, 2, 14, 83, 3, 2, 2, 2, 16, 85, 3, 2, 2, 2, 18, 87, 3, 2, 2, 2, 20, 100, 3, 2, 2, 2, 22, 23, 5, 4, 3, 2, 23, 24, 7, 2, 2, 3, 24, 3, 3, 2, 2, 2, 25, 26, 8, 3, 1, 2, 26, 27, 5, 10, 6, 2, 27, 28, 5, 18, 10, 2, 28, 41, 3, 2, 2, 2, 29, 30, 7, 12, 2, 2, 30, 41, 5, 4, 3, 13, 31, 32, 7, 17, 2, 2, 32, 41, 5, 4, 3, 12, 33, 34, 7, 26, 2, 2, 34, 41, 5, 8, 5, 2, 35, 36, 7, 4, 2, 2, 36, 37, 5, 4, 3, 2, 37, 38, 7, 5, 2, 2, 38, 41, 3, 2, 2, 2, 39, 41, 5, 6, 4, 2, 40, 25, 3, 2, 2, 2, 40, 29, 3, 2, 2, 2, 40, 31, 3, 2, 2, 2, 40, 33, 3, 2, 2, 2, 40, 35, 3, 2, 2, 2, 40, 39, 3, 2, 2, 2, 41, 68, 3, 2, 2, 2, 42, 43, 12, 8, 2, 2, 43, 44, 9, 2, 2, 2, 44, 67, 5, 4, 3, 9, 45, 46, 12, 7, 2, 2, 46, 47, 9, 3, 2, 2, 47, 67, 5, 4, 3, 8, 48, 49, 12, 6, 2, 2, 49, 50, 9, 4, 2, 2, 50, 67, 5, 4, 3, 7, 51, 52, 12, 5, 2, 2, 52, 53, 9, 5, 2, 2, 53, 67, 5, 4, 3, 5, 54, 56, 12, 11, 2, 2, 55, 57, 7, 12, 2, 2, 56, 55, 3, 2, 2, 2, 56, 57, 3, 2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 59, 7, 25, 2, 2, 59, 67, 5, 14, 8, 2, 60, 62, 12, 9, 2, 2, 61, 63, 7, 12, 2, 2, 62, 61, 3, 2, 2, 2, 62, 63, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 65, 7, 27, 2, 2, 65, 67, 5, 20, 11, 2, 66, 42, 3, 2, 2, 2, 66, 45, 3, 2, 2, 2, 66, 48, 3, 2, 2, 2, 66, 51, 3, 2, 2, 2, 66, 54, 3, 2, 2, 2, 66, 60, 3, 2, 2, 2, 67, 70, 3, 2, 2, 2, 68, 66, 3, 2, 2, 2, 68, 69, 3, 2, 2, 2, 69, 5, 3, 2, 2, 2, 70, 68, 3, 2, 2, 2, 71, 76, 5, 12, 7, 2, 72, 76, 5, 16, 9, 2, 73, 76, 5, 14, 8, 2, 74, 76, 5, 8, 5, 2, 75, 71, 3, 2, 2, 2, 75, 72, 3, 2, 2, 2, 75, 73, 3, 2, 2, 2, 75, 74, 3, 2, 2, 2, 76, 7, 3, 2, 2, 2, 77, 78, 9, 6, 2, 2, 78, 9, 3, 2, 2, 2, 79, 80, 9, 7, 2, 2, 80, 11, 3, 2, 2, 2, 81, 82, 9, 8, 2, 2, 82, 13, 3, 2, 2, 2, 83, 84, 9, 9, 2, 2, 84, 15, 3, 2, 2, 2, 85, 86, 7, 32, 2, 2, 86, 17, 3, 2, 2, 2, 87, 96, 7, 4, 2, 2, 88, 93, 5, 4, 3, 2, 89, 90, 7, 6, 2, 2, 90, 92, 5, 4, 3, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3, 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 97, 3, 2, 2, 2, 95, 93, 3, 2, 2, 2, 96, 88, 3, 2, 2, 2, 96, 97, 3, 2, 2, 2, 97, 98, 3, 2, 2, 2, 98, 99, 7, 5, 2, 2, 99, 19, 3, 2, 2, 2, 100, 101, 7, 4, 2, 2, 101, 106, 5, 4, 3, 2, 102, 103, 7, 6, 2, 2, 103, 105, 5, 4, 3, 2, 104, 102, 3, 2, 2, 2, 105, 108, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2, 107, 109, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 109, 110, 7, 5, 2, 2, 110, 21, 3, 2, 2, 2, 11, 40, 56, 62, 66, 68, 75, 93, 96, 106]

@@ -0,0 +1,59 @@
SPACE=1
LR_BRACKET=2
RR_BRACKET=3
COMMA=4
SINGLE_QUOTE_SYMB=5
DOUBLE_QUOTE_SYMB=6
AND=7
OR=8
XOR=9
NOT=10
STAR=11
DIVIDE=12
MODULE=13
PLUS=14
MINUS=15
EQUAL=16
NOT_EQUAL=17
GREATER=18
GREATER_OR_EQUAL=19
LESS=20
LESS_GREATER=21
LESS_OR_EQUAL=22
LIKE=23
EXISTS=24
IN=25
TRUE=26
FALSE=27
DQUOTED_STRING_LITERAL=28
SQUOTED_STRING_LITERAL=29
INTEGER_LITERAL=30
IDENTIFIER=31
IDENTIFIER_WITH_NUMBER=32
FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
'('=2
')'=3
','=4
'\''=5
'"'=6
'AND'=7
'OR'=8
'XOR'=9
'NOT'=10
'*'=11
'/'=12
'%'=13
'+'=14
'-'=15
'='=16
'!='=17
'>'=18
'>='=19
'<'=20
'<>'=21
'<='=22
'LIKE'=23
'EXISTS'=24
'IN'=25
'TRUE'=26
'FALSE'=27

File diff suppressed because one or more lines are too long

@@ -0,0 +1,59 @@
SPACE=1
LR_BRACKET=2
RR_BRACKET=3
COMMA=4
SINGLE_QUOTE_SYMB=5
DOUBLE_QUOTE_SYMB=6
AND=7
OR=8
XOR=9
NOT=10
STAR=11
DIVIDE=12
MODULE=13
PLUS=14
MINUS=15
EQUAL=16
NOT_EQUAL=17
GREATER=18
GREATER_OR_EQUAL=19
LESS=20
LESS_GREATER=21
LESS_OR_EQUAL=22
LIKE=23
EXISTS=24
IN=25
TRUE=26
FALSE=27
DQUOTED_STRING_LITERAL=28
SQUOTED_STRING_LITERAL=29
INTEGER_LITERAL=30
IDENTIFIER=31
IDENTIFIER_WITH_NUMBER=32
FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
'('=2
')'=3
','=4
'\''=5
'"'=6
'AND'=7
'OR'=8
'XOR'=9
'NOT'=10
'*'=11
'/'=12
'%'=13
'+'=14
'-'=15
'='=16
'!='=17
'>'=18
'>='=19
'<'=20
'<>'=21
'<='=22
'LIKE'=23
'EXISTS'=24
'IN'=25
'TRUE'=26
'FALSE'=27

@@ -0,0 +1,109 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
package gen // CESQLParser
import "github.com/antlr/antlr4/runtime/Go/antlr"
type BaseCESQLParserVisitor struct {
*antlr.BaseParseTreeVisitor
}
func (v *BaseCESQLParserVisitor) VisitCesql(ctx *CesqlContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitInExpression(ctx *InExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitAtomExpression(ctx *AtomExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitExistsExpression(ctx *ExistsExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitLikeExpression(ctx *LikeExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitSubExpression(ctx *SubExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBooleanAtom(ctx *BooleanAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitIntegerAtom(ctx *IntegerAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitStringAtom(ctx *StringAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitIdentifier(ctx *IdentifierContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitStringLiteral(ctx *StringLiteralContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *BaseCESQLParserVisitor) VisitSetExpression(ctx *SetExpressionContext) interface{} {
return v.VisitChildren(ctx)
}

@@ -0,0 +1,231 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
package gen
import (
"fmt"
"unicode"
"github.com/antlr/antlr4/runtime/Go/antlr"
)
// Suppress unused import error
var _ = fmt.Printf
var _ = unicode.IsLetter
var serializedLexerAtn = []uint16{
3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 237,
8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7,
9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12,
4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4,
18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23,
9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9,
28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33,
4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4,
39, 9, 39, 4, 40, 9, 40, 3, 2, 6, 2, 83, 10, 2, 13, 2, 14, 2, 84, 3, 2,
3, 2, 3, 3, 6, 3, 90, 10, 3, 13, 3, 14, 3, 91, 3, 4, 3, 4, 3, 4, 3, 4,
3, 4, 3, 4, 7, 4, 100, 10, 4, 12, 4, 14, 4, 103, 11, 4, 3, 4, 3, 4, 3,
5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 113, 10, 5, 12, 5, 14, 5, 116, 11,
5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 7, 7, 124, 10, 7, 12, 7, 14, 7,
127, 11, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12,
3, 12, 3, 13, 3, 13, 5, 13, 141, 10, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3,
15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17,
3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3,
23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 27,
3, 27, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3,
30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32,
3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3,
34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 6, 37, 217, 10, 37, 13, 37,
14, 37, 218, 3, 38, 6, 38, 222, 10, 38, 13, 38, 14, 38, 223, 3, 39, 6,
39, 227, 10, 39, 13, 39, 14, 39, 228, 3, 40, 3, 40, 7, 40, 233, 10, 40,
12, 40, 14, 40, 236, 11, 40, 2, 2, 41, 3, 3, 5, 2, 7, 2, 9, 2, 11, 2, 13,
2, 15, 4, 17, 5, 19, 6, 21, 7, 23, 8, 25, 2, 27, 9, 29, 10, 31, 11, 33,
12, 35, 13, 37, 14, 39, 15, 41, 16, 43, 17, 45, 18, 47, 19, 49, 20, 51,
21, 53, 22, 55, 23, 57, 24, 59, 25, 61, 26, 63, 27, 65, 28, 67, 29, 69,
30, 71, 31, 73, 32, 75, 33, 77, 34, 79, 35, 3, 2, 10, 5, 2, 11, 12, 15,
15, 34, 34, 5, 2, 50, 59, 67, 92, 99, 124, 4, 2, 36, 36, 94, 94, 4, 2,
41, 41, 94, 94, 3, 2, 50, 59, 3, 2, 67, 92, 4, 2, 67, 92, 97, 97, 4, 2,
67, 92, 99, 124, 2, 244, 2, 3, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3,
2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 27,
3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2,
35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2,
2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2,
2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2,
2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3,
2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73,
3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 3,
82, 3, 2, 2, 2, 5, 89, 3, 2, 2, 2, 7, 93, 3, 2, 2, 2, 9, 106, 3, 2, 2,
2, 11, 119, 3, 2, 2, 2, 13, 121, 3, 2, 2, 2, 15, 128, 3, 2, 2, 2, 17, 130,
3, 2, 2, 2, 19, 132, 3, 2, 2, 2, 21, 134, 3, 2, 2, 2, 23, 136, 3, 2, 2,
2, 25, 140, 3, 2, 2, 2, 27, 142, 3, 2, 2, 2, 29, 146, 3, 2, 2, 2, 31, 149,
3, 2, 2, 2, 33, 153, 3, 2, 2, 2, 35, 157, 3, 2, 2, 2, 37, 159, 3, 2, 2,
2, 39, 161, 3, 2, 2, 2, 41, 163, 3, 2, 2, 2, 43, 165, 3, 2, 2, 2, 45, 167,
3, 2, 2, 2, 47, 169, 3, 2, 2, 2, 49, 172, 3, 2, 2, 2, 51, 174, 3, 2, 2,
2, 53, 177, 3, 2, 2, 2, 55, 179, 3, 2, 2, 2, 57, 182, 3, 2, 2, 2, 59, 185,
3, 2, 2, 2, 61, 190, 3, 2, 2, 2, 63, 197, 3, 2, 2, 2, 65, 200, 3, 2, 2,
2, 67, 205, 3, 2, 2, 2, 69, 211, 3, 2, 2, 2, 71, 213, 3, 2, 2, 2, 73, 216,
3, 2, 2, 2, 75, 221, 3, 2, 2, 2, 77, 226, 3, 2, 2, 2, 79, 230, 3, 2, 2,
2, 81, 83, 9, 2, 2, 2, 82, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 82,
3, 2, 2, 2, 84, 85, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 87, 8, 2, 2, 2,
87, 4, 3, 2, 2, 2, 88, 90, 9, 3, 2, 2, 89, 88, 3, 2, 2, 2, 90, 91, 3, 2,
2, 2, 91, 89, 3, 2, 2, 2, 91, 92, 3, 2, 2, 2, 92, 6, 3, 2, 2, 2, 93, 101,
7, 36, 2, 2, 94, 95, 7, 94, 2, 2, 95, 100, 11, 2, 2, 2, 96, 97, 7, 36,
2, 2, 97, 100, 7, 36, 2, 2, 98, 100, 10, 4, 2, 2, 99, 94, 3, 2, 2, 2, 99,
96, 3, 2, 2, 2, 99, 98, 3, 2, 2, 2, 100, 103, 3, 2, 2, 2, 101, 99, 3, 2,
2, 2, 101, 102, 3, 2, 2, 2, 102, 104, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2,
104, 105, 7, 36, 2, 2, 105, 8, 3, 2, 2, 2, 106, 114, 7, 41, 2, 2, 107,
108, 7, 94, 2, 2, 108, 113, 11, 2, 2, 2, 109, 110, 7, 41, 2, 2, 110, 113,
7, 41, 2, 2, 111, 113, 10, 5, 2, 2, 112, 107, 3, 2, 2, 2, 112, 109, 3,
2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 116, 3, 2, 2, 2, 114, 112, 3, 2, 2,
2, 114, 115, 3, 2, 2, 2, 115, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117,
118, 7, 41, 2, 2, 118, 10, 3, 2, 2, 2, 119, 120, 9, 6, 2, 2, 120, 12, 3,
2, 2, 2, 121, 125, 9, 7, 2, 2, 122, 124, 9, 8, 2, 2, 123, 122, 3, 2, 2,
2, 124, 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126,
14, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 129, 7, 42, 2, 2, 129, 16, 3,
2, 2, 2, 130, 131, 7, 43, 2, 2, 131, 18, 3, 2, 2, 2, 132, 133, 7, 46, 2,
2, 133, 20, 3, 2, 2, 2, 134, 135, 7, 41, 2, 2, 135, 22, 3, 2, 2, 2, 136,
137, 7, 36, 2, 2, 137, 24, 3, 2, 2, 2, 138, 141, 5, 21, 11, 2, 139, 141,
5, 23, 12, 2, 140, 138, 3, 2, 2, 2, 140, 139, 3, 2, 2, 2, 141, 26, 3, 2,
2, 2, 142, 143, 7, 67, 2, 2, 143, 144, 7, 80, 2, 2, 144, 145, 7, 70, 2,
2, 145, 28, 3, 2, 2, 2, 146, 147, 7, 81, 2, 2, 147, 148, 7, 84, 2, 2, 148,
30, 3, 2, 2, 2, 149, 150, 7, 90, 2, 2, 150, 151, 7, 81, 2, 2, 151, 152,
7, 84, 2, 2, 152, 32, 3, 2, 2, 2, 153, 154, 7, 80, 2, 2, 154, 155, 7, 81,
2, 2, 155, 156, 7, 86, 2, 2, 156, 34, 3, 2, 2, 2, 157, 158, 7, 44, 2, 2,
158, 36, 3, 2, 2, 2, 159, 160, 7, 49, 2, 2, 160, 38, 3, 2, 2, 2, 161, 162,
7, 39, 2, 2, 162, 40, 3, 2, 2, 2, 163, 164, 7, 45, 2, 2, 164, 42, 3, 2,
2, 2, 165, 166, 7, 47, 2, 2, 166, 44, 3, 2, 2, 2, 167, 168, 7, 63, 2, 2,
168, 46, 3, 2, 2, 2, 169, 170, 7, 35, 2, 2, 170, 171, 7, 63, 2, 2, 171,
48, 3, 2, 2, 2, 172, 173, 7, 64, 2, 2, 173, 50, 3, 2, 2, 2, 174, 175, 7,
64, 2, 2, 175, 176, 7, 63, 2, 2, 176, 52, 3, 2, 2, 2, 177, 178, 7, 62,
2, 2, 178, 54, 3, 2, 2, 2, 179, 180, 7, 62, 2, 2, 180, 181, 7, 64, 2, 2,
181, 56, 3, 2, 2, 2, 182, 183, 7, 62, 2, 2, 183, 184, 7, 63, 2, 2, 184,
58, 3, 2, 2, 2, 185, 186, 7, 78, 2, 2, 186, 187, 7, 75, 2, 2, 187, 188,
7, 77, 2, 2, 188, 189, 7, 71, 2, 2, 189, 60, 3, 2, 2, 2, 190, 191, 7, 71,
2, 2, 191, 192, 7, 90, 2, 2, 192, 193, 7, 75, 2, 2, 193, 194, 7, 85, 2,
2, 194, 195, 7, 86, 2, 2, 195, 196, 7, 85, 2, 2, 196, 62, 3, 2, 2, 2, 197,
198, 7, 75, 2, 2, 198, 199, 7, 80, 2, 2, 199, 64, 3, 2, 2, 2, 200, 201,
7, 86, 2, 2, 201, 202, 7, 84, 2, 2, 202, 203, 7, 87, 2, 2, 203, 204, 7,
71, 2, 2, 204, 66, 3, 2, 2, 2, 205, 206, 7, 72, 2, 2, 206, 207, 7, 67,
2, 2, 207, 208, 7, 78, 2, 2, 208, 209, 7, 85, 2, 2, 209, 210, 7, 71, 2,
2, 210, 68, 3, 2, 2, 2, 211, 212, 5, 7, 4, 2, 212, 70, 3, 2, 2, 2, 213,
214, 5, 9, 5, 2, 214, 72, 3, 2, 2, 2, 215, 217, 5, 11, 6, 2, 216, 215,
3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2,
2, 2, 219, 74, 3, 2, 2, 2, 220, 222, 9, 9, 2, 2, 221, 220, 3, 2, 2, 2,
222, 223, 3, 2, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 2, 2, 224,
76, 3, 2, 2, 2, 225, 227, 9, 3, 2, 2, 226, 225, 3, 2, 2, 2, 227, 228, 3,
2, 2, 2, 228, 226, 3, 2, 2, 2, 228, 229, 3, 2, 2, 2, 229, 78, 3, 2, 2,
2, 230, 234, 9, 7, 2, 2, 231, 233, 9, 8, 2, 2, 232, 231, 3, 2, 2, 2, 233,
236, 3, 2, 2, 2, 234, 232, 3, 2, 2, 2, 234, 235, 3, 2, 2, 2, 235, 80, 3,
2, 2, 2, 236, 234, 3, 2, 2, 2, 15, 2, 84, 91, 99, 101, 112, 114, 125, 140,
218, 223, 228, 234, 3, 8, 2, 2,
}
var lexerChannelNames = []string{
"DEFAULT_TOKEN_CHANNEL", "HIDDEN",
}
var lexerModeNames = []string{
"DEFAULT_MODE",
}
var lexerLiteralNames = []string{
"", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'", "'NOT'",
"'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='", "'<'",
"'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
}
var lexerSymbolicNames = []string{
"", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
"DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
"PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS",
"LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE",
"DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
"IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
}
var lexerRuleNames = []string{
"SPACE", "ID_LITERAL", "DQUOTA_STRING", "SQUOTA_STRING", "INT_DIGIT", "FN_LITERAL",
"LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB", "DOUBLE_QUOTE_SYMB",
"QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE", "PLUS",
"MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS", "LESS_GREATER",
"LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE", "DQUOTED_STRING_LITERAL",
"SQUOTED_STRING_LITERAL", "INTEGER_LITERAL", "IDENTIFIER", "IDENTIFIER_WITH_NUMBER",
"FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
}
type CESQLParserLexer struct {
*antlr.BaseLexer
channelNames []string
modeNames []string
// TODO: EOF string
}
// NewCESQLParserLexer produces a new lexer instance for the optional input antlr.CharStream.
//
// The *CESQLParserLexer instance produced may be reused by calling the SetInputStream method.
// The initial lexer configuration is expensive to construct, and the object is not thread-safe;
// however, if used within a Golang sync.Pool, the construction cost amortizes well and the
// objects can be used in a thread-safe manner.
func NewCESQLParserLexer(input antlr.CharStream) *CESQLParserLexer {
l := new(CESQLParserLexer)
lexerDeserializer := antlr.NewATNDeserializer(nil)
lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState))
for index, ds := range lexerAtn.DecisionToState {
lexerDecisionToDFA[index] = antlr.NewDFA(ds, index)
}
l.BaseLexer = antlr.NewBaseLexer(input)
l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache())
l.channelNames = lexerChannelNames
l.modeNames = lexerModeNames
l.RuleNames = lexerRuleNames
l.LiteralNames = lexerLiteralNames
l.SymbolicNames = lexerSymbolicNames
l.GrammarFileName = "CESQLParser.g4"
// TODO: l.EOF = antlr.TokenEOF
return l
}
// CESQLParserLexer tokens.
const (
CESQLParserLexerSPACE = 1
CESQLParserLexerLR_BRACKET = 2
CESQLParserLexerRR_BRACKET = 3
CESQLParserLexerCOMMA = 4
CESQLParserLexerSINGLE_QUOTE_SYMB = 5
CESQLParserLexerDOUBLE_QUOTE_SYMB = 6
CESQLParserLexerAND = 7
CESQLParserLexerOR = 8
CESQLParserLexerXOR = 9
CESQLParserLexerNOT = 10
CESQLParserLexerSTAR = 11
CESQLParserLexerDIVIDE = 12
CESQLParserLexerMODULE = 13
CESQLParserLexerPLUS = 14
CESQLParserLexerMINUS = 15
CESQLParserLexerEQUAL = 16
CESQLParserLexerNOT_EQUAL = 17
CESQLParserLexerGREATER = 18
CESQLParserLexerGREATER_OR_EQUAL = 19
CESQLParserLexerLESS = 20
CESQLParserLexerLESS_GREATER = 21
CESQLParserLexerLESS_OR_EQUAL = 22
CESQLParserLexerLIKE = 23
CESQLParserLexerEXISTS = 24
CESQLParserLexerIN = 25
CESQLParserLexerTRUE = 26
CESQLParserLexerFALSE = 27
CESQLParserLexerDQUOTED_STRING_LITERAL = 28
CESQLParserLexerSQUOTED_STRING_LITERAL = 29
CESQLParserLexerINTEGER_LITERAL = 30
CESQLParserLexerIDENTIFIER = 31
CESQLParserLexerIDENTIFIER_WITH_NUMBER = 32
CESQLParserLexerFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
)

File diff suppressed because it is too large

@@ -0,0 +1,86 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
package gen // CESQLParser
import "github.com/antlr/antlr4/runtime/Go/antlr"
// A complete Visitor for a parse tree produced by CESQLParserParser.
type CESQLParserVisitor interface {
antlr.ParseTreeVisitor
// Visit a parse tree produced by CESQLParserParser#cesql.
VisitCesql(ctx *CesqlContext) interface{}
// Visit a parse tree produced by CESQLParserParser#inExpression.
VisitInExpression(ctx *InExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#binaryComparisonExpression.
VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#atomExpression.
VisitAtomExpression(ctx *AtomExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#existsExpression.
VisitExistsExpression(ctx *ExistsExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#binaryLogicExpression.
VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#likeExpression.
VisitLikeExpression(ctx *LikeExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#functionInvocationExpression.
VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#binaryMultiplicativeExpression.
VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#unaryLogicExpression.
VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#unaryNumericExpression.
VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#subExpression.
VisitSubExpression(ctx *SubExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#binaryAdditiveExpression.
VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{}
// Visit a parse tree produced by CESQLParserParser#booleanAtom.
VisitBooleanAtom(ctx *BooleanAtomContext) interface{}
// Visit a parse tree produced by CESQLParserParser#integerAtom.
VisitIntegerAtom(ctx *IntegerAtomContext) interface{}
// Visit a parse tree produced by CESQLParserParser#stringAtom.
VisitStringAtom(ctx *StringAtomContext) interface{}
// Visit a parse tree produced by CESQLParserParser#identifierAtom.
VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{}
// Visit a parse tree produced by CESQLParserParser#identifier.
VisitIdentifier(ctx *IdentifierContext) interface{}
// Visit a parse tree produced by CESQLParserParser#functionIdentifier.
VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{}
// Visit a parse tree produced by CESQLParserParser#booleanLiteral.
VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{}
// Visit a parse tree produced by CESQLParserParser#stringLiteral.
VisitStringLiteral(ctx *StringLiteralContext) interface{}
// Visit a parse tree produced by CESQLParserParser#integerLiteral.
VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{}
// Visit a parse tree produced by CESQLParserParser#functionParameterList.
VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{}
// Visit a parse tree produced by CESQLParserParser#setExpression.
VisitSetExpression(ctx *SetExpressionContext) interface{}
}

vendor/github.com/cloudevents/sdk-go/sql/v2/go.mod (generated, vendored)

@@ -0,0 +1,10 @@
module github.com/cloudevents/sdk-go/sql/v2
go 1.14
require (
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96
github.com/cloudevents/sdk-go/v2 v2.8.0
github.com/stretchr/testify v1.5.1
sigs.k8s.io/yaml v1.2.0
)

vendor/github.com/cloudevents/sdk-go/sql/v2/go.sum (generated, vendored)

@@ -0,0 +1,55 @@
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96 h1:2P/dm3KbCLnRHQN/Ma50elhMx1Si9loEZe5hOrsuvuE=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/cloudevents/sdk-go/v2 v2.8.0 h1:kmRaLbsafZmidZ0rZ6h7WOMqCkRMcVTLV5lxV/HKQ9Y=
github.com/cloudevents/sdk-go/v2 v2.8.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

@@ -0,0 +1,44 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package parser
import (
"unicode"
"github.com/antlr/antlr4/runtime/Go/antlr"
)
// Taken from https://github.com/antlr/antlr4/blob/master/doc/resources/case_changing_stream.go
// CaseChangingStream wraps an existing CharStream, but upper-cases or
// lower-cases the input before it is tokenized.
type CaseChangingStream struct {
antlr.CharStream
upper bool
}
// NewCaseChangingStream returns a new CaseChangingStream that forces
// all tokens read from the underlying stream to be either upper case
// or lower case based on the upper argument.
func NewCaseChangingStream(in antlr.CharStream, upper bool) *CaseChangingStream {
return &CaseChangingStream{in, upper}
}
// LA gets the value of the symbol at offset from the current position
// from the underlying CharStream and converts it to either upper case
// or lower case.
func (is *CaseChangingStream) LA(offset int) int {
in := is.CharStream.LA(offset)
if in < 0 {
// Such as antlr.TokenEOF which is -1
return in
}
if is.upper {
return int(unicode.ToUpper(rune(in)))
}
return int(unicode.ToLower(rune(in)))
}
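
Usage sketch: wrapping a raw input stream so the lexer's lookahead sees upper-cased characters while token text keeps its original case (this is how the parser below makes keywords such as 'like' case-insensitive).

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
	"github.com/cloudevents/sdk-go/sql/v2/parser"
)

func main() {
	is := parser.NewCaseChangingStream(antlr.NewInputStream("exists myext"), true)
	fmt.Println(string(rune(is.LA(1)))) // "E": the lexer's lookahead is upper-cased
}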

@@ -0,0 +1,343 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package parser
import (
"strconv"
"strings"
"github.com/antlr/antlr4/runtime/Go/antlr"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/expression"
"github.com/cloudevents/sdk-go/sql/v2/gen"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
type expressionVisitor struct {
parsingErrors []error
}
var _ gen.CESQLParserVisitor = (*expressionVisitor)(nil)
func NewExpressionVisitor() gen.CESQLParserVisitor {
return &expressionVisitor{}
}
// antlr.ParseTreeVisitor implementation
func (v *expressionVisitor) Visit(tree antlr.ParseTree) interface{} {
// If you're wondering why I had to manually implement this stuff:
// https://github.com/antlr/antlr4/issues/2504
switch tree.(type) {
case *gen.CesqlContext:
return v.VisitCesql(tree.(*gen.CesqlContext))
case *gen.AtomExpressionContext:
return v.VisitAtomExpression(tree.(*gen.AtomExpressionContext))
case *gen.UnaryNumericExpressionContext:
return v.VisitUnaryNumericExpression(tree.(*gen.UnaryNumericExpressionContext))
case *gen.UnaryLogicExpressionContext:
return v.VisitUnaryLogicExpression(tree.(*gen.UnaryLogicExpressionContext))
case *gen.BooleanAtomContext:
return v.VisitBooleanAtom(tree.(*gen.BooleanAtomContext))
case *gen.BooleanLiteralContext:
return v.VisitBooleanLiteral(tree.(*gen.BooleanLiteralContext))
case *gen.IntegerAtomContext:
return v.VisitIntegerAtom(tree.(*gen.IntegerAtomContext))
case *gen.IntegerLiteralContext:
return v.VisitIntegerLiteral(tree.(*gen.IntegerLiteralContext))
case *gen.StringAtomContext:
return v.VisitStringAtom(tree.(*gen.StringAtomContext))
case *gen.StringLiteralContext:
return v.VisitStringLiteral(tree.(*gen.StringLiteralContext))
case *gen.ExistsExpressionContext:
return v.VisitExistsExpression(tree.(*gen.ExistsExpressionContext))
case *gen.InExpressionContext:
return v.VisitInExpression(tree.(*gen.InExpressionContext))
case *gen.IdentifierAtomContext:
return v.VisitIdentifierAtom(tree.(*gen.IdentifierAtomContext))
case *gen.IdentifierContext:
return v.VisitIdentifier(tree.(*gen.IdentifierContext))
case *gen.BinaryMultiplicativeExpressionContext:
return v.VisitBinaryMultiplicativeExpression(tree.(*gen.BinaryMultiplicativeExpressionContext))
case *gen.BinaryAdditiveExpressionContext:
return v.VisitBinaryAdditiveExpression(tree.(*gen.BinaryAdditiveExpressionContext))
case *gen.SubExpressionContext:
return v.VisitSubExpression(tree.(*gen.SubExpressionContext))
case *gen.BinaryLogicExpressionContext:
return v.VisitBinaryLogicExpression(tree.(*gen.BinaryLogicExpressionContext))
case *gen.BinaryComparisonExpressionContext:
return v.VisitBinaryComparisonExpression(tree.(*gen.BinaryComparisonExpressionContext))
case *gen.LikeExpressionContext:
return v.VisitLikeExpression(tree.(*gen.LikeExpressionContext))
case *gen.FunctionInvocationExpressionContext:
return v.VisitFunctionInvocationExpression(tree.(*gen.FunctionInvocationExpressionContext))
}
return nil
}
func (v *expressionVisitor) VisitChildren(node antlr.RuleNode) interface{} {
return v.Visit(node.GetChild(0).(antlr.ParseTree))
}
func (v *expressionVisitor) VisitTerminal(node antlr.TerminalNode) interface{} {
// We never visit terminal nodes
return nil
}
func (v *expressionVisitor) VisitErrorNode(node antlr.ErrorNode) interface{} {
// We already collect errors using the error listener
return nil
}
// gen.CESQLParserVisitor implementation
func (v *expressionVisitor) VisitInExpression(ctx *gen.InExpressionContext) interface{} {
leftExpression := v.Visit(ctx.Expression()).(cesql.Expression)
var setExpression []cesql.Expression
for _, expr := range ctx.SetExpression().(*gen.SetExpressionContext).AllExpression() {
setExpression = append(setExpression, v.Visit(expr).(cesql.Expression))
}
if ctx.NOT() != nil {
return expression.NewNotExpression(expression.NewInExpression(leftExpression, setExpression))
}
return expression.NewInExpression(leftExpression, setExpression)
}
func (v *expressionVisitor) VisitBinaryComparisonExpression(ctx *gen.BinaryComparisonExpressionContext) interface{} {
if ctx.LESS() != nil {
return expression.NewLessExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.LESS_OR_EQUAL() != nil {
return expression.NewLessOrEqualExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.GREATER() != nil {
return expression.NewGreaterExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.GREATER_OR_EQUAL() != nil {
return expression.NewGreaterOrEqualExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.EQUAL() != nil {
return expression.NewEqualExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else {
return expression.NewNotEqualExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
}
}
func (v *expressionVisitor) VisitExistsExpression(ctx *gen.ExistsExpressionContext) interface{} {
return expression.NewExistsExpression(strings.ToLower(ctx.Identifier().GetText()))
}
func (v *expressionVisitor) VisitBinaryLogicExpression(ctx *gen.BinaryLogicExpressionContext) interface{} {
if ctx.AND() != nil {
return expression.NewAndExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.OR() != nil {
return expression.NewOrExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else {
return expression.NewXorExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
}
}
func (v *expressionVisitor) VisitLikeExpression(ctx *gen.LikeExpressionContext) interface{} {
patternContext := ctx.StringLiteral().(*gen.StringLiteralContext)
var pattern string
if patternContext.DQUOTED_STRING_LITERAL() != nil {
// Parse double quoted string
pattern = dQuotedStringToString(patternContext.DQUOTED_STRING_LITERAL().GetText())
} else {
// Parse single quoted string
pattern = sQuotedStringToString(patternContext.SQUOTED_STRING_LITERAL().GetText())
}
likeExpression, err := expression.NewLikeExpression(v.Visit(ctx.Expression()).(cesql.Expression), pattern)
if err != nil {
v.parsingErrors = append(v.parsingErrors, err)
return noopExpression{}
}
if ctx.NOT() != nil {
return expression.NewNotExpression(likeExpression)
}
return likeExpression
}
func (v *expressionVisitor) VisitFunctionInvocationExpression(ctx *gen.FunctionInvocationExpressionContext) interface{} {
paramsCtx := ctx.FunctionParameterList().(*gen.FunctionParameterListContext)
name := ctx.FunctionIdentifier().GetText()
var args []cesql.Expression
for _, expr := range paramsCtx.AllExpression() {
args = append(args, v.Visit(expr).(cesql.Expression))
}
return expression.NewFunctionInvocationExpression(strings.ToUpper(name), args)
}
func (v *expressionVisitor) VisitBinaryMultiplicativeExpression(ctx *gen.BinaryMultiplicativeExpressionContext) interface{} {
if ctx.STAR() != nil {
return expression.NewMultiplicationExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else if ctx.MODULE() != nil {
return expression.NewModuleExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else {
return expression.NewDivisionExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
}
}
func (v *expressionVisitor) VisitUnaryLogicExpression(ctx *gen.UnaryLogicExpressionContext) interface{} {
return expression.NewNotExpression(
v.Visit(ctx.Expression()).(cesql.Expression),
)
}
func (v *expressionVisitor) VisitUnaryNumericExpression(ctx *gen.UnaryNumericExpressionContext) interface{} {
return expression.NewNegateExpression(
v.Visit(ctx.Expression()).(cesql.Expression),
)
}
func (v *expressionVisitor) VisitSubExpression(ctx *gen.SubExpressionContext) interface{} {
return v.Visit(ctx.Expression())
}
func (v *expressionVisitor) VisitBinaryAdditiveExpression(ctx *gen.BinaryAdditiveExpressionContext) interface{} {
if ctx.PLUS() != nil {
return expression.NewSumExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
} else {
return expression.NewDifferenceExpression(
v.Visit(ctx.Expression(0)).(cesql.Expression),
v.Visit(ctx.Expression(1)).(cesql.Expression),
)
}
}
func (v *expressionVisitor) VisitIdentifier(ctx *gen.IdentifierContext) interface{} {
return expression.NewIdentifierExpression(strings.ToLower(ctx.GetText()))
}
func (v *expressionVisitor) VisitBooleanLiteral(ctx *gen.BooleanLiteralContext) interface{} {
return expression.NewLiteralExpression(ctx.TRUE() != nil)
}
func (v *expressionVisitor) VisitStringLiteral(ctx *gen.StringLiteralContext) interface{} {
var str string
if ctx.DQUOTED_STRING_LITERAL() != nil {
// Parse double quoted string
str = dQuotedStringToString(ctx.DQUOTED_STRING_LITERAL().GetText())
} else {
// Parse single quoted string
str = sQuotedStringToString(ctx.SQUOTED_STRING_LITERAL().GetText())
}
return expression.NewLiteralExpression(str)
}
func (v *expressionVisitor) VisitIntegerLiteral(ctx *gen.IntegerLiteralContext) interface{} {
val, err := strconv.Atoi(ctx.GetText())
if err != nil {
v.parsingErrors = append(v.parsingErrors, err)
}
return expression.NewLiteralExpression(int32(val))
}
// gen.CESQLParserVisitor implementation - noop methods
func (v *expressionVisitor) VisitCesql(ctx *gen.CesqlContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitAtomExpression(ctx *gen.AtomExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitBooleanAtom(ctx *gen.BooleanAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitIntegerAtom(ctx *gen.IntegerAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitStringAtom(ctx *gen.StringAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitIdentifierAtom(ctx *gen.IdentifierAtomContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitSetExpression(ctx *gen.SetExpressionContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitFunctionIdentifier(ctx *gen.FunctionIdentifierContext) interface{} {
return v.VisitChildren(ctx)
}
func (v *expressionVisitor) VisitFunctionParameterList(ctx *gen.FunctionParameterListContext) interface{} {
return v.VisitChildren(ctx)
}
// noop expression. This is necessary to keep walking the tree even when parsing has failed
type noopExpression struct{}
func (n noopExpression) Evaluate(cloudevents.Event) (interface{}, error) {
return 0, nil
}
// Utilities
func dQuotedStringToString(str string) string {
str = str[1 : len(str)-1]
return strings.ReplaceAll(str, "\\\"", "\"")
}
func sQuotedStringToString(str string) string {
str = str[1 : len(str)-1]
return strings.ReplaceAll(str, "\\'", "'")
}

@@ -0,0 +1,74 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package parser
import (
"errors"
"fmt"
"strings"
"github.com/antlr/antlr4/runtime/Go/antlr"
"github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/gen"
)
type Parser struct {
// TODO parser options
}
func (p *Parser) Parse(input string) (v2.Expression, error) {
var is antlr.CharStream = antlr.NewInputStream(input)
is = NewCaseChangingStream(is, true)
// Create the CESQL lexer
lexer := gen.NewCESQLParserLexer(is)
var stream antlr.TokenStream = antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
// Create the CESQL parser
antlrParser := gen.NewCESQLParserParser(stream)
antlrParser.RemoveErrorListeners()
collectingErrorListener := errorListener{}
antlrParser.AddErrorListener(&collectingErrorListener)
// Finally walk the tree
visitor := expressionVisitor{}
result := antlrParser.Cesql().Accept(&visitor)
if result == nil {
return nil, mergeErrs(append(collectingErrorListener.errs, visitor.parsingErrors...))
}
return result.(v2.Expression), mergeErrs(append(collectingErrorListener.errs, visitor.parsingErrors...))
}
type errorListener struct {
antlr.DefaultErrorListener
errs []error
}
func (d *errorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
// Use msg rather than e.GetMessage(): the listener can be invoked with a nil
// RecognitionException (e.g. for missing/unwanted token reports).
d.errs = append(d.errs, fmt.Errorf("syntax error: %s", msg))
}
func mergeErrs(errs []error) error {
if len(errs) == 0 {
return nil
}
var errStrings []string
for _, err := range errs {
errStrings = append(errStrings, err.Error())
}
return errors.New(strings.Join(errStrings, ","))
}
var defaultParser = Parser{}
func Parse(input string) (v2.Expression, error) {
return defaultParser.Parse(input)
}
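
An end-to-end sketch of the package-level Parse entry point above (the attribute values are illustrative):

package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/parser"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	expr, err := parser.Parse("subject = 'orders' AND LENGTH(id) > 0")
	if err != nil {
		panic(err)
	}
	event := cloudevents.NewEvent()
	event.SetID("abc")
	event.SetSource("example/source")
	event.SetType("example.type")
	event.SetSubject("orders")
	val, err := expr.Evaluate(event)
	fmt.Println(val, err) // true <nil>
}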

@@ -0,0 +1,107 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package runtime
import (
"errors"
"strings"
cesql "github.com/cloudevents/sdk-go/sql/v2"
"github.com/cloudevents/sdk-go/sql/v2/function"
)
type functionItem struct {
fixedArgsFunctions map[int]cesql.Function
variadicFunction cesql.Function
}
type functionTable map[string]*functionItem
func (table functionTable) AddFunction(function cesql.Function) error {
item := table[function.Name()]
if item == nil {
item = &functionItem{
fixedArgsFunctions: make(map[int]cesql.Function),
}
table[function.Name()] = item
}
if function.IsVariadic() {
if item.variadicFunction != nil {
return errors.New("cannot add the variadic function, " +
"because there is already another variadic function defined with the same name")
}
maxArity := -1
for a := range item.fixedArgsFunctions {
if a > maxArity {
maxArity = a
}
}
if maxArity >= function.Arity() {
return errors.New("cannot add the variadic function, " +
"because there is already another function defined with the same name and same or greater arity")
}
item.variadicFunction = function
return nil
} else {
if _, ok := item.fixedArgsFunctions[function.Arity()]; ok {
return errors.New("cannot add the function, " +
"because there is already another function defined with the same arity and same name")
}
item.fixedArgsFunctions[function.Arity()] = function
return nil
}
}
func (table functionTable) ResolveFunction(name string, args int) cesql.Function {
item := table[strings.ToUpper(name)]
if item == nil {
return nil
}
if fn, ok := item.fixedArgsFunctions[args]; ok {
return fn
}
if item.variadicFunction == nil || item.variadicFunction.Arity() > args {
return nil
}
return item.variadicFunction
}
var globalFunctionTable = functionTable{}
func init() {
for _, fn := range []cesql.Function{
function.IntFunction,
function.BoolFunction,
function.StringFunction,
function.IsBoolFunction,
function.IsIntFunction,
function.AbsFunction,
function.LengthFunction,
function.ConcatFunction,
function.ConcatWSFunction,
function.LowerFunction,
function.UpperFunction,
function.TrimFunction,
function.LeftFunction,
function.RightFunction,
function.SubstringFunction,
function.SubstringWithLengthFunction,
} {
if err := globalFunctionTable.AddFunction(fn); err != nil {
panic(err)
}
}
}
func ResolveFunction(name string, args int) cesql.Function {
return globalFunctionTable.ResolveFunction(name, args)
}
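
A sketch of the resolution rules implemented above: an exact fixed-arity match wins; otherwise the variadic entry is used when the call site supplies at least as many arguments as the variadic function's arity.

package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/runtime"
)

func main() {
	fmt.Println(runtime.ResolveFunction("SUBSTRING", 2).Arity()) // 2: exact fixed-arity match
	fmt.Println(runtime.ResolveFunction("CONCAT", 5).Arity())    // 0: variadic CONCAT takes any count
	fmt.Println(runtime.ResolveFunction("LENGTH", 3))            // <nil>: no overload with that arity
}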

47
vendor/github.com/cloudevents/sdk-go/sql/v2/types.go generated vendored Normal file
View File

@@ -0,0 +1,47 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package v2

type Type uint8

const (
	StringType Type = iota
	IntegerType
	BooleanType
	AnyType
)

func TypePtr(t Type) *Type {
	return &t
}

func (t Type) IsSameType(val interface{}) bool {
	return TypeFromVal(val) == t
}

func (t Type) String() string {
	switch t {
	case IntegerType:
		return "Integer"
	case BooleanType:
		return "Boolean"
	case StringType:
		return "String"
	}
	return "Any"
}

func TypeFromVal(val interface{}) Type {
	switch val.(type) {
	case string:
		return StringType
	case int32:
		return IntegerType
	case bool:
		return BooleanType
	}
	return AnyType
}
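A short sketch exercising the helpers above; note that only `int32` maps to `IntegerType`, so other integer widths fall through to `AnyType` (import path assumed):

package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
)

func main() {
	fmt.Println(cesql.TypeFromVal("hello"))         // String
	fmt.Println(cesql.TypeFromVal(int32(42)))       // Integer
	fmt.Println(cesql.TypeFromVal(int64(42)))       // Any: only int32 is Integer
	fmt.Println(cesql.BooleanType.IsSameType(true)) // true
}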


@@ -0,0 +1,65 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package utils

import (
	"fmt"
	"strconv"
	"strings"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
)

func Cast(val interface{}, target cesql.Type) (interface{}, error) {
	if target.IsSameType(val) {
		return val, nil
	}
	switch target {
	case cesql.StringType:
		switch v := val.(type) {
		case int32:
			return strconv.Itoa(int(v)), nil
		case bool:
			return strconv.FormatBool(v), nil
		}
		// Casting to string is always defined
		return fmt.Sprintf("%v", val), nil
	case cesql.IntegerType:
		if s, ok := val.(string); ok {
			v, err := strconv.Atoi(s)
			if err != nil {
				err = fmt.Errorf("cannot cast from String to Integer: %w", err)
			}
			return int32(v), err
		}
		return 0, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
	case cesql.BooleanType:
		if s, ok := val.(string); ok {
			switch strings.ToLower(s) {
			case "true":
				return true, nil
			case "false":
				return false, nil
			}
			return false, fmt.Errorf("cannot cast String to Boolean, actual value: %v", val)
		}
		return false, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
	}
	// AnyType doesn't need casting
	return val, nil
}
func CanCast(val interface{}, target cesql.Type) bool {
	_, err := Cast(val, target)
	return err == nil
}
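A small sketch of `Cast` and `CanCast` under the rules above (import paths assumed):

package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
)

func main() {
	v, err := utils.Cast("42", cesql.IntegerType)
	fmt.Println(v, err) // 42 <nil> (an int32)

	v, err = utils.Cast(int32(1), cesql.StringType)
	fmt.Println(v, err) // 1 <nil> (casting to string always succeeds)

	fmt.Println(utils.CanCast("maybe", cesql.BooleanType)) // false
}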


@@ -0,0 +1,67 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/
package utils

import (
	"fmt"
	"time"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/types"
)

func GetAttribute(event cloudevents.Event, attributeName string) interface{} {
	var val interface{}
	if a := spec.V1.Attribute(attributeName); a != nil { // Standard attribute
		val = a.Get(event.Context)
	} else {
		val = event.Extensions()[attributeName]
	}

	if val == nil {
		return nil
	}
	// Type coercion: normalize values into the CESQL type system
	// (String, Integer as int32, Boolean).
	switch v := val.(type) {
	case bool, int32, string:
		return v
	case int8:
		return int32(v)
	case uint8:
		return int32(v)
	case int16:
		return int32(v)
	case uint16:
		return int32(v)
	case uint32:
		return int32(v) // values above math.MaxInt32 wrap
	case int64:
		return int32(v) // truncated to 32 bits
	case uint64:
		return int32(v) // truncated to 32 bits
	case time.Time:
		return v.Format(time.RFC3339Nano)
	case []byte:
		return types.FormatBinary(v)
	}
	return fmt.Sprintf("%v", val)
}
func ContainsAttribute(event cloudevents.Event, attributeName string) bool {
	// The required context attributes are always present.
	if attributeName == "specversion" || attributeName == "id" || attributeName == "source" || attributeName == "type" {
		return true
	}
	if attr := spec.V1.Attribute(attributeName); attr != nil {
		return attr.Get(event.Context) != nil
	}
	_, ok := event.Extensions()[attributeName]
	return ok
}
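A hedged sketch of the two helpers above; the event setters are the public sdk-go v2 API, and the coerced results follow the switch in `GetAttribute`:

package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()
	e.SetID("abc")
	e.SetSource("example/uri")
	e.SetType("example.type")
	e.SetExtension("sequence", 10)

	fmt.Println(utils.GetAttribute(e, "id"))           // abc
	fmt.Println(utils.GetAttribute(e, "sequence"))     // 10
	fmt.Println(utils.ContainsAttribute(e, "id"))      // true: required attribute
	fmt.Println(utils.ContainsAttribute(e, "subject")) // false: optional and unset
}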


@@ -1,19 +0,0 @@
-language: go
-go:
-  - 1.14
-  - 1.13
-install:
-  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-  - go get github.com/jessevdk/go-flags
-script:
-  - go get
-  - go test -cover ./...
-  - cd ./v5
-  - go get
-  - go test -cover ./...
-notifications:
-  email: false


@@ -39,6 +39,25 @@ go get -u github.com/evanphx/json-patch/v5
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.
These global variables control the behavior of `jsonpatch.Apply`.
An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions`, whose behavior
is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
The `jsonpatch.ApplyOptions` structure includes the configuration options above
and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
`remove` operations whose `path` points to a non-existent location in the JSON document.
`AllowMissingPathOnRemove` defaults to `false`, which causes `jsonpatch.ApplyWithOptions`
to return an error when it hits a missing `path` on `remove`.
When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
that `add` operations produce all the `path` elements that are missing from the target object.
Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
whose values are populated from the global configuration variables.
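For example, a `remove` aimed at a missing path can be tolerated like this (a minimal sketch against the v5 API described above):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name": "alice"}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "remove", "path": "/age"}]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.AllowMissingPathOnRemove = true // tolerate the missing /age

	out, err := patch.ApplyWithOptions(doc, opts)
	fmt.Println(string(out), err) // {"name":"alice"} <nil>
}
```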
## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
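A minimal sketch (key order in the output follows `encoding/json`'s alphabetical map ordering):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	original := []byte(`{"name": "alice", "age": 30}`)
	modified := []byte(`{"name": "alice", "age": 31}`)

	// Diff the two documents into an RFC 7396 merge patch...
	patch, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"age":31}

	// ...and apply it back to the original.
	merged, _ := jsonpatch.MergePatch(original, patch)
	fmt.Println(string(merged)) // {"age":31,"name":"alice"}
}
```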


@@ -38,7 +38,10 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
 		cur, ok := (*doc)[k]

 		if !ok || cur == nil {
-			pruneNulls(v)
+			if !mergeMerge {
+				pruneNulls(v)
+			}
 			(*doc)[k] = v
 		} else {
 			(*doc)[k] = merge(cur, v, mergeMerge)
@@ -79,8 +82,8 @@ func pruneAryNulls(ary *partialArray) *partialArray {
 	for _, v := range *ary {
 		if v != nil {
 			pruneNulls(v)
-			newAry = append(newAry, v)
 		}
+		newAry = append(newAry, v)
 	}

 	*ary = newAry
@@ -88,8 +91,8 @@ func pruneAryNulls(ary *partialArray) *partialArray {
 	return ary
 }

-var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
-var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
 var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")

 // MergeMergePatches merges two merge patches together, such that
@@ -114,19 +117,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	patchErr := json.Unmarshal(patchData, patch)

 	if _, ok := docErr.(*json.SyntaxError); ok {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	if _, ok := patchErr.(*json.SyntaxError); ok {
-		return nil, errBadJSONPatch
+		return nil, ErrBadJSONPatch
 	}

 	if docErr == nil && *doc == nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	if patchErr == nil && *patch == nil {
-		return nil, errBadJSONPatch
+		return nil, ErrBadJSONPatch
 	}

 	if docErr != nil || patchErr != nil {
@@ -142,7 +145,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	patchErr = json.Unmarshal(patchData, patchAry)

 	if patchErr != nil {
-		return nil, errBadJSONPatch
+		return nil, ErrBadJSONPatch
 	}

 	pruneAryNulls(patchAry)
@@ -150,7 +153,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 	out, patchErr := json.Marshal(patchAry)

 	if patchErr != nil {
-		return nil, errBadJSONPatch
+		return nil, ErrBadJSONPatch
 	}

 	return out, nil
@@ -207,12 +210,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 	err := json.Unmarshal(originalJSON, &originalDoc)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	dest, err := getDiff(originalDoc, modifiedDoc)
@@ -233,17 +236,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
 	err := json.Unmarshal(originalJSON, &originalDocs)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
 	if err != nil {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	total := len(originalDocs)
 	if len(modifiedDocs) != total {
-		return nil, errBadJSONDoc
+		return nil, ErrBadJSONDoc
 	}

 	result := []json.RawMessage{}
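With the sentinels exported by this change, callers can test for them directly; a minimal sketch (the errors are plain `fmt.Errorf` values, so `errors.Is` reduces to equality):

package main

import (
	"errors"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// An unparseable document surfaces as the exported sentinel.
	_, err := jsonpatch.MergePatch([]byte(`not json`), []byte(`{}`))
	fmt.Println(errors.Is(err, jsonpatch.ErrBadJSONDoc)) // true
}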


@@ -721,6 +721,10 @@ func (p Patch) Apply(doc []byte) ([]byte, error) {
 // ApplyIndent mutates a JSON document according to the patch, and returns the new
 // document indented.
 func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+	if len(doc) == 0 {
+		return doc, nil
+	}
+
 	var pd container
 	if doc[0] == '[' {
 		pd = &partialArray{}
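The added guard makes `ApplyIndent` return an empty document unchanged instead of indexing `doc[0]`; a minimal sketch using the root `github.com/evanphx/json-patch` module this file is vendored from:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	patch, _ := jsonpatch.DecodePatch([]byte(`[{"op": "add", "path": "/b", "value": 2}]`))

	out, err := patch.ApplyIndent([]byte(`{"a": 1}`), "  ")
	fmt.Println(string(out), err) // an indented {"a": 1, "b": 2}

	// With the guard above, an empty document round-trips instead of panicking.
	out, err = patch.ApplyIndent(nil, "  ")
	fmt.Println(out, err) // [] <nil>
}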


@@ -796,7 +796,7 @@ func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
 }

 // DescendGreaterThan calls the iterator for every value in the tree within
-// the range (pivot, last], until iterator returns false.
+// the range [last, pivot), until iterator returns false.
 func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
 	if t.root == nil {
 		return
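A small sketch of the corrected contract, using the google/btree v1 API this file is vendored from:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tree := btree.New(2)
	for i := 1; i <= 5; i++ {
		tree.ReplaceOrInsert(btree.Int(i))
	}

	// Iterates descending from the last item down to, but not including,
	// the pivot: the corrected [last, pivot) range.
	tree.DescendGreaterThan(btree.Int(2), func(i btree.Item) bool {
		fmt.Println(i) // 5, then 4, then 3
		return true
	})
}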
