mirror of https://github.com/knative/client.git

upgrade to latest dependencies (#1555)

bumping knative.dev/eventing 4d705ed...d829745:
> d829745 Temporary disable CE SQL validation (#6012)
> aab5298 upgrade to latest dependencies (#6008)
bumping knative.dev/networking 2d4af36...62388a5:
> 62388a5 Update community files (#595)

Signed-off-by: Knative Automation <automation@knative.team>

parent 2c9a236917
commit a2bb4049d0
go.mod (4 changed lines)

@@ -22,9 +22,9 @@ require (
 	k8s.io/client-go v0.21.4
 	k8s.io/code-generator v0.21.4
 	k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 // indirect
-	knative.dev/eventing v0.28.1-0.20211222075718-4d705ede0dfa
+	knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455
 	knative.dev/hack v0.0.0-20211222071919-abd085fc43de
-	knative.dev/networking v0.0.0-20211222131718-2d4af360eb01
+	knative.dev/networking v0.0.0-20211223013028-62388a5f2853
 	knative.dev/pkg v0.0.0-20211216142117-79271798f696
 	knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7
 	sigs.k8s.io/yaml v1.3.0
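Both version bumps in the hunk above are Go module pseudo-versions. For example, v0.28.1-0.20211222204918-d8297456d455 encodes a pre-release base (v0.28.1), a UTC commit timestamp (2021-12-22 20:49:18), and a 12-character commit prefix, d8297456d455, which is the d829745 commit named in the message above. A minimal sketch of decoding such a version, assuming golang.org/x/mod is available (it is not a dependency added by this commit):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// The new knative.dev/eventing requirement from the go.mod hunk above.
	v := "v0.28.1-0.20211222204918-d8297456d455"

	fmt.Println(module.IsPseudoVersion(v)) // true

	t, _ := module.PseudoVersionTime(v)  // commit timestamp: 2021-12-22 20:49:18 +0000 UTC
	rev, _ := module.PseudoVersionRev(v) // commit prefix: d8297456d455
	fmt.Println(t, rev)
}

Because pseudo-versions sort by their embedded timestamp, bumping from 20211222075718 to 20211222204918 is what lets the go command treat the new eventing commit as newer.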
go.sum (16 changed lines)

@@ -128,8 +128,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1 h1:9K5yytxEEQc4yIn6c1rvQD6qQilQn9mYIF7pXKPT8i4=
-github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -185,8 +183,6 @@ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc=
 github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.4.1/go.mod h1:lhEpxMrIUkeu9rVRgoAbyqZ8GR8Hd3DUy+thHUxAHoI=
-github.com/cloudevents/sdk-go/sql/v2 v2.7.0 h1:+z92bVkW/B9yCUbpNNiHstb5oyqOaGQH08sYcm0QRo8=
-github.com/cloudevents/sdk-go/sql/v2 v2.7.0/go.mod h1:zstvxyTe102YSI3TJ1Dm3NIglgZJmFqL5Dv8XxroBGw=
 github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY=
 github.com/cloudevents/sdk-go/v2 v2.7.0 h1:Pt+cOKWNG0tZZKRzuvfVsxcWArO0eq/UPKUxskyuSb8=
 github.com/cloudevents/sdk-go/v2 v2.7.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
@@ -1722,20 +1718,20 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 knative.dev/caching v0.0.0-20211206133228-c29dc56d8f03/go.mod h1:xki+LBTL1riXSoU2dKznqUfgOlQ2eO/F1WF+GMXxH0k=
-knative.dev/eventing v0.28.1-0.20211222075718-4d705ede0dfa h1:z4CYEFoz4xvW/RmVfxiLb5rc5kRD/4IIk7a4/7+LS84=
-knative.dev/eventing v0.28.1-0.20211222075718-4d705ede0dfa/go.mod h1:AuLc9dw9a1pJVxdN16lpW4aj+L7K7uz02ECWgHg++k0=
+knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455 h1:LnatQYBFh/tum+ATTVZKB1xV5UxwvA2bhFZUGPSve6I=
+knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455/go.mod h1:4o3oerr1tmjWTV2n33Ar9Ss+jF/QksOsa4/81ghhOVg=
 knative.dev/hack v0.0.0-20211122162614-813559cefdda/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack v0.0.0-20211222071919-abd085fc43de h1:K7UeyvIfdTjznffAZg2L4fDkOuFWEDiaxgEa+B33nP8=
 knative.dev/hack v0.0.0-20211222071919-abd085fc43de/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
-knative.dev/hack/schema v0.0.0-20211203062838-e11ac125e707/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/hack/schema v0.0.0-20211222071919-abd085fc43de/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
 knative.dev/networking v0.0.0-20211209101835-8ef631418fc0/go.mod h1:+ozCw7PVf//G9+HOW04hfWnU8UJE5fmWAQkb+ieMaXY=
-knative.dev/networking v0.0.0-20211222131718-2d4af360eb01 h1:xCyO9JgZwBCDLDpme7V8pZD2E3BOkTn2dBdZxdNsJBc=
-knative.dev/networking v0.0.0-20211222131718-2d4af360eb01/go.mod h1:NTfJpL2xQVdJtdPYuIE2j7rxC4/Cttplh1g0oYqQJFE=
+knative.dev/networking v0.0.0-20211223013028-62388a5f2853 h1:VZ/yJoR/eiyI/wyo1JNEgpRFyRaqGD8paBYOH1gU/nQ=
+knative.dev/networking v0.0.0-20211223013028-62388a5f2853/go.mod h1:NTfJpL2xQVdJtdPYuIE2j7rxC4/Cttplh1g0oYqQJFE=
 knative.dev/pkg v0.0.0-20211206113427-18589ac7627e/go.mod h1:E6B4RTjZyxe55a0kxOlnEHEl71zuG7gghnqYvNBKwBw=
 knative.dev/pkg v0.0.0-20211216142117-79271798f696 h1:L/r5prSBhm+7x4br5g8Gij/OfF4nx12sorqMXCcnpm0=
 knative.dev/pkg v0.0.0-20211216142117-79271798f696/go.mod h1:hrD91/shO1o4KMZa4oWhnbRPmVJhvq86TLy/STF/qf8=
-knative.dev/reconciler-test v0.0.0-20211207070557-0d138a88867b/go.mod h1:dCq1Fuu+eUISdnxABMvoDhefF91DYwE6O3rTYTraXbw=
+knative.dev/reconciler-test v0.0.0-20211222120418-816f2192fec9/go.mod h1:dCq1Fuu+eUISdnxABMvoDhefF91DYwE6O3rTYTraXbw=
 knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7 h1:vkv/sstZZtV9al/ZJ84l8TyWTLPGWZOpk7Ke9d6itBg=
 knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7/go.mod h1:1d8YYUu0hY19KlIRs2SgAn/o64Hr265+3fhOtV3FFVA=
 pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
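Every module version in go.sum carries a pair of checksums: an h1: hash of the module's complete file tree and a /go.mod h1: hash of its go.mod file alone, which is why each bump above replaces two lines. The removed github.com/antlr/antlr4 and github.com/cloudevents/sdk-go/sql/v2 sums reflect the eventing bump to d829745 (which temporarily disables CE SQL validation) dropping the CloudEvents SQL parser and its ANTLR dependency from the module graph; re-running go mod vendor then deletes the vendored ANTLR sources shown below. A sketch of how an h1: digest is derived, assuming golang.org/x/mod is available and using a hypothetical local path (the go command normally hashes the downloaded module zip):

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	mod := "knative.dev/eventing@v0.28.1-0.20211222204918-d8297456d455"

	// HashDir walks an extracted module tree and should produce the same
	// "h1:..." digest that go.sum records for that module version.
	h, err := dirhash.HashDir("/path/to/"+mod, mod, dirhash.Hash1)
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // expected to match the h1: value recorded above
}

go mod verify performs the reverse check, re-hashing the local module cache against the sums recorded in go.sum.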
@@ -1,52 +0,0 @@
[The "BSD 3-clause license"]
Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

=====

MIT License for codepointat.js from https://git.io/codepointat
MIT License for fromcodepoint.js from https://git.io/vDW1m

Copyright Mathias Bynens <https://mathiasbynens.be/>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -1,52 +0,0 @@
[The "BSD 3-clause license"]
Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

=====

MIT License for codepointat.js from https://git.io/codepointat
MIT License for fromcodepoint.js from https://git.io/vDW1m

Copyright Mathias Bynens <https://mathiasbynens.be/>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,152 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

var ATNInvalidAltNumber int

type ATN struct {
	// DecisionToState is the decision points for all rules, subrules, optional
	// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
	DecisionToState []DecisionState

	// grammarType is the ATN type and is used for deserializing ATNs from strings.
	grammarType int

	// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
	lexerActions []LexerAction

	// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
	maxTokenType int

	modeNameToStartState map[string]*TokensStartState

	modeToStartState []*TokensStartState

	// ruleToStartState maps from rule index to starting state number.
	ruleToStartState []*RuleStartState

	// ruleToStopState maps from rule index to stop state number.
	ruleToStopState []*RuleStopState

	// ruleToTokenType maps the rule index to the resulting token type for lexer
	// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
	// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
	// specified, and otherwise is nil.
	ruleToTokenType []int

	states []ATNState
}

func NewATN(grammarType int, maxTokenType int) *ATN {
	return &ATN{
		grammarType:          grammarType,
		maxTokenType:         maxTokenType,
		modeNameToStartState: make(map[string]*TokensStartState),
	}
}

// NextTokensInContext computes the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
	return NewLL1Analyzer(a).Look(s, nil, ctx)
}

// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
	if s.GetNextTokenWithinRule() != nil {
		return s.GetNextTokenWithinRule()
	}

	s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
	s.GetNextTokenWithinRule().readOnly = true

	return s.GetNextTokenWithinRule()
}

func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
	if ctx == nil {
		return a.NextTokensNoContext(s)
	}

	return a.NextTokensInContext(s, ctx)
}

func (a *ATN) addState(state ATNState) {
	if state != nil {
		state.SetATN(a)
		state.SetStateNumber(len(a.states))
	}

	a.states = append(a.states, state)
}

func (a *ATN) removeState(state ATNState) {
	a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
}

func (a *ATN) defineDecisionState(s DecisionState) int {
	a.DecisionToState = append(a.DecisionToState, s)
	s.setDecision(len(a.DecisionToState) - 1)

	return s.getDecision()
}

func (a *ATN) getDecisionState(decision int) DecisionState {
	if len(a.DecisionToState) == 0 {
		return nil
	}

	return a.DecisionToState[decision]
}

// getExpectedTokens computes the set of input symbols which could follow ATN
// state number stateNumber in the specified full parse context ctx and returns
// the set of potentially valid input symbols which could follow the specified
// state in the specified context. This method considers the complete parser
// context, but does not evaluate semantic predicates (i.e. all predicates
// encountered during the calculation are assumed true). If a path in the ATN
// exists from the starting state to the RuleStopState of the outermost context
// without Matching any symbols, Token.EOF is added to the returned set.
//
// A nil ctx defaults to ParserRuleContext.EMPTY.
//
// It panics if the ATN does not contain state stateNumber.
func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
	if stateNumber < 0 || stateNumber >= len(a.states) {
		panic("Invalid state number.")
	}

	s := a.states[stateNumber]
	following := a.NextTokens(s, nil)

	if !following.contains(TokenEpsilon) {
		return following
	}

	expected := NewIntervalSet()

	expected.addSet(following)
	expected.removeOne(TokenEpsilon)

	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := a.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]

		following = a.NextTokens(rt.(*RuleTransition).followState, nil)
		expected.addSet(following)
		expected.removeOne(TokenEpsilon)
		ctx = ctx.GetParent().(RuleContext)
	}

	if following.contains(TokenEpsilon) {
		expected.addOne(TokenEOF)
	}

	return expected
}
@@ -1,295 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

type comparable interface {
	equals(other interface{}) bool
}

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
	comparable

	hash() int

	GetState() ATNState
	GetAlt() int
	GetSemanticContext() SemanticContext

	GetContext() PredictionContext
	SetContext(PredictionContext)

	GetReachesIntoOuterContext() int
	SetReachesIntoOuterContext(int)

	String() string

	getPrecedenceFilterSuppressed() bool
	setPrecedenceFilterSuppressed(bool)
}

type BaseATNConfig struct {
	precedenceFilterSuppressed bool
	state                      ATNState
	alt                        int
	context                    PredictionContext
	semanticContext            SemanticContext
	reachesIntoOuterContext    int
}

func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
	return &BaseATNConfig{
		state:                   old.state,
		alt:                     old.alt,
		context:                 old.context,
		semanticContext:         old.semanticContext,
		reachesIntoOuterContext: old.reachesIntoOuterContext,
	}
}

func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
	return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
}

func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Necessary?
	}

	return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
}

func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
	return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}

func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
	return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
}

func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
	return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}

func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
	return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
}

func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil")
	}

	return &BaseATNConfig{
		state:                      state,
		alt:                        c.GetAlt(),
		context:                    context,
		semanticContext:            semanticContext,
		reachesIntoOuterContext:    c.GetReachesIntoOuterContext(),
		precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
	}
}

func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
	return b.precedenceFilterSuppressed
}

func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
	b.precedenceFilterSuppressed = v
}

func (b *BaseATNConfig) GetState() ATNState {
	return b.state
}

func (b *BaseATNConfig) GetAlt() int {
	return b.alt
}

func (b *BaseATNConfig) SetContext(v PredictionContext) {
	b.context = v
}

func (b *BaseATNConfig) GetContext() PredictionContext {
	return b.context
}

func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
	return b.semanticContext
}

func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
	return b.reachesIntoOuterContext
}

func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
	b.reachesIntoOuterContext = v
}

// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
	if b == o {
		return true
	}

	var other, ok = o.(*BaseATNConfig)

	if !ok {
		return false
	}

	var equal bool

	if b.context == nil {
		equal = other.context == nil
	} else {
		equal = b.context.equals(other.context)
	}

	var (
		nums = b.state.GetStateNumber() == other.state.GetStateNumber()
		alts = b.alt == other.alt
		cons = b.semanticContext.equals(other.semanticContext)
		sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
	)

	return nums && alts && cons && sups && equal
}

func (b *BaseATNConfig) hash() int {
	var c int
	if b.context != nil {
		c = b.context.hash()
	}

	h := murmurInit(7)
	h = murmurUpdate(h, b.state.GetStateNumber())
	h = murmurUpdate(h, b.alt)
	h = murmurUpdate(h, c)
	h = murmurUpdate(h, b.semanticContext.hash())
	return murmurFinish(h, 4)
}

func (b *BaseATNConfig) String() string {
	var s1, s2, s3 string

	if b.context != nil {
		s1 = ",[" + fmt.Sprint(b.context) + "]"
	}

	if b.semanticContext != SemanticContextNone {
		s2 = "," + fmt.Sprint(b.semanticContext)
	}

	if b.reachesIntoOuterContext > 0 {
		s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
	}

	return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
}

type LexerATNConfig struct {
	*BaseATNConfig
	lexerActionExecutor            *LexerActionExecutor
	passedThroughNonGreedyDecision bool
}

func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:       NewBaseATNConfig5(state, alt, context, SemanticContextNone),
		lexerActionExecutor: lexerActionExecutor,
	}
}

func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
		lexerActionExecutor:            c.lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
		lexerActionExecutor:            lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{
		BaseATNConfig:                  NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
		lexerActionExecutor:            c.lexerActionExecutor,
		passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
	}
}

func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
	return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func (l *LexerATNConfig) hash() int {
	var f int
	if l.passedThroughNonGreedyDecision {
		f = 1
	} else {
		f = 0
	}
	h := murmurInit(7)
	h = murmurUpdate(h, l.state.hash())
	h = murmurUpdate(h, l.alt)
	h = murmurUpdate(h, l.context.hash())
	h = murmurUpdate(h, l.semanticContext.hash())
	h = murmurUpdate(h, f)
	h = murmurUpdate(h, l.lexerActionExecutor.hash())
	h = murmurFinish(h, 6)
	return h
}

func (l *LexerATNConfig) equals(other interface{}) bool {
	var othert, ok = other.(*LexerATNConfig)

	if l == other {
		return true
	} else if !ok {
		return false
	} else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
		return false
	}

	var b bool

	if l.lexerActionExecutor != nil {
		b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
	} else {
		b = othert.lexerActionExecutor != nil
	}

	if b {
		return false
	}

	return l.BaseATNConfig.equals(othert.BaseATNConfig)
}

func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
	var ds, ok = target.(DecisionState)

	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
@ -1,387 +0,0 @@
|
||||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
|
||||||
// Use of this file is governed by the BSD 3-clause license that
|
|
||||||
// can be found in the LICENSE.txt file in the project root.
|
|
||||||
|
|
||||||
package antlr
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
type ATNConfigSet interface {
|
|
||||||
hash() int
|
|
||||||
Add(ATNConfig, *DoubleDict) bool
|
|
||||||
AddAll([]ATNConfig) bool
|
|
||||||
|
|
||||||
GetStates() *Set
|
|
||||||
GetPredicates() []SemanticContext
|
|
||||||
GetItems() []ATNConfig
|
|
||||||
|
|
||||||
OptimizeConfigs(interpreter *BaseATNSimulator)
|
|
||||||
|
|
||||||
Equals(other interface{}) bool
|
|
||||||
|
|
||||||
Length() int
|
|
||||||
IsEmpty() bool
|
|
||||||
Contains(ATNConfig) bool
|
|
||||||
ContainsFast(ATNConfig) bool
|
|
||||||
Clear()
|
|
||||||
String() string
|
|
||||||
|
|
||||||
HasSemanticContext() bool
|
|
||||||
SetHasSemanticContext(v bool)
|
|
||||||
|
|
||||||
ReadOnly() bool
|
|
||||||
SetReadOnly(bool)
|
|
||||||
|
|
||||||
GetConflictingAlts() *BitSet
|
|
||||||
SetConflictingAlts(*BitSet)
|
|
||||||
|
|
||||||
FullContext() bool
|
|
||||||
|
|
||||||
GetUniqueAlt() int
|
|
||||||
SetUniqueAlt(int)
|
|
||||||
|
|
||||||
GetDipsIntoOuterContext() bool
|
|
||||||
SetDipsIntoOuterContext(bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
|
|
||||||
// about its elements and can combine similar configurations using a
|
|
||||||
// graph-structured stack.
|
|
||||||
type BaseATNConfigSet struct {
|
|
||||||
cachedHash int
|
|
||||||
|
|
||||||
// configLookup is used to determine whether two BaseATNConfigSets are equal. We
|
|
||||||
// need all configurations with the same (s, i, _, semctx) to be equal. A key
|
|
||||||
// effectively doubles the number of objects associated with ATNConfigs. All
|
|
||||||
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
|
|
||||||
// read-only because a set becomes a DFA state.
|
|
||||||
configLookup *Set
|
|
||||||
|
|
||||||
// configs is the added elements.
|
|
||||||
configs []ATNConfig
|
|
||||||
|
|
||||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
|
||||||
// info together because it saves recomputation. Can we track conflicts as they
|
|
||||||
// are added to save scanning configs later?
|
|
||||||
conflictingAlts *BitSet
|
|
||||||
|
|
||||||
// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
|
|
||||||
// we hit a pred while computing a closure operation. Do not make a DFA state
|
|
||||||
// from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
|
|
||||||
dipsIntoOuterContext bool
|
|
||||||
|
|
||||||
// fullCtx is whether it is part of a full context LL prediction. Used to
|
|
||||||
// determine how to merge $. It is a wildcard with SLL, but not for an LL
|
|
||||||
// context merge.
|
|
||||||
fullCtx bool
|
|
||||||
|
|
||||||
// Used in parser and lexer. In lexer, it indicates we hit a pred
|
|
||||||
// while computing a closure operation. Don't make a DFA state from a.
|
|
||||||
hasSemanticContext bool
|
|
||||||
|
|
||||||
// readOnly is whether it is read-only. Do not
|
|
||||||
// allow any code to manipulate the set if true because DFA states will point at
|
|
||||||
// sets and those must not change. It not protect other fields; conflictingAlts
|
|
||||||
// in particular, which is assigned after readOnly.
|
|
||||||
readOnly bool
|
|
||||||
|
|
||||||
// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
|
|
||||||
// info together because it saves recomputation. Can we track conflicts as they
|
|
||||||
// are added to save scanning configs later?
|
|
||||||
uniqueAlt int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
|
|
||||||
return &BaseATNConfigSet{
|
|
||||||
cachedHash: -1,
|
|
||||||
configLookup: NewSet(nil, equalATNConfigs),
|
|
||||||
fullCtx: fullCtx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add merges contexts with existing configs for (s, i, pi, _), where s is the
|
|
||||||
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
|
|
||||||
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
|
|
||||||
// dipsIntoOuterContext and hasSemanticContext when necessary.
|
|
||||||
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
|
||||||
if b.readOnly {
|
|
||||||
panic("set is read-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.GetSemanticContext() != SemanticContextNone {
|
|
||||||
b.hasSemanticContext = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.GetReachesIntoOuterContext() > 0 {
|
|
||||||
b.dipsIntoOuterContext = true
|
|
||||||
}
|
|
||||||
|
|
||||||
existing := b.configLookup.add(config).(ATNConfig)
|
|
||||||
|
|
||||||
if existing == config {
|
|
||||||
b.cachedHash = -1
|
|
||||||
b.configs = append(b.configs, config) // Track order here
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge a previous (s, i, pi, _) with it and save the result
|
|
||||||
rootIsWildcard := !b.fullCtx
|
|
||||||
merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
|
|
||||||
|
|
||||||
// No need to check for existing.context because config.context is in the cache,
|
|
||||||
// since the only way to create new graphs is the "call rule" and here. We cache
|
|
||||||
// at both places.
|
|
||||||
existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
|
|
||||||
|
|
||||||
// Preserve the precedence filter suppression during the merge
|
|
||||||
if config.getPrecedenceFilterSuppressed() {
|
|
||||||
existing.setPrecedenceFilterSuppressed(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replace the context because there is no need to do alt mapping
|
|
||||||
existing.SetContext(merged)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) GetStates() *Set {
|
|
||||||
states := NewSet(nil, nil)
|
|
||||||
|
|
||||||
for i := 0; i < len(b.configs); i++ {
|
|
||||||
states.add(b.configs[i].GetState())
|
|
||||||
}
|
|
||||||
|
|
||||||
return states
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) HasSemanticContext() bool {
|
|
||||||
return b.hasSemanticContext
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
|
|
||||||
b.hasSemanticContext = v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
|
|
||||||
preds := make([]SemanticContext, 0)
|
|
||||||
|
|
||||||
for i := 0; i < len(b.configs); i++ {
|
|
||||||
c := b.configs[i].GetSemanticContext()
|
|
||||||
|
|
||||||
if c != SemanticContextNone {
|
|
||||||
preds = append(preds, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return preds
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) GetItems() []ATNConfig {
|
|
||||||
return b.configs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
|
|
||||||
if b.readOnly {
|
|
||||||
panic("set is read-only")
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.configLookup.length() == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(b.configs); i++ {
|
|
||||||
config := b.configs[i]
|
|
||||||
|
|
||||||
config.SetContext(interpreter.getCachedContext(config.GetContext()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
|
|
||||||
for i := 0; i < len(coll); i++ {
|
|
||||||
b.Add(coll[i], nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
|
|
||||||
if b == other {
|
|
||||||
return true
|
|
||||||
} else if _, ok := other.(*BaseATNConfigSet); !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
other2 := other.(*BaseATNConfigSet)
|
|
||||||
|
|
||||||
return b.configs != nil &&
|
|
||||||
// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
|
|
||||||
b.fullCtx == other2.fullCtx &&
|
|
||||||
b.uniqueAlt == other2.uniqueAlt &&
|
|
||||||
b.conflictingAlts == other2.conflictingAlts &&
|
|
||||||
b.hasSemanticContext == other2.hasSemanticContext &&
|
|
||||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) hash() int {
|
|
||||||
if b.readOnly {
|
|
||||||
if b.cachedHash == -1 {
|
|
||||||
b.cachedHash = b.hashCodeConfigs()
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.cachedHash
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.hashCodeConfigs()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BaseATNConfigSet) hashCodeConfigs() int {
|
|
||||||
h := murmurInit(1)
|
|
||||||
for _, c := range b.configs {
|
|
||||||
if c != nil {
|
|
||||||
h = murmurUpdate(h, c.hash())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return murmurFinish(h, len(b.configs))
|
|
||||||
}

func (b *BaseATNConfigSet) Length() int {
	return len(b.configs)
}

func (b *BaseATNConfigSet) IsEmpty() bool {
	return len(b.configs) == 0
}

func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.contains(item)
}

func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
	if b.configLookup == nil {
		panic("not implemented for read-only sets")
	}

	return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set
}

func (b *BaseATNConfigSet) Clear() {
	if b.readOnly {
		panic("set is read-only")
	}

	b.configs = make([]ATNConfig, 0)
	b.cachedHash = -1
	b.configLookup = NewSet(nil, equalATNConfigs)
}

func (b *BaseATNConfigSet) FullContext() bool {
	return b.fullCtx
}

func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
	return b.dipsIntoOuterContext
}

func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
	b.dipsIntoOuterContext = v
}

func (b *BaseATNConfigSet) GetUniqueAlt() int {
	return b.uniqueAlt
}

func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
	b.uniqueAlt = v
}

func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
	return b.conflictingAlts
}

func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
	b.conflictingAlts = v
}

func (b *BaseATNConfigSet) ReadOnly() bool {
	return b.readOnly
}

func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
	b.readOnly = readOnly

	if readOnly {
		b.configLookup = nil // Read-only, so no need for the lookup cache
	}
}

func (b *BaseATNConfigSet) String() string {
	s := "["

	for i, c := range b.configs {
		s += c.String()

		if i != len(b.configs)-1 {
			s += ", "
		}
	}

	s += "]"

	if b.hasSemanticContext {
		s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
	}

	if b.uniqueAlt != ATNInvalidAltNumber {
		s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
	}

	if b.conflictingAlts != nil {
		s += ",conflictingAlts=" + b.conflictingAlts.String()
	}

	if b.dipsIntoOuterContext {
		s += ",dipsIntoOuterContext"
	}

	return s
}

type OrderedATNConfigSet struct {
	*BaseATNConfigSet
}

func NewOrderedATNConfigSet() *OrderedATNConfigSet {
	b := NewBaseATNConfigSet(false)

	b.configLookup = NewSet(nil, nil)

	return &OrderedATNConfigSet{BaseATNConfigSet: b}
}

func equalATNConfigs(a, b interface{}) bool {
	if a == nil || b == nil {
		return false
	}

	if a == b {
		return true
	}

	var ai, ok = a.(ATNConfig)
	var bi, ok1 = b.(ATNConfig)

	if !ok || !ok1 {
		return false
	}

	nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber()
	alts := ai.GetAlt() == bi.GetAlt()
	cons := ai.GetSemanticContext().equals(bi.GetSemanticContext())

	return nums && alts && cons
}
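
// Editor's note (not part of the upstream file): equalATNConfigs compares only
// state number, alternative, and semantic context; the prediction context is
// deliberately ignored. Two configs that differ only in context therefore
// collide in configLookup, which is what allows Add to merge their contexts
// instead of storing duplicates.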

25 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go generated vendored

@@ -1,25 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}

type ATNDeserializationOptions struct {
	readOnly                      bool
	verifyATN                     bool
	generateRuleBypassTransitions bool
}

func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
	o := new(ATNDeserializationOptions)

	if CopyFrom != nil {
		o.readOnly = CopyFrom.readOnly
		o.verifyATN = CopyFrom.verifyATN
		o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
	}

	return o
}

828 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go generated vendored

@@ -1,828 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
	"unicode/utf16"
)

// BaseSerializedUUID is the earliest supported serialized UUID. We stick to
// the serialized string form for now; we don't need a UUID instance.
var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"

// SupportedUUIDs contains all of the currently supported UUIDs, ordered by
// when the feature first appeared in this branch.
var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}

var SerializedVersion = 3

// SerializedUUID is the current serialized UUID.
var SerializedUUID = AddedUnicodeSMP

type LoopEndStateIntPair struct {
	item0 *LoopEndState
	item1 int
}

type BlockStartStateIntPair struct {
	item0 BlockStartState
	item1 int
}

type ATNDeserializer struct {
	deserializationOptions *ATNDeserializationOptions
	data                   []rune
	pos                    int
	uuid                   string
}

func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
	if options == nil {
		options = ATNDeserializationOptionsdefaultOptions
	}

	return &ATNDeserializer{deserializationOptions: options}
}

func stringInSlice(a string, list []string) int {
	for i, b := range list {
		if b == a {
			return i
		}
	}

	return -1
}

// isFeatureSupported determines if a particular serialized representation of
// an ATN supports a particular feature, identified by the UUID used for
// serializing the ATN at the time the feature was first introduced. feature is
// the UUID marking the first time the feature was supported in the serialized
// ATN, and actualUUID is the UUID of the serialized ATN currently being
// deserialized. It returns true if actualUUID represents a serialized ATN at
// or after the point where the feature identified by feature was introduced,
// and false otherwise.
func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
	idx1 := stringInSlice(feature, SupportedUUIDs)

	if idx1 < 0 {
		return false
	}

	idx2 := stringInSlice(actualUUID, SupportedUUIDs)

	return idx2 >= idx1
}
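
// Editor's note (worked example, not part of the upstream file): the check
// reduces to an index comparison in SupportedUUIDs, which here is
// [BaseSerializedUUID, AddedUnicodeSMP]:
//
//	isFeatureSupported(AddedUnicodeSMP, BaseSerializedUUID)  // false: 0 < 1
//	isFeatureSupported(AddedUnicodeSMP, AddedUnicodeSMP)     // true:  1 >= 1
//	isFeatureSupported(BaseSerializedUUID, AddedUnicodeSMP)  // true:  1 >= 0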

func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
	a.reset(utf16.Decode(data))
	a.checkVersion()
	a.checkUUID()

	atn := a.readATN()

	a.readStates(atn)
	a.readRules(atn)
	a.readModes(atn)

	sets := make([]*IntervalSet, 0)

	// First, deserialize sets with 16-bit arguments <= U+FFFF.
	sets = a.readSets(atn, sets, a.readInt)

	// Next, if the ATN was serialized with the Unicode SMP feature,
	// deserialize sets with 32-bit arguments <= U+10FFFF.
	if a.isFeatureSupported(AddedUnicodeSMP, a.uuid) {
		sets = a.readSets(atn, sets, a.readInt32)
	}

	a.readEdges(atn, sets)
	a.readDecisions(atn)
	a.readLexerActions(atn)
	a.markPrecedenceDecisions(atn)
	a.verifyATN(atn)

	if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
		a.generateRuleBypassTransitions(atn)
		// Re-verify after modification
		a.verifyATN(atn)
	}

	return atn
}

func (a *ATNDeserializer) reset(data []rune) {
	temp := make([]rune, len(data))

	for i, c := range data {
		// Don't adjust the first value since that's the version number
		if i == 0 {
			temp[i] = c
		} else if c > 1 {
			temp[i] = c - 2
		} else {
			temp[i] = c + 65533
		}
	}

	a.data = temp
	a.pos = 0
}
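
// Editor's note (worked example, not part of the upstream file): every value
// after the version number is stored shifted by +2 in the serialized form, so
// reset undoes the shift, wrapping the two smallest code units:
//
//	c = 5 -> 5 - 2     = 3
//	c = 1 -> 1 + 65533 = 65534
//	c = 0 -> 0 + 65533 = 65533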

func (a *ATNDeserializer) checkVersion() {
	version := a.readInt()

	if version != SerializedVersion {
		panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
	}
}

func (a *ATNDeserializer) checkUUID() {
	uuid := a.readUUID()

	if stringInSlice(uuid, SupportedUUIDs) < 0 {
		panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
	}

	a.uuid = uuid
}

func (a *ATNDeserializer) readATN() *ATN {
	grammarType := a.readInt()
	maxTokenType := a.readInt()

	return NewATN(grammarType, maxTokenType)
}

func (a *ATNDeserializer) readStates(atn *ATN) {
	loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
	endStateNumbers := make([]BlockStartStateIntPair, 0)

	nstates := a.readInt()

	for i := 0; i < nstates; i++ {
		stype := a.readInt()

		// Ignore states with invalid types
		if stype == ATNStateInvalidType {
			atn.addState(nil)

			continue
		}

		ruleIndex := a.readInt()

		if ruleIndex == 0xFFFF {
			ruleIndex = -1
		}

		s := a.stateFactory(stype, ruleIndex)

		if stype == ATNStateLoopEnd {
			loopBackStateNumber := a.readInt()

			loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
		} else if s2, ok := s.(BlockStartState); ok {
			endStateNumber := a.readInt()

			endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
		}

		atn.addState(s)
	}

	// Delay the assignment of loop back and end states until we know all the
	// state instances have been initialized
	for j := 0; j < len(loopBackStateNumbers); j++ {
		pair := loopBackStateNumbers[j]

		pair.item0.loopBackState = atn.states[pair.item1]
	}

	for j := 0; j < len(endStateNumbers); j++ {
		pair := endStateNumbers[j]

		pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
	}

	numNonGreedyStates := a.readInt()

	for j := 0; j < numNonGreedyStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(DecisionState).setNonGreedy(true)
	}

	numPrecedenceStates := a.readInt()

	for j := 0; j < numPrecedenceStates; j++ {
		stateNumber := a.readInt()

		atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
	}
}

func (a *ATNDeserializer) readRules(atn *ATN) {
	nrules := a.readInt()

	if atn.grammarType == ATNTypeLexer {
		atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
	}

	atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)

	for i := 0; i < nrules; i++ {
		s := a.readInt()
		startState := atn.states[s].(*RuleStartState)

		atn.ruleToStartState[i] = startState

		if atn.grammarType == ATNTypeLexer {
			tokenType := a.readInt()

			if tokenType == 0xFFFF {
				tokenType = TokenEOF
			}

			atn.ruleToTokenType[i] = tokenType
		}
	}

	atn.ruleToStopState = make([]*RuleStopState, nrules) // initIntArray(nrules, 0)

	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		if s2, ok := state.(*RuleStopState); ok {
			atn.ruleToStopState[s2.ruleIndex] = s2
			atn.ruleToStartState[s2.ruleIndex].stopState = s2
		}
	}
}

func (a *ATNDeserializer) readModes(atn *ATN) {
	nmodes := a.readInt()

	for i := 0; i < nmodes; i++ {
		s := a.readInt()

		atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
	}
}

func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
	m := a.readInt()

	for i := 0; i < m; i++ {
		iset := NewIntervalSet()

		sets = append(sets, iset)

		n := a.readInt()
		containsEOF := a.readInt()

		if containsEOF != 0 {
			iset.addOne(-1)
		}

		for j := 0; j < n; j++ {
			i1 := readUnicode()
			i2 := readUnicode()

			iset.addRange(i1, i2)
		}
	}

	return sets
}

func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
	nedges := a.readInt()

	for i := 0; i < nedges; i++ {
		var (
			src      = a.readInt()
			trg      = a.readInt()
			ttype    = a.readInt()
			arg1     = a.readInt()
			arg2     = a.readInt()
			arg3     = a.readInt()
			trans    = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
			srcState = atn.states[src]
		)

		srcState.AddTransition(trans, -1)
	}

	// Edges for rule stop states can be derived, so they are not serialized
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			var t, ok = state.GetTransitions()[j].(*RuleTransition)

			if !ok {
				continue
			}

			outermostPrecedenceReturn := -1

			if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
				if t.precedence == 0 {
					outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
				}
			}

			trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)

			atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
		}
	}

	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		if s2, ok := state.(*BaseBlockStartState); ok {
			// We need to know the end state to set its start state
			if s2.endState == nil {
				panic("IllegalState")
			}

			// Block end states can only be associated with a single block
			// start state
			if s2.endState.startState != nil {
				panic("IllegalState")
			}

			s2.endState.startState = state
		}

		if s2, ok := state.(*PlusLoopbackState); ok {
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*PlusBlockStartState); ok {
					t2.loopBackState = state
				}
			}
		} else if s2, ok := state.(*StarLoopbackState); ok {
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*StarLoopEntryState); ok {
					t2.loopBackState = state
				}
			}
		}
	}
}

func (a *ATNDeserializer) readDecisions(atn *ATN) {
	ndecisions := a.readInt()

	for i := 0; i < ndecisions; i++ {
		s := a.readInt()
		decState := atn.states[s].(DecisionState)

		atn.DecisionToState = append(atn.DecisionToState, decState)
		decState.setDecision(i)
	}
}

func (a *ATNDeserializer) readLexerActions(atn *ATN) {
	if atn.grammarType == ATNTypeLexer {
		count := a.readInt()

		atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)

		for i := 0; i < count; i++ {
			actionType := a.readInt()
			data1 := a.readInt()

			if data1 == 0xFFFF {
				data1 = -1
			}

			data2 := a.readInt()

			if data2 == 0xFFFF {
				data2 = -1
			}

			lexerAction := a.lexerActionFactory(actionType, data1, data2)

			atn.lexerActions[i] = lexerAction
		}
	}
}

func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
	count := len(atn.ruleToStartState)

	for i := 0; i < count; i++ {
		atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
	}

	for i := 0; i < count; i++ {
		a.generateRuleBypassTransition(atn, i)
	}
}

func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
	bypassStart := NewBasicBlockStartState()

	bypassStart.ruleIndex = idx
	atn.addState(bypassStart)

	bypassStop := NewBlockEndState()

	bypassStop.ruleIndex = idx
	atn.addState(bypassStop)

	bypassStart.endState = bypassStop

	atn.defineDecisionState(bypassStart.BaseDecisionState)

	bypassStop.startState = bypassStart

	var excludeTransition Transition
	var endState ATNState

	if atn.ruleToStartState[idx].isPrecedenceRule {
		// Wrap from the beginning of the rule to the StarLoopEntryState
		endState = nil

		for i := 0; i < len(atn.states); i++ {
			state := atn.states[i]

			if a.stateIsEndStateFor(state, idx) != nil {
				endState = state
				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]

				break
			}
		}

		if excludeTransition == nil {
			panic("Couldn't identify final state of the precedence rule prefix section.")
		}
	} else {
		endState = atn.ruleToStopState[idx]
	}

	// All non-excluded transitions that currently target the end state need to
	// target blockEnd instead
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			transition := state.GetTransitions()[j]

			if transition == excludeTransition {
				continue
			}

			if transition.getTarget() == endState {
				transition.setTarget(bypassStop)
			}
		}
	}

	// All transitions leaving the rule start state need to leave blockStart
	// instead; move each one over and trim it from the source list so the loop
	// terminates
	ruleToStartState := atn.ruleToStartState[idx]
	count := len(ruleToStartState.GetTransitions())

	for count > 0 {
		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
		count--
	}

	// Link the new states
	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)

	MatchState := NewBasicState()

	atn.addState(MatchState)
	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
}

func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
	if state.GetRuleIndex() != idx {
		return nil
	}

	if _, ok := state.(*StarLoopEntryState); !ok {
		return nil
	}

	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
		return nil
	}

	var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
		return state
	}

	return nil
}

// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
// the correct value.
func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
	for _, state := range atn.states {
		if _, ok := state.(*StarLoopEntryState); !ok {
			continue
		}

		// We analyze the ATN to determine if an ATN decision state is the
		// decision for the closure block that determines whether a
		// precedence rule should continue or complete.
		if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
			maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()

			if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
				var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)

				if s3.epsilonOnlyTransitions && ok2 {
					state.(*StarLoopEntryState).precedenceRuleDecision = true
				}
			}
		}
	}
}

func (a *ATNDeserializer) verifyATN(atn *ATN) {
	if !a.deserializationOptions.verifyATN {
		return
	}

	// Verify assumptions
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		if state == nil {
			continue
		}

		a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")

		switch s2 := state.(type) {
		case *PlusBlockStartState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *StarLoopEntryState:
			a.checkCondition(s2.loopBackState != nil, "")
			a.checkCondition(len(s2.GetTransitions()) == 2, "")

			// The two transitions of a star loop entry must lead to a star
			// block start and a loop end, in either order, and the non-greedy
			// flag must agree with that order.
			switch s2.GetTransitions()[0].getTarget().(type) {
			case *StarBlockStartState:
				var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)

				a.checkCondition(ok2, "")
				a.checkCondition(!s2.nonGreedy, "")

			case *LoopEndState:
				var _, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)

				a.checkCondition(ok2, "")
				a.checkCondition(s2.nonGreedy, "")

			default:
				panic("IllegalState")
			}

		case *StarLoopbackState:
			a.checkCondition(len(state.GetTransitions()) == 1, "")

			var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)

			a.checkCondition(ok2, "")

		case *LoopEndState:
			a.checkCondition(s2.loopBackState != nil, "")

		case *RuleStartState:
			a.checkCondition(s2.stopState != nil, "")

		case *BaseBlockStartState:
			a.checkCondition(s2.endState != nil, "")

		case *BlockEndState:
			a.checkCondition(s2.startState != nil, "")

		case DecisionState:
			a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")

		default:
			var _, ok = s2.(*RuleStopState)

			a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
		}
	}
}

func (a *ATNDeserializer) checkCondition(condition bool, message string) {
	if !condition {
		if message == "" {
			message = "IllegalState"
		}

		panic(message)
	}
}

func (a *ATNDeserializer) readInt() int {
	v := a.data[a.pos]

	a.pos++

	return int(v)
}

func (a *ATNDeserializer) readInt32() int {
	var low = a.readInt()
	var high = a.readInt()
	return low | (high << 16)
}
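
// Editor's note (worked example, not part of the upstream file): readInt32
// reassembles a 32-bit value from two 16-bit code units, low half first:
//
//	low = 0x5678, high = 0x1234 -> 0x5678 | (0x1234 << 16) = 0x12345678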

// TODO
//func (a *ATNDeserializer) readLong() int64 {
//	panic("Not implemented")
//	var low = a.readInt32()
//	var high = a.readInt32()
//	return (low & 0x00000000FFFFFFFF) | (high << int32)
//}

func createByteToHex() []string {
	bth := make([]string, 256)

	for i := 0; i < 256; i++ {
		bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)}))
	}

	return bth
}

var byteToHex = createByteToHex()

func (a *ATNDeserializer) readUUID() string {
	bb := make([]int, 16)

	for i := 7; i >= 0; i-- {
		integer := a.readInt()

		bb[(2*i)+1] = integer & 0xFF
		bb[2*i] = (integer >> 8) & 0xFF
	}

	return byteToHex[bb[0]] + byteToHex[bb[1]] +
		byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
		byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
		byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
		byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
		byteToHex[bb[10]] + byteToHex[bb[11]] +
		byteToHex[bb[12]] + byteToHex[bb[13]] +
		byteToHex[bb[14]] + byteToHex[bb[15]]
}
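
// Editor's note (not part of the upstream file): readUUID consumes eight
// 16-bit values from the back of the UUID toward the front (i == 7 fills
// bb[14] and bb[15], i == 0 fills bb[0] and bb[1]), then renders bb[0..15] in
// the canonical 8-4-4-4-12 hexadecimal form.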

func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
	target := atn.states[trg]

	switch typeIndex {
	case TransitionEPSILON:
		return NewEpsilonTransition(target, -1)

	case TransitionRANGE:
		if arg3 != 0 {
			return NewRangeTransition(target, TokenEOF, arg2)
		}

		return NewRangeTransition(target, arg1, arg2)

	case TransitionRULE:
		return NewRuleTransition(atn.states[arg1], arg2, arg3, target)

	case TransitionPREDICATE:
		return NewPredicateTransition(target, arg1, arg2, arg3 != 0)

	case TransitionPRECEDENCE:
		return NewPrecedencePredicateTransition(target, arg1)

	case TransitionATOM:
		if arg3 != 0 {
			return NewAtomTransition(target, TokenEOF)
		}

		return NewAtomTransition(target, arg1)

	case TransitionACTION:
		return NewActionTransition(target, arg1, arg2, arg3 != 0)

	case TransitionSET:
		return NewSetTransition(target, sets[arg1])

	case TransitionNOTSET:
		return NewNotSetTransition(target, sets[arg1])

	case TransitionWILDCARD:
		return NewWildcardTransition(target)
	}

	panic("The specified transition type is not valid.")
}
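
// Editor's note (not part of the upstream file): each serialized edge is the
// six-int tuple (src, trg, ttype, arg1, arg2, arg3) read in readEdges above.
// For example, with ttype == TransitionRANGE a nonzero arg3 marks a range
// whose lower bound is EOF, so the factory substitutes TokenEOF for arg1.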

func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
	var s ATNState

	switch typeIndex {
	case ATNStateInvalidType:
		return nil

	case ATNStateBasic:
		s = NewBasicState()

	case ATNStateRuleStart:
		s = NewRuleStartState()

	case ATNStateBlockStart:
		s = NewBasicBlockStartState()

	case ATNStatePlusBlockStart:
		s = NewPlusBlockStartState()

	case ATNStateStarBlockStart:
		s = NewStarBlockStartState()

	case ATNStateTokenStart:
		s = NewTokensStartState()

	case ATNStateRuleStop:
		s = NewRuleStopState()

	case ATNStateBlockEnd:
		s = NewBlockEndState()

	case ATNStateStarLoopBack:
		s = NewStarLoopbackState()

	case ATNStateStarLoopEntry:
		s = NewStarLoopEntryState()

	case ATNStatePlusLoopBack:
		s = NewPlusLoopbackState()

	case ATNStateLoopEnd:
		s = NewLoopEndState()

	default:
		panic(fmt.Sprintf("state type %d is invalid", typeIndex))
	}

	s.SetRuleIndex(ruleIndex)

	return s
}

func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
	switch typeIndex {
	case LexerActionTypeChannel:
		return NewLexerChannelAction(data1)

	case LexerActionTypeCustom:
		return NewLexerCustomAction(data1, data2)

	case LexerActionTypeMode:
		return NewLexerModeAction(data1)

	case LexerActionTypeMore:
		return LexerMoreActionINSTANCE

	case LexerActionTypePopMode:
		return LexerPopModeActionINSTANCE

	case LexerActionTypePushMode:
		return NewLexerPushModeAction(data1)

	case LexerActionTypeSkip:
		return LexerSkipActionINSTANCE

	case LexerActionTypeType:
		return NewLexerTypeAction(data1)

	default:
		panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
	}
}

50 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go generated vendored

@@ -1,50 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))

type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
	ATN() *ATN
	DecisionToDFA() []*DFA
}

type BaseATNSimulator struct {
	atn                *ATN
	sharedContextCache *PredictionContextCache
	decisionToDFA      []*DFA
}

func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
	b := new(BaseATNSimulator)

	b.atn = atn
	b.sharedContextCache = sharedContextCache

	return b
}

func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
	if b.sharedContextCache == nil {
		return context
	}

	visited := make(map[PredictionContext]PredictionContext)

	return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}

func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
	return b.sharedContextCache
}

func (b *BaseATNSimulator) ATN() *ATN {
	return b.atn
}

func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
	return b.decisionToDFA
}

386 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go generated vendored

@@ -1,386 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "strconv"

// Constants for serialization.
const (
	ATNStateInvalidType    = 0
	ATNStateBasic          = 1
	ATNStateRuleStart      = 2
	ATNStateBlockStart     = 3
	ATNStatePlusBlockStart = 4
	ATNStateStarBlockStart = 5
	ATNStateTokenStart     = 6
	ATNStateRuleStop       = 7
	ATNStateBlockEnd       = 8
	ATNStateStarLoopBack   = 9
	ATNStateStarLoopEntry  = 10
	ATNStatePlusLoopBack   = 11
	ATNStateLoopEnd        = 12

	ATNStateInvalidStateNumber = -1
)

var ATNStateInitialNumTransitions = 4

type ATNState interface {
	GetEpsilonOnlyTransitions() bool

	GetRuleIndex() int
	SetRuleIndex(int)

	GetNextTokenWithinRule() *IntervalSet
	SetNextTokenWithinRule(*IntervalSet)

	GetATN() *ATN
	SetATN(*ATN)

	GetStateType() int

	GetStateNumber() int
	SetStateNumber(int)

	GetTransitions() []Transition
	SetTransitions([]Transition)
	AddTransition(Transition, int)

	String() string
	hash() int
}

type BaseATNState struct {
	// NextTokenWithinRule caches lookahead during parsing. Not used during
	// construction.
	NextTokenWithinRule *IntervalSet

	// atn is the current ATN.
	atn *ATN

	epsilonOnlyTransitions bool

	// ruleIndex tracks the Rule index because there are no Rule objects at
	// runtime.
	ruleIndex int

	stateNumber int

	stateType int

	// Track the transitions emanating from this ATN state.
	transitions []Transition
}

func NewBaseATNState() *BaseATNState {
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}

func (as *BaseATNState) GetRuleIndex() int {
	return as.ruleIndex
}

func (as *BaseATNState) SetRuleIndex(v int) {
	as.ruleIndex = v
}

func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
	return as.epsilonOnlyTransitions
}

func (as *BaseATNState) GetATN() *ATN {
	return as.atn
}

func (as *BaseATNState) SetATN(atn *ATN) {
	as.atn = atn
}

func (as *BaseATNState) GetTransitions() []Transition {
	return as.transitions
}

func (as *BaseATNState) SetTransitions(t []Transition) {
	as.transitions = t
}

func (as *BaseATNState) GetStateType() int {
	return as.stateType
}

func (as *BaseATNState) GetStateNumber() int {
	return as.stateNumber
}

func (as *BaseATNState) SetStateNumber(stateNumber int) {
	as.stateNumber = stateNumber
}

func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
	return as.NextTokenWithinRule
}

func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
	as.NextTokenWithinRule = v
}

func (as *BaseATNState) hash() int {
	return as.stateNumber
}

func (as *BaseATNState) String() string {
	return strconv.Itoa(as.stateNumber)
}

func (as *BaseATNState) equals(other interface{}) bool {
	if ot, ok := other.(ATNState); ok {
		return as.stateNumber == ot.GetStateNumber()
	}

	return false
}

func (as *BaseATNState) isNonGreedyExitState() bool {
	return false
}

func (as *BaseATNState) AddTransition(trans Transition, index int) {
	if len(as.transitions) == 0 {
		as.epsilonOnlyTransitions = trans.getIsEpsilon()
	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
		as.epsilonOnlyTransitions = false
	}

	if index == -1 {
		as.transitions = append(as.transitions, trans)
	} else {
		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
		// TODO: as.transitions.splice(index, 1, trans)
	}
}
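
// Editor's note (illustrative, not part of the upstream file): index == -1
// appends, any other index inserts. Given transitions [t0, t1]:
//
//	AddTransition(t2, -1) // -> [t0, t1, t2]
//	AddTransition(t2, 1)  // -> [t0, t2, t1]
//
// epsilonOnlyTransitions stays true only while every transition added so far
// is an epsilon transition.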

type BasicState struct {
	*BaseATNState
}

func NewBasicState() *BasicState {
	b := NewBaseATNState()

	b.stateType = ATNStateBasic

	return &BasicState{BaseATNState: b}
}

type DecisionState interface {
	ATNState

	getDecision() int
	setDecision(int)

	getNonGreedy() bool
	setNonGreedy(bool)
}

type BaseDecisionState struct {
	*BaseATNState
	decision  int
	nonGreedy bool
}

func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}

func (s *BaseDecisionState) getDecision() int {
	return s.decision
}

func (s *BaseDecisionState) setDecision(b int) {
	s.decision = b
}

func (s *BaseDecisionState) getNonGreedy() bool {
	return s.nonGreedy
}

func (s *BaseDecisionState) setNonGreedy(b bool) {
	s.nonGreedy = b
}

type BlockStartState interface {
	DecisionState

	getEndState() *BlockEndState
	setEndState(*BlockEndState)
}

// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	*BaseDecisionState
	endState *BlockEndState
}

func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}

func (s *BaseBlockStartState) getEndState() *BlockEndState {
	return s.endState
}

func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
	s.endState = b
}

type BasicBlockStartState struct {
	*BaseBlockStartState
}

func NewBasicBlockStartState() *BasicBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateBlockStart

	return &BasicBlockStartState{BaseBlockStartState: b}
}

// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
	*BaseATNState
	startState ATNState
}

func NewBlockEndState() *BlockEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateBlockEnd

	return &BlockEndState{BaseATNState: b}
}

// RuleStopState is the last node in the ATN for a rule, unless that rule is
// the start symbol. In that case, there is one transition to EOF. Later, we
// might encode references to all calls to this rule to compute FOLLOW sets for
// error handling.
type RuleStopState struct {
	*BaseATNState
}

func NewRuleStopState() *RuleStopState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStop

	return &RuleStopState{BaseATNState: b}
}

type RuleStartState struct {
	*BaseATNState
	stopState        ATNState
	isPrecedenceRule bool
}

func NewRuleStartState() *RuleStartState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStart

	return &RuleStartState{BaseATNState: b}
}

// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to loop back to the start of the block, and one to exit.
type PlusLoopbackState struct {
	*BaseDecisionState
}

func NewPlusLoopbackState() *PlusLoopbackState {
	b := NewBaseDecisionState()

	b.stateType = ATNStatePlusLoopBack

	return &PlusLoopbackState{BaseDecisionState: b}
}

// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
// decision state; we don't use it for code generation. Somebody might need it,
// so it is included for completeness. In reality, PlusLoopbackState is the
// real decision-making node for A+.
type PlusBlockStartState struct {
	*BaseBlockStartState
	loopBackState ATNState
}

func NewPlusBlockStartState() *PlusBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStatePlusBlockStart

	return &PlusBlockStartState{BaseBlockStartState: b}
}

// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
	*BaseBlockStartState
}

func NewStarBlockStartState() *StarBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateStarBlockStart

	return &StarBlockStartState{BaseBlockStartState: b}
}

type StarLoopbackState struct {
	*BaseATNState
}

func NewStarLoopbackState() *StarLoopbackState {
	b := NewBaseATNState()

	b.stateType = ATNStateStarLoopBack

	return &StarLoopbackState{BaseATNState: b}
}

type StarLoopEntryState struct {
	*BaseDecisionState
	loopBackState ATNState

	// precedenceRuleDecision (false by default) indicates whether this state
	// can benefit from a precedence DFA during SLL decision making.
	precedenceRuleDecision bool
}

func NewStarLoopEntryState() *StarLoopEntryState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateStarLoopEntry

	return &StarLoopEntryState{BaseDecisionState: b}
}

// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
	*BaseATNState
	loopBackState ATNState
}

func NewLoopEndState() *LoopEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateLoopEnd

	return &LoopEndState{BaseATNState: b}
}

// TokensStartState is the Tokens rule start state linking to each lexer rule
// start state.
type TokensStartState struct {
	*BaseDecisionState
}

func NewTokensStartState() *TokensStartState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateTokenStart

	return &TokensStartState{BaseDecisionState: b}
}

11 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go generated vendored

@@ -1,11 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// Represents the type of recognizer an ATN applies to.
const (
	ATNTypeLexer  = 0
	ATNTypeParser = 1
)

12 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go generated vendored

@@ -1,12 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(*Interval) string
}

56 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go generated vendored

@@ -1,56 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// TokenFactory creates CommonToken objects.
type TokenFactory interface {
	Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}

// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
	// copyText indicates whether CommonToken.setText should be called after
	// constructing tokens to explicitly set the text. This is useful for cases
	// where the input stream might not be able to provide arbitrary substrings
	// of text from the input after the lexer creates a token (e.g. the
	// implementation of CharStream.GetText in UnbufferedCharStream panics with
	// an UnsupportedOperationException). Explicitly setting the token text
	// allows Token.GetText to be called at any time regardless of the input
	// stream implementation.
	//
	// The default value is false to avoid the performance and memory overhead
	// of copying text for every token unless explicitly requested.
	copyText bool
}

func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
	return &CommonTokenFactory{copyText: copyText}
}

// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)

func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
	t := NewCommonToken(source, ttype, channel, start, stop)

	t.line = line
	t.column = column

	if text != "" {
		t.SetText(text)
	} else if c.copyText && source.charStream != nil {
		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
	}

	return t
}
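
// Editor's note (usage sketch, not part of the upstream file; source and ttype
// are assumed to exist and be wired to a live char stream): with copyText
// enabled, the token copies its text out of the stream at creation time, so
// GetText keeps working even if the stream cannot serve substrings later:
//
//	factory := NewCommonTokenFactory(true)
//	tok := factory.Create(source, ttype, "", TokenDefaultChannel, 0, 4, 1, 0)
//	_ = tok.GetText()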

func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
	t.SetText(text)

	return t
}

447 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go generated vendored

@@ -1,447 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// CommonTokenStream is an implementation of TokenStream that loads tokens from
// a TokenSource on-demand and places the tokens in a buffer to provide access
// to any previous token by index. This token stream ignores the value of
// Token.getChannel. If your parser requires the token stream to filter tokens
// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
// Token.HIDDEN_CHANNEL, use a filtering token stream such as
// CommonTokenStream.
type CommonTokenStream struct {
	channel int

	// fetchedEOF indicates whether the Token.EOF token has been fetched from
	// tokenSource and added to tokens. This field improves performance for the
	// following cases:
	//
	// consume: The lookahead check in consume to prevent consuming the EOF
	// symbol is optimized by checking the values of fetchedEOF and p instead
	// of calling LA.
	//
	// fetch: The check to prevent adding multiple EOF symbols into tokens is
	// trivial with this field.
	fetchedEOF bool

	// index into tokens of the current token (the next token to consume).
	// tokens[p] should be LT(1). It is set to -1 when the stream is first
	// constructed or when SetTokenSource is called, indicating that the first
	// token has not yet been fetched from the token source. For additional
	// information, see the documentation of IntStream for a description of
	// initializing methods.
	index int

	// tokenSource is the TokenSource from which tokens for this stream are
	// fetched.
	tokenSource TokenSource

	// tokens is all tokens fetched from the token source. The list is
	// considered a complete view of the input once fetchedEOF is set to true.
	tokens []Token
}

func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
	return &CommonTokenStream{
		channel:     channel,
		index:       -1,
		tokenSource: lexer,
		tokens:      make([]Token, 0),
	}
}

func (c *CommonTokenStream) GetAllTokens() []Token {
	return c.tokens
}

func (c *CommonTokenStream) Mark() int {
	return 0
}

func (c *CommonTokenStream) Release(marker int) {}

func (c *CommonTokenStream) reset() {
	c.Seek(0)
}

func (c *CommonTokenStream) Seek(index int) {
	c.lazyInit()
	c.index = c.adjustSeekIndex(index)
}

func (c *CommonTokenStream) Get(index int) Token {
	c.lazyInit()

	return c.tokens[index]
}

func (c *CommonTokenStream) Consume() {
	SkipEOFCheck := false

	if c.index >= 0 {
		if c.fetchedEOF {
			// The last token in tokens is EOF. Skip the check if p indexes any
			// fetched token except the last.
			SkipEOFCheck = c.index < len(c.tokens)-1
		} else {
			// No EOF token in tokens. Skip the check if p indexes a fetched
			// token.
			SkipEOFCheck = c.index < len(c.tokens)
		}
	} else {
		// Not yet initialized
		SkipEOFCheck = false
	}

	if !SkipEOFCheck && c.LA(1) == TokenEOF {
		panic("cannot consume EOF")
	}

	if c.Sync(c.index + 1) {
		c.index = c.adjustSeekIndex(c.index + 1)
	}
}

// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
	n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?

	if n > 0 {
		fetched := c.fetch(n)
		return fetched >= n
	}

	return true
}
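
// Editor's note (worked example, not part of the upstream file): with
// len(tokens) == 3, Sync(5) computes n = 5 - 3 + 1 = 3, i.e. three more tokens
// must be fetched for index 5 to exist. If fetch returns fewer because EOF was
// reached first, Sync reports false.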

// fetch adds n elements to the buffer and returns the actual number of
// elements added to the buffer.
func (c *CommonTokenStream) fetch(n int) int {
	if c.fetchedEOF {
		return 0
	}

	for i := 0; i < n; i++ {
		t := c.tokenSource.NextToken()

		t.SetTokenIndex(len(c.tokens))
		c.tokens = append(c.tokens, t)

		if t.GetTokenType() == TokenEOF {
			c.fetchedEOF = true

			return i + 1
		}
	}

	return n
}

// GetTokens gets tokens from start up to (but not including) stop whose type
// is in types; a nil types set matches every token.
func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
	if start < 0 || stop < 0 {
		return nil
	}

	c.lazyInit()

	subset := make([]Token, 0)

	if stop >= len(c.tokens) {
		stop = len(c.tokens) - 1
	}

	for i := start; i < stop; i++ {
		t := c.tokens[i]

		if t.GetTokenType() == TokenEOF {
			break
		}

		if types == nil || types.contains(t.GetTokenType()) {
			subset = append(subset, t)
		}
	}

	return subset
}

func (c *CommonTokenStream) LA(i int) int {
	return c.LT(i).GetTokenType()
}

func (c *CommonTokenStream) lazyInit() {
	if c.index == -1 {
		c.setup()
	}
}

func (c *CommonTokenStream) setup() {
	c.Sync(0)
	c.index = c.adjustSeekIndex(0)
}

func (c *CommonTokenStream) GetTokenSource() TokenSource {
	return c.tokenSource
}

// SetTokenSource resets the token stream by setting its token source.
func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
	c.tokenSource = tokenSource
	c.tokens = make([]Token, 0)
	c.index = -1
}

// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there
// are no tokens on channel between i and EOF.
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
	c.Sync(i)

	if i >= len(c.tokens) {
		return -1
	}

	token := c.tokens[i]

	for token.GetChannel() != c.channel {
		if token.GetTokenType() == TokenEOF {
			return -1
		}

		i++
		c.Sync(i)
		token = c.tokens[i]
	}

	return i
}
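
// Editor's note (worked example, not part of the upstream file): for tokens on
// channels [0, 1, 1, 0] and a stream built with channel 0,
// NextTokenOnChannel(1, 0) skips the two off-channel tokens and returns 3,
// while NextTokenOnChannel(0, 0) returns 0 immediately. Note the loop compares
// against c.channel, so the channel parameter itself is not consulted here.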

// previousTokenOnChannel returns the index of the previous token on channel
// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
// there are no tokens on channel between i and 0.
func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
	for i >= 0 && c.tokens[i].GetChannel() != channel {
		i--
	}

	return i
}

// GetHiddenTokensToRight collects all tokens on a specified channel to the
// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
// or EOF. If channel is -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
	from := tokenIndex + 1

	// If there are no on-channel tokens to the right, nextOnChannel == -1, so
	// set "to" to the last token
	var to int

	if nextOnChannel == -1 {
		to = len(c.tokens) - 1
	} else {
		to = nextOnChannel
	}

	return c.filterForChannel(from, to, channel)
}

// GetHiddenTokensToLeft collects all tokens on channel to the left of the
// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
// -1, it finds any non-default channel token.
func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
	c.lazyInit()

	if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
		panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
	}

	prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)

	if prevOnChannel == tokenIndex-1 {
		return nil
	}

	// If there are no on-channel tokens to the left and prevOnChannel == -1,
	// then from = 0
	from := prevOnChannel + 1
	to := tokenIndex - 1

	return c.filterForChannel(from, to, channel)
}

func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
	hidden := make([]Token, 0)

	for i := left; i < right+1; i++ {
		t := c.tokens[i]

		if channel == -1 {
			if t.GetChannel() != LexerDefaultTokenChannel {
				hidden = append(hidden, t)
			}
		} else if t.GetChannel() == channel {
			hidden = append(hidden, t)
		}
	}

	if len(hidden) == 0 {
		return nil
	}

	return hidden
}

func (c *CommonTokenStream) GetSourceName() string {
	return c.tokenSource.GetSourceName()
}

func (c *CommonTokenStream) Size() int {
	return len(c.tokens)
}

func (c *CommonTokenStream) Index() int {
	return c.index
}

func (c *CommonTokenStream) GetAllText() string {
	return c.GetTextFromInterval(nil)
}

func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
	if start == nil || end == nil {
		return ""
	}

	return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
}
|
|
||||||
|
|
||||||
func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
|
|
||||||
return c.GetTextFromInterval(interval.GetSourceInterval())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
|
|
||||||
c.lazyInit()
|
|
||||||
c.Fill()
|
|
||||||
|
|
||||||
if interval == nil {
|
|
||||||
interval = NewInterval(0, len(c.tokens)-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
start := interval.Start
|
|
||||||
stop := interval.Stop
|
|
||||||
|
|
||||||
if start < 0 || stop < 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
if stop >= len(c.tokens) {
|
|
||||||
stop = len(c.tokens) - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
s := ""
|
|
||||||
|
|
||||||
for i := start; i < stop+1; i++ {
|
|
||||||
t := c.tokens[i]
|
|
||||||
|
|
||||||
if t.GetTokenType() == TokenEOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
s += t.GetText()
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill gets all tokens from the lexer until EOF.
|
|
||||||
func (c *CommonTokenStream) Fill() {
|
|
||||||
c.lazyInit()
|
|
||||||
|
|
||||||
for c.fetch(1000) == 1000 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommonTokenStream) adjustSeekIndex(i int) int {
|
|
||||||
return c.NextTokenOnChannel(i, c.channel)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommonTokenStream) LB(k int) Token {
|
|
||||||
if k == 0 || c.index-k < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
i := c.index
|
|
||||||
n := 1
|
|
||||||
|
|
||||||
// Find k good tokens looking backward
|
|
||||||
for n <= k {
|
|
||||||
// Skip off-channel tokens
|
|
||||||
i = c.previousTokenOnChannel(i-1, c.channel)
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
|
|
||||||
if i < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.tokens[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommonTokenStream) LT(k int) Token {
|
|
||||||
c.lazyInit()
|
|
||||||
|
|
||||||
if k == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if k < 0 {
|
|
||||||
return c.LB(-k)
|
|
||||||
}
|
|
||||||
|
|
||||||
i := c.index
|
|
||||||
n := 1 // We know tokens[n] is valid
|
|
||||||
|
|
||||||
// Find k good tokens
|
|
||||||
for n < k {
|
|
||||||
// Skip off-channel tokens, but make sure to not look past EOF
|
|
||||||
if c.Sync(i + 1) {
|
|
||||||
i = c.NextTokenOnChannel(i+1, c.channel)
|
|
||||||
}
|
|
||||||
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.tokens[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNumberOfOnChannelTokens counts EOF once.
|
|
||||||
func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
|
|
||||||
var n int
|
|
||||||
|
|
||||||
c.Fill()
|
|
||||||
|
|
||||||
for i := 0; i < len(c.tokens); i++ {
|
|
||||||
t := c.tokens[i]
|
|
||||||
|
|
||||||
if t.GetChannel() == c.channel {
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.GetTokenType() == TokenEOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
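For orientation, the stream above is normally driven by a generated parser rather than called directly. A minimal sketch of direct use, assuming a hypothetical generated lexer NewMyLexer (not part of the runtime); the rest is the runtime API shown above:

package main

import (
    "fmt"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
    input := antlr.NewInputStream("a b c")
    lexer := NewMyLexer(input) // hypothetical generated lexer
    stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)

    stream.Fill()                    // pull every token up to EOF via fetch(1000) batches
    fmt.Println(stream.Size())       // number of buffered tokens, EOF included
    fmt.Println(stream.GetAllText()) // concatenated text of all buffered tokens before EOF
}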
@@ -1,171 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "sort"
    "sync"
)

type DFA struct {
    // atnStartState is the ATN state in which this was created
    atnStartState DecisionState

    decision int

    // states is all the DFA states. Use Map to get the old state back; Set can only
    // indicate whether it is there.
    states   map[int]*DFAState
    statesMu sync.RWMutex

    s0   *DFAState
    s0Mu sync.RWMutex

    // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
    // True if the DFA is for a precedence decision and false otherwise.
    precedenceDfa bool
}

func NewDFA(atnStartState DecisionState, decision int) *DFA {
    return &DFA{
        atnStartState: atnStartState,
        decision:      decision,
        states:        make(map[int]*DFAState),
    }
}

// getPrecedenceStartState gets the start state for the current precedence and
// returns the start state corresponding to the specified precedence if a start
// state exists for the specified precedence and nil otherwise. d must be a
// precedence DFA. See also isPrecedenceDfa.
func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
    if !d.precedenceDfa {
        panic("only precedence DFAs may contain a precedence start state")
    }

    d.s0Mu.RLock()
    defer d.s0Mu.RUnlock()

    // s0.edges is never nil for a precedence DFA
    if precedence < 0 || precedence >= len(d.s0.edges) {
        return nil
    }

    return d.s0.edges[precedence]
}

// setPrecedenceStartState sets the start state for the current precedence. d
// must be a precedence DFA. See also isPrecedenceDfa.
func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
    if !d.precedenceDfa {
        panic("only precedence DFAs may contain a precedence start state")
    }

    if precedence < 0 {
        return
    }

    d.s0Mu.Lock()
    defer d.s0Mu.Unlock()

    // Synchronization on s0 here is ok. When the DFA is turned into a
    // precedence DFA, s0 will be initialized once and not updated again. s0.edges
    // is never nil for a precedence DFA.
    if precedence >= len(d.s0.edges) {
        d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...)
    }

    d.s0.edges[precedence] = startState
}

// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
// from the current DFA configuration, then d.states is cleared, the initial
// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
// store the start states for individual precedence values if precedenceDfa is
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
    if d.precedenceDfa != precedenceDfa {
        d.states = make(map[int]*DFAState)

        if precedenceDfa {
            precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))

            precedenceState.edges = make([]*DFAState, 0)
            precedenceState.isAcceptState = false
            precedenceState.requiresFullContext = false
            d.s0 = precedenceState
        } else {
            d.s0 = nil
        }

        d.precedenceDfa = precedenceDfa
    }
}

func (d *DFA) getS0() *DFAState {
    d.s0Mu.RLock()
    defer d.s0Mu.RUnlock()
    return d.s0
}

func (d *DFA) setS0(s *DFAState) {
    d.s0Mu.Lock()
    defer d.s0Mu.Unlock()
    d.s0 = s
}

func (d *DFA) getState(hash int) (*DFAState, bool) {
    d.statesMu.RLock()
    defer d.statesMu.RUnlock()
    s, ok := d.states[hash]
    return s, ok
}

func (d *DFA) setState(hash int, state *DFAState) {
    d.statesMu.Lock()
    defer d.statesMu.Unlock()
    d.states[hash] = state
}

func (d *DFA) numStates() int {
    d.statesMu.RLock()
    defer d.statesMu.RUnlock()
    return len(d.states)
}

type dfaStateList []*DFAState

func (d dfaStateList) Len() int           { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }

// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
    vs := make([]*DFAState, 0, len(d.states))

    for _, v := range d.states {
        vs = append(vs, v)
    }

    sort.Sort(dfaStateList(vs))

    return vs
}

func (d *DFA) String(literalNames []string, symbolicNames []string) string {
    if d.s0 == nil {
        return ""
    }

    return NewDFASerializer(d, literalNames, symbolicNames).String()
}

func (d *DFA) ToLexerString() string {
    if d.s0 == nil {
        return ""
    }

    return NewLexerDFASerializer(d).String()
}
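The edge-table growth in setPrecedenceStartState is the standard "extend a slice until an index becomes addressable" idiom. A standalone sketch of the same pattern, in plain Go with no ANTLR types:

package main

import "fmt"

// growTo extends s with zero values until index i is addressable,
// mirroring how setPrecedenceStartState grows s0.edges.
func growTo(s []int, i int) []int {
    if i >= len(s) {
        s = append(s, make([]int, i+1-len(s))...)
    }
    return s
}

func main() {
    edges := []int{1, 2}
    edges = growTo(edges, 5)
    edges[5] = 42
    fmt.Println(edges) // [1 2 0 0 0 42]
}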
@@ -1,152 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "strconv"
)

// DFASerializer is a DFA walker that knows how to dump them to serialized
// strings.
type DFASerializer struct {
    dfa           *DFA
    literalNames  []string
    symbolicNames []string
}

func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
    if literalNames == nil {
        literalNames = make([]string, 0)
    }

    if symbolicNames == nil {
        symbolicNames = make([]string, 0)
    }

    return &DFASerializer{
        dfa:           dfa,
        literalNames:  literalNames,
        symbolicNames: symbolicNames,
    }
}

func (d *DFASerializer) String() string {
    if d.dfa.s0 == nil {
        return ""
    }

    buf := ""
    states := d.dfa.sortedStates()

    for _, s := range states {
        if s.edges != nil {
            n := len(s.edges)

            for j := 0; j < n; j++ {
                t := s.edges[j]

                if t != nil && t.stateNumber != 0x7FFFFFFF {
                    buf += d.GetStateString(s)
                    buf += "-"
                    buf += d.getEdgeLabel(j)
                    buf += "->"
                    buf += d.GetStateString(t)
                    buf += "\n"
                }
            }
        }
    }

    if len(buf) == 0 {
        return ""
    }

    return buf
}

func (d *DFASerializer) getEdgeLabel(i int) string {
    if i == 0 {
        return "EOF"
    } else if d.literalNames != nil && i-1 < len(d.literalNames) {
        return d.literalNames[i-1]
    } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
        return d.symbolicNames[i-1]
    }

    return strconv.Itoa(i - 1)
}

func (d *DFASerializer) GetStateString(s *DFAState) string {
    var a, b string

    if s.isAcceptState {
        a = ":"
    }

    if s.requiresFullContext {
        b = "^"
    }

    baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b

    if s.isAcceptState {
        if s.predicates != nil {
            return baseStateStr + "=>" + fmt.Sprint(s.predicates)
        }

        return baseStateStr + "=>" + fmt.Sprint(s.prediction)
    }

    return baseStateStr
}

type LexerDFASerializer struct {
    *DFASerializer
}

func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
    return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
}

func (l *LexerDFASerializer) getEdgeLabel(i int) string {
    return "'" + string(i) + "'"
}

func (l *LexerDFASerializer) String() string {
    if l.dfa.s0 == nil {
        return ""
    }

    buf := ""
    states := l.dfa.sortedStates()

    for i := 0; i < len(states); i++ {
        s := states[i]

        if s.edges != nil {
            n := len(s.edges)

            for j := 0; j < n; j++ {
                t := s.edges[j]

                if t != nil && t.stateNumber != 0x7FFFFFFF {
                    buf += l.GetStateString(s)
                    buf += "-"
                    buf += l.getEdgeLabel(j)
                    buf += "->"
                    buf += l.GetStateString(t)
                    buf += "\n"
                }
            }
        }
    }

    if len(buf) == 0 {
        return ""
    }

    return buf
}
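The serializer emits one edge per line in the form state-label->state, with ":" marking accept states and "^" marking full-context states. The repeated buf += concatenation above copies the whole buffer on each append; a strings.Builder variant of the same loop avoids that. A sketch with made-up edge data, not produced by running the code above:

package main

import (
    "fmt"
    "strings"
)

// Same edge-per-line format as DFASerializer.String, accumulated in a
// strings.Builder instead of repeated string concatenation.
func main() {
    type edge struct{ from, label, to string }
    edges := []edge{
        {"s0", "'a'", "s1"},
        {"s1", "'b'", ":s2=>1"}, // ':' marks an accept state, '=>1' its prediction
    }

    var b strings.Builder
    for _, e := range edges {
        b.WriteString(e.from + "-" + e.label + "->" + e.to + "\n")
    }
    fmt.Print(b.String())
}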
@@ -1,166 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
)

// PredPrediction maps a predicate to a predicted alternative.
type PredPrediction struct {
    alt  int
    pred SemanticContext
}

func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
    return &PredPrediction{alt: alt, pred: pred}
}

func (p *PredPrediction) String() string {
    return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}

// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1a2..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
// conversion, therefore, the subset T would be a bitset representing the set of
// states the ATN could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
// state (ala normal conversion) and a RuleContext describing the chain of rules
// (if any) followed to arrive at that state.
//
// A DFAState may have multiple references to a particular state, but with
// different ATN contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
    stateNumber int
    configs     ATNConfigSet

    // edges elements point to the target of the symbol. Shift up by 1 so (-1)
    // Token.EOF maps to the first element.
    edges []*DFAState

    isAcceptState bool

    // prediction is the ttype we match or alt we predict if the state is accept.
    // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
    // requiresFullContext.
    prediction int

    lexerActionExecutor *LexerActionExecutor

    // requiresFullContext indicates it was created during an SLL prediction that
    // discovered a conflict between the configurations in the state. Future
    // ParserATNSimulator.execATN invocations immediately jump doing
    // full context prediction if true.
    requiresFullContext bool

    // predicates is the predicates associated with the ATN configurations of the
    // DFA state during SLL parsing. When we have predicates, requiresFullContext
    // is false, since full context prediction evaluates predicates on-the-fly.
    // If d is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
    //
    // We only use these for non-requiresFullContext but conflicting states. That
    // means we know from the context (it's $ or we don't dip into outer context)
    // that it's an ambiguity not a conflict.
    //
    // This list is computed by
    // ParserATNSimulator.predicateDFAState.
    predicates []*PredPrediction
}

func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
    if configs == nil {
        configs = NewBaseATNConfigSet(false)
    }

    return &DFAState{configs: configs, stateNumber: stateNumber}
}

// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
func (d *DFAState) GetAltSet() *Set {
    alts := NewSet(nil, nil)

    if d.configs != nil {
        for _, c := range d.configs.GetItems() {
            alts.add(c.GetAlt())
        }
    }

    if alts.length() == 0 {
        return nil
    }

    return alts
}

func (d *DFAState) setPrediction(v int) {
    d.prediction = v
}

// equals returns whether d equals other. Two DFAStates are equal if their ATN
// configuration sets are the same. This method is used to see if a state
// already exists.
//
// Because the number of alternatives and number of ATN configurations are
// finite, there is a finite number of DFA states that can be processed. This is
// necessary to show that the algorithm terminates.
//
// Cannot test the DFA state numbers here because in
// ParserATNSimulator.addDFAState we need to know if any other state exists that
// has d exact set of ATN configurations. The stateNumber is irrelevant.
func (d *DFAState) equals(other interface{}) bool {
    if d == other {
        return true
    } else if _, ok := other.(*DFAState); !ok {
        return false
    }

    return d.configs.Equals(other.(*DFAState).configs)
}
func (d *DFAState) String() string {
    var s string
    if d.isAcceptState {
        if d.predicates != nil {
            s = "=>" + fmt.Sprint(d.predicates)
        } else {
            s = "=>" + fmt.Sprint(d.prediction)
        }
    }

    return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
}
func (d *DFAState) hash() int {
    h := murmurInit(11)

    c := 1
    if d.isAcceptState {
        if d.predicates != nil {
            for _, p := range d.predicates {
                h = murmurUpdate(h, p.alt)
                h = murmurUpdate(h, p.pred.hash())
                c += 2
            }
        } else {
            h = murmurUpdate(h, d.prediction)
            c += 1
        }
    }

    h = murmurUpdate(h, d.configs.hash())
    return murmurFinish(h, c)
}
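The getState/setState pair in dfa.go keys states by this hash, relying on the convention that equal configuration sets hash identically and are then confirmed with equals. A toy illustration of that hash-then-compare lookup pattern, with a hypothetical item type standing in for DFAState:

package main

import "fmt"

type item struct {
    key  string // stands in for the ATN configuration set
    name string
}

// hash stands in for DFAState.hash: equal keys produce equal hashes.
func (i item) hash() int {
    h := 0
    for _, r := range i.key {
        h = 31*h + int(r)
    }
    return h
}

func main() {
    states := map[int]item{}
    a := item{key: "configs-1", name: "s0"}
    states[a.hash()] = a

    // A candidate with an equal "configuration set" finds the original
    // entry, much as ParserATNSimulator.addDFAState reuses existing states.
    b := item{key: "configs-1", name: "candidate"}
    if existing, ok := states[b.hash()]; ok && existing.key == b.key {
        fmt.Println("reused", existing.name)
    }
}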
@@ -1,111 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "strconv"
)

// This implementation of {@link ANTLRErrorListener} can be used to identify
// certain potential correctness and performance problems in grammars. "reports"
// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
// message.
//
// <ul>
// <li><b>Ambiguities</b>: These are cases where more than one path through the
// grammar can Match the input.</li>
// <li><b>Weak context sensitivity</b>: These are cases where full-context
// prediction resolved an SLL conflict to a unique alternative which equaled the
// minimum alternative of the SLL conflict.</li>
// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
// full-context prediction resolved an SLL conflict to a unique alternative,
// <em>and</em> the minimum alternative of the SLL conflict was found to not be
// a truly viable alternative. Two-stage parsing cannot be used for inputs where
// d situation occurs.</li>
// </ul>

type DiagnosticErrorListener struct {
    *DefaultErrorListener

    exactOnly bool
}

func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
    n := new(DiagnosticErrorListener)

    // whether all ambiguities or only exact ambiguities are Reported.
    n.exactOnly = exactOnly
    return n
}

func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
    if d.exactOnly && !exact {
        return
    }
    msg := "reportAmbiguity d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ": ambigAlts=" +
        d.getConflictingAlts(ambigAlts, configs).String() +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
    msg := "reportAttemptingFullContext d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
    msg := "reportContextSensitivity d=" +
        d.getDecisionDescription(recognizer, dfa) +
        ", input='" +
        recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
    recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
    decision := dfa.decision
    ruleIndex := dfa.atnStartState.GetRuleIndex()

    ruleNames := recognizer.GetRuleNames()
    if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
        return strconv.Itoa(decision)
    }
    ruleName := ruleNames[ruleIndex]
    if ruleName == "" {
        return strconv.Itoa(decision)
    }
    return strconv.Itoa(decision) + " (" + ruleName + ")"
}

// Computes the set of conflicting or ambiguous alternatives from a
// configuration set, if that information was not already provided by the
// parser.
//
// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
// Reported by the parser.
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
    if ReportedAlts != nil {
        return ReportedAlts
    }
    result := NewBitSet()
    for _, c := range set.GetItems() {
        result.add(c.GetAlt())
    }

    return result
}
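In practice this listener is attached to a generated parser to surface grammar ambiguities during development. A minimal sketch, where NewMyLexer, NewMyParser, and the Expr start rule are hypothetical generated names, not part of the runtime:

package main

import "github.com/antlr/antlr4/runtime/Go/antlr"

func main() {
    input := antlr.NewInputStream("a + b")
    lexer := NewMyLexer(input) // hypothetical generated lexer
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    parser := NewMyParser(tokens) // hypothetical generated parser

    // Report only exact ambiguities; pass false to see every potential one.
    parser.AddErrorListener(antlr.NewDiagnosticErrorListener(true))

    parser.Expr() // hypothetical start rule
}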
@@ -1,108 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "os"
    "strconv"
)

// Provides an empty default implementation of {@link ANTLRErrorListener}. The
// default implementation of each method does nothing, but can be overridden as
// necessary.

type ErrorListener interface {
    SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
    ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
    ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
    ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
}

type DefaultErrorListener struct {
}

func NewDefaultErrorListener() *DefaultErrorListener {
    return new(DefaultErrorListener)
}

func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
}

func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
}

type ConsoleErrorListener struct {
    *DefaultErrorListener
}

func NewConsoleErrorListener() *ConsoleErrorListener {
    return new(ConsoleErrorListener)
}

// Provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

// {@inheritDoc}
//
// <p>
// This implementation prints messages to {@link System//err} containing the
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
// the following format.</p>
//
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
    fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}

type ProxyErrorListener struct {
    *DefaultErrorListener
    delegates []ErrorListener
}

func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
    if delegates == nil {
        panic("delegates is not provided")
    }
    l := new(ProxyErrorListener)
    l.delegates = delegates
    return l
}

func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
    for _, d := range p.delegates {
        d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
    }
}

func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
    }
}

func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
    }
}

func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
    for _, d := range p.delegates {
        d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
    }
}
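Embedding DefaultErrorListener is the usual way to implement only the callbacks you care about; the embedded struct supplies no-op versions of the rest. A small sketch of a listener that collects syntax errors into a slice, written as if it lived alongside the runtime types above (the errorCollector type is made up for illustration):

package antlr

import "fmt"

// errorCollector records every syntax error instead of printing it.
// Embedding *DefaultErrorListener supplies no-op implementations of the
// remaining ErrorListener methods.
type errorCollector struct {
    *DefaultErrorListener
    errors []string
}

func (c *errorCollector) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
    c.errors = append(c.errors, fmt.Sprintf("%d:%d %s", line, column, msg))
}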
@@ -1,758 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "reflect"
    "strconv"
    "strings"
)

type ErrorStrategy interface {
    reset(Parser)
    RecoverInline(Parser) Token
    Recover(Parser, RecognitionException)
    Sync(Parser)
    inErrorRecoveryMode(Parser) bool
    ReportError(Parser, RecognitionException)
    ReportMatch(Parser)
}
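Because ErrorStrategy is an interface, the recovery policy can be swapped on a parser. A minimal sketch using the runtime's BailErrorStrategy for fail-fast parsing; NewMyLexer, NewMyParser, and the Expr start rule are hypothetical generated names:

package main

import "github.com/antlr/antlr4/runtime/Go/antlr"

func main() {
    input := antlr.NewInputStream("a + b")
    lexer := NewMyLexer(input) // hypothetical generated lexer
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    p := NewMyParser(tokens) // hypothetical generated parser

    // Replace the default recovery: the first syntax error panics instead
    // of attempting single-token insertion/deletion and resynchronization.
    p.SetErrorHandler(antlr.NewBailErrorStrategy())

    p.Expr() // hypothetical start rule
}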
// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
    errorRecoveryMode bool
    lastErrorIndex    int
    lastErrorStates   *IntervalSet
}

var _ ErrorStrategy = &DefaultErrorStrategy{}

func NewDefaultErrorStrategy() *DefaultErrorStrategy {
    d := new(DefaultErrorStrategy)

    // Indicates whether the error strategy is currently "recovering from an
    // error". This is used to suppress Reporting multiple error messages while
    // attempting to recover from a detected syntax error.
    //
    // @see //inErrorRecoveryMode
    d.errorRecoveryMode = false

    // The index into the input stream where the last error occurred.
    // This is used to prevent infinite loops where an error is found
    // but no token is consumed during recovery...another error is found,
    // ad nauseum. This is a failsafe mechanism to guarantee that at least
    // one token/tree node is consumed for two errors.
    d.lastErrorIndex = -1
    d.lastErrorStates = nil
    return d
}

// <p>The default implementation simply calls {@link //endErrorCondition} to
// ensure that the handler is not in error recovery mode.</p>
func (d *DefaultErrorStrategy) reset(recognizer Parser) {
    d.endErrorCondition(recognizer)
}

// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
    d.errorRecoveryMode = true
}

func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
    return d.errorRecoveryMode
}

// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
    d.errorRecoveryMode = false
    d.lastErrorStates = nil
    d.lastErrorIndex = -1
}

// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
    d.endErrorCondition(recognizer)
}

// {@inheritDoc}
//
// <p>The default implementation returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
// and dispatches the Reporting task based on the runtime type of {@code e}
// according to the following table.</p>
//
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //ReportNoViableAlternative}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //ReportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //ReportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
    // if we've already Reported an error and have not Matched a token
    // yet successfully, don't Report any errors.
    if d.inErrorRecoveryMode(recognizer) {
        return // don't Report spurious errors
    }
    d.beginErrorCondition(recognizer)

    switch t := e.(type) {
    default:
        fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
        // fmt.Println(e.stack)
        recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
    case *NoViableAltException:
        d.ReportNoViableAlternative(recognizer, t)
    case *InputMisMatchException:
        d.ReportInputMisMatch(recognizer, t)
    case *FailedPredicateException:
        d.ReportFailedPredicate(recognizer, t)
    }
}

// {@inheritDoc}
//
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
    if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
        d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
        // uh oh, another error at same token index and previously-Visited
        // state in ATN must be a case where LT(1) is in the recovery
        // token set so nothing got consumed. Consume a single token
        // at least to prevent an infinite loop d is a failsafe.
        recognizer.Consume()
    }
    d.lastErrorIndex = recognizer.GetInputStream().Index()
    if d.lastErrorStates == nil {
        d.lastErrorStates = NewIntervalSet()
    }
    d.lastErrorStates.addOne(recognizer.GetState())
    followSet := d.getErrorRecoverySet(recognizer)
    d.consumeUntil(recognizer, followSet)
}

// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what were expecting
// at d point in the ATN. You can call d anytime but ANTLR only
// generates code to check before subrules/loops and each iteration.
//
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
//
// <pre>
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
//
// At the start of a sub rule upon error, {@link //Sync} performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows
// the subrule.</p>
//
// <p>During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.</p>
//
// <p><strong>ORIGINS</strong></p>
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
//
// <pre>
// classfunc : 'class' ID '{' member* '}'
// </pre>
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
// <p>This functionality cost a little bit of effort because the parser has to
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off d
// functionality by simply overriding d method as a blank { }.</p>
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
    // If already recovering, don't try to Sync
    if d.inErrorRecoveryMode(recognizer) {
        return
    }

    s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
    la := recognizer.GetTokenStream().LA(1)

    // try cheaper subset first might get lucky. seems to shave a wee bit off
    nextTokens := recognizer.GetATN().NextTokens(s, nil)
    if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
        return
    }

    switch s.GetStateType() {
    case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
        // Report error and recover if possible
        if d.SingleTokenDeletion(recognizer) != nil {
            return
        }
        panic(NewInputMisMatchException(recognizer))
    case ATNStatePlusLoopBack, ATNStateStarLoopBack:
        d.ReportUnwantedToken(recognizer)
        expecting := NewIntervalSet()
        expecting.addSet(recognizer.GetExpectedTokens())
        whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
        d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
    default:
        // do nothing if we can't identify the exact kind of ATN state
    }
}
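The comment above notes that Sync's aggressive resynchronization can be turned off by overriding the method with an empty body. A sketch of such an override via embedding, written as if alongside the runtime types (the noSyncStrategy name is illustrative):

package antlr

// noSyncStrategy disables the Sync mechanism while keeping every other
// DefaultErrorStrategy behavior through embedding.
type noSyncStrategy struct {
    *DefaultErrorStrategy
}

// Sync is deliberately a no-op, as the comment above suggests for cases
// where the per-iteration follow-set comparison is too costly.
func (s *noSyncStrategy) Sync(recognizer Parser) {}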
// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
    tokens := recognizer.GetTokenStream()
    var input string
    if tokens != nil {
        if e.startToken.GetTokenType() == TokenEOF {
            input = "<EOF>"
        } else {
            input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
        }
    } else {
        input = "<unknown input>"
    }
    msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
    msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
        " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
    ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
    msg := "rule " + ruleName + " " + e.message
    recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This method is called to Report a syntax error which requires the removal
// of a token from the input stream. At the time d method is called, the
// erroneous symbol is current {@code LT(1)} symbol and has not yet been
// removed from the input stream. When d method returns,
// {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// single-token deletion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
    if d.inErrorRecoveryMode(recognizer) {
        return
    }
    d.beginErrorCondition(recognizer)
    t := recognizer.GetCurrentToken()
    tokenName := d.GetTokenErrorDisplay(t)
    expecting := d.GetExpectedTokens(recognizer)
    msg := "extraneous input " + tokenName + " expecting " +
        expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
    recognizer.NotifyErrorListeners(msg, t, nil)
}

// This method is called to Report a syntax error which requires the
// insertion of a missing token into the input stream. At the time d
// method is called, the missing token has not yet been inserted. When d
// method returns, {@code recognizer} is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// single-token insertion as a viable recovery strategy for a mismatched
// input error.</p>
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
    if d.inErrorRecoveryMode(recognizer) {
        return
    }
    d.beginErrorCondition(recognizer)
    t := recognizer.GetCurrentToken()
    expecting := d.GetExpectedTokens(recognizer)
    msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
        " at " + d.GetTokenErrorDisplay(t)
    recognizer.NotifyErrorListeners(msg, t, nil)
}
// <p>The default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, d method panics an
// {@link InputMisMatchException}.</p>
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p>
//
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
//
// <p>If current token (at {@code LA(1)}) is consistent with what could come
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the Match operation.</p>
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p>
//
// <p><strong>EXAMPLE</strong></p>
//
// <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
// the parser returns from the nested call to {@code expr}, it will have
// call chain:</p>
//
// <pre>
// stat &rarr expr &rarr atom
// </pre>
//
// and it will be trying to Match the {@code ')'} at d point in the
// derivation:
//
// <pre>
// => ID '=' '(' INT ')' ('+' atom)* ';'
//                    ^
// </pre>
//
// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
    // SINGLE TOKEN DELETION
    MatchedSymbol := d.SingleTokenDeletion(recognizer)
    if MatchedSymbol != nil {
        // we have deleted the extra token.
        // now, move past ttype token as if all were ok
        recognizer.Consume()
        return MatchedSymbol
    }
    // SINGLE TOKEN INSERTION
    if d.SingleTokenInsertion(recognizer) {
        return d.GetMissingSymbol(recognizer)
    }
    // even that didn't work must panic the exception
    panic(NewInputMisMatchException(recognizer))
}
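The decision order in RecoverInline (try deletion, then insertion, then give up) can be seen in isolation. A toy sketch of that decision, with token streams reduced to ints and all names made up; the real methods consult the ATN rather than plain sets:

package main

import "fmt"

// recoverInline mirrors RecoverInline's decision order: if LA(2) is what we
// expected, delete LA(1); if LA(1) fits after the expected token, insert the
// missing token; otherwise fail.
func recoverInline(la1, la2 int, expecting, followOfExpected map[int]bool) string {
    if expecting[la2] {
        return "delete LA(1)" // single-token deletion
    }
    if followOfExpected[la1] {
        return "insert missing token" // single-token insertion
    }
    return "panic(InputMisMatchException)"
}

func main() {
    expecting := map[int]bool{41: true} // expecting ')'
    follow := map[int]bool{59: true}    // ';' may follow the ')'

    // The i=(3; example above: LA(1) is ';', so insertion is chosen.
    fmt.Println(recoverInline(59, 10, expecting, follow))
}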
|
|
||||||
//
|
|
||||||
// This method implements the single-token insertion inline error recovery
|
|
||||||
// strategy. It is called by {@link //recoverInline} if the single-token
|
|
||||||
// deletion strategy fails to recover from the mismatched input. If this
|
|
||||||
// method returns {@code true}, {@code recognizer} will be in error recovery
|
|
||||||
// mode.
|
|
||||||
//
|
|
||||||
// <p>This method determines whether or not single-token insertion is viable by
|
|
||||||
// checking if the {@code LA(1)} input symbol could be successfully Matched
|
|
||||||
// if it were instead the {@code LA(2)} symbol. If d method returns
|
|
||||||
// {@code true}, the caller is responsible for creating and inserting a
|
|
||||||
// token with the correct type to produce d behavior.</p>
|
|
||||||
//
|
|
||||||
// @param recognizer the parser instance
|
|
||||||
// @return {@code true} if single-token insertion is a viable recovery
|
|
||||||
// strategy for the current mismatched input, otherwise {@code false}
|
|
||||||
//
|
|
||||||
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
|
|
||||||
currentSymbolType := recognizer.GetTokenStream().LA(1)
|
|
||||||
// if current token is consistent with what could come after current
|
|
||||||
// ATN state, then we know we're missing a token error recovery
|
|
||||||
// is free to conjure up and insert the missing token
|
|
||||||
atn := recognizer.GetInterpreter().atn
|
|
||||||
currentState := atn.states[recognizer.GetState()]
|
|
||||||
next := currentState.GetTransitions()[0].getTarget()
|
|
||||||
expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
|
|
||||||
if expectingAtLL2.contains(currentSymbolType) {
|
|
||||||
d.ReportMissingToken(recognizer)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}

// This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, this method calls
// {@link //ReportUnwantedToken} to Report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning, {@link //ReportMatch} is called to signal a successful
// Match.</p>
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
	NextTokenType := recognizer.GetTokenStream().LA(2)
	expecting := d.GetExpectedTokens(recognizer)
	if expecting.contains(NextTokenType) {
		d.ReportUnwantedToken(recognizer)
		recognizer.Consume() // simply delete the extra token
		// we want to return the token we're actually Matching
		MatchedSymbol := recognizer.GetCurrentToken()
		d.ReportMatch(recognizer) // we know the current token is correct
		return MatchedSymbol
	}

	return nil
}

// Conjure up a missing token during error recovery.
//
// The recognizer attempts to recover from single missing
// symbols. But, actions might refer to that missing symbol.
// For example, x=ID {f($x)}. The action clearly assumes
// that there has been an identifier Matched previously and that
// $x points at that token. If that token is missing, but
// the next token in the stream is what we want, we assume that
// this token is missing and we keep going. Because we
// have to return some token to replace the missing token,
// we have to conjure one up. This method gives the user control
// over the tokens returned for missing tokens. Mostly,
// you will want to create something special for identifier
// tokens. For literals such as '{' and ',', the default
// action in the parser or tree parser works. It simply creates
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override this method to create the appropriate tokens.
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
	currentSymbol := recognizer.GetCurrentToken()
	expecting := d.GetExpectedTokens(recognizer)
	expectedTokenType := expecting.first()
	var tokenText string

	if expectedTokenType == TokenEOF {
		tokenText = "<missing EOF>"
	} else {
		ln := recognizer.GetLiteralNames()
		if expectedTokenType > 0 && expectedTokenType < len(ln) {
			tokenText = "<missing " + ln[expectedTokenType] + ">"
		} else {
			tokenText = "<missing undefined>" // TODO: matches the JS impl
		}
	}
	current := currentSymbol
	lookback := recognizer.GetTokenStream().LT(-1)
	if current.GetTokenType() == TokenEOF && lookback != nil {
		current = lookback
	}

	tf := recognizer.GetTokenFactory()

	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
}

func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
	return recognizer.GetExpectedTokens()
}

// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override this in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
	if t == nil {
		return "<no token>"
	}
	s := t.GetText()
	if s == "" {
		if t.GetTokenType() == TokenEOF {
			s = "<EOF>"
		} else {
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
		}
	}
	return d.escapeWSAndQuote(s)
}

func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)
	return "'" + s + "'"
}
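
// Illustrative example (not part of the original source): whitespace is
// escaped and the result is single-quoted.
//
//	d.escapeWSAndQuote("a\tb") // returns 'a\tb' with the tab spelled out as \t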

// Compute the error recovery set for the current rule. During
// rule invocation, the parser pushes the set of tokens that can
// follow that rule reference on the stack. This amounts to
// computing FIRST of what follows the rule reference in the
// enclosing rule. See LinearApproximator.FIRST().
// This local follow set only includes tokens
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
// EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
// thing to do is to consume tokens until you see something that
// can legally follow a call to r *or* any rule that called r.
// You don't want the exact set of viable next tokens because the
// input might just be missing a token--you might consume the
// rest of the input looking for one of the missing tokens.
//
// Consider grammar:
//
//	a : '[' b ']'
//	  | '(' b ')'
//
//	b : c '^' INT
//	c : ID
//	  | INT
//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
//
//	FOLLOW(b1_in_a) = FIRST(']') = ']'
//	FOLLOW(b2_in_a) = FIRST(')') = ')'
//	FOLLOW(c_in_b)  = FIRST('^') = '^'
//
// Upon erroneous input "[]", the call chain is
//
//	a -> b -> c
//
// and, hence, the follow context stack is:
//
//	depth  follow set  start of rule execution
//	0      <EOF>       a (from main())
//	1      ']'         b
//	2      '^'         c
//
// Notice that ')' is not included, because b would have to have
// been called from a different context in rule a for ')' to be
// included.
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
// At this point, it gets a mismatched token error and panics with an
// exception (since LA(1) is not in the viable following token
// set). The rule exception handler tries to recover, but finds
// the same recovery set and doesn't consume anything. Rule b
// exits normally, returning to rule a. Now it finds the ']' (and
// with the successful Match exits errorRecovery mode).
//
// So, you can see that the parser walks up the call chain looking
// for the token that was a member of the recovery set.
//
// Errors are not generated in errorRecovery mode.
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
//
// and
//
// "A note on error recovery in recursive descent parsers":
// http://portal.acm.org/citation.cfm?id=947902.947905
//
// Later, Josef Grosch had some good ideas:
//
// "Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers":
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
// Like Grosch, I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
	atn := recognizer.GetInterpreter().atn
	ctx := recognizer.GetParserRuleContext()
	recoverSet := NewIntervalSet()
	for ctx != nil && ctx.GetInvokingState() >= 0 {
		// compute what follows whoever invoked us
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
		recoverSet.addSet(follow)
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	recoverSet.removeOne(TokenEpsilon)
	return recoverSet
}

// Consume tokens until one Matches the given token set.
func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
	ttype := recognizer.GetTokenStream().LA(1)
	for ttype != TokenEOF && !set.contains(ttype) {
		recognizer.Consume()
		ttype = recognizer.GetTokenStream().LA(1)
	}
}

// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// by immediately canceling the parse operation with a
// {@link ParseCancellationException}. The implementation ensures that the
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// <p>
// This error strategy is useful in the following scenarios.</p>
//
// <ul>
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
// when the result will be ignored either way.</li>
// </ul>
//
// <p>
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
//
// @see Parser//setErrorHandler(ANTLRErrorStrategy)

type BailErrorStrategy struct {
	*DefaultErrorStrategy
}

var _ ErrorStrategy = &BailErrorStrategy{}

func NewBailErrorStrategy() *BailErrorStrategy {

	b := new(BailErrorStrategy)

	b.DefaultErrorStrategy = NewDefaultErrorStrategy()

	return b
}

// Instead of recovering from exception {@code e}, re-panic it wrapped
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
	context := recognizer.GetParserRuleContext()
	for context != nil {
		context.SetException(e)
		context = context.GetParent().(ParserRuleContext)
	}
	panic(NewParseCancellationException()) // TODO: we don't emit e properly
}

// Make sure we don't attempt to recover inline; if the parser
// successfully recovers, it won't panic with an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
	b.Recover(recognizer, NewInputMisMatchException(recognizer))

	return nil
}

// Make sure we don't attempt to recover from problems in subrules.
func (b *BailErrorStrategy) Sync(recognizer Parser) {
	// pass
}
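
// Illustrative usage sketch (not part of the original source; "NewMyParser"
// and its "Start" rule are hypothetical generated names). The panic raised by
// Recover can be trapped with recover() for two-stage parsing:
//
//	p := NewMyParser(tokens)
//	p.SetErrorHandler(antlr.NewBailErrorStrategy())
//	defer func() {
//		if r := recover(); r != nil {
//			if _, ok := r.(*antlr.ParseCancellationException); ok {
//				// first stage failed; re-parse with the default strategy
//			}
//		}
//	}()
//	tree := p.Start()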

@ -1,241 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
// 3 kinds of errors: prediction errors, failed predicate errors, and
// mismatched input errors. In each case, the parser knows where it is
// in the input, where it is in the ATN, the rule invocation stack,
// and what kind of problem occurred.

type RecognitionException interface {
	GetOffendingToken() Token
	GetMessage() string
	GetInputStream() IntStream
}

type BaseRecognitionException struct {
	message        string
	recognizer     Recognizer
	offendingToken Token
	offendingState int
	ctx            RuleContext
	input          IntStream
}

func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {

	// TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int

	t := new(BaseRecognitionException)

	t.message = message
	t.recognizer = recognizer
	t.input = input
	t.ctx = ctx
	// The current {@link Token} when an error occurred. Since not all streams
	// support accessing symbols by index, we have to track the {@link Token}
	// instance itself.
	t.offendingToken = nil
	// Get the ATN state number the parser was in at the time the error
	// occurred. For {@link NoViableAltException} and
	// {@link LexerNoViableAltException} exceptions, this is the
	// {@link DecisionState} number. For others, it is the state whose outgoing
	// edge we couldn't Match.
	t.offendingState = -1
	if t.recognizer != nil {
		t.offendingState = t.recognizer.GetState()
	}

	return t
}

func (b *BaseRecognitionException) GetMessage() string {
	return b.message
}

func (b *BaseRecognitionException) GetOffendingToken() Token {
	return b.offendingToken
}

func (b *BaseRecognitionException) GetInputStream() IntStream {
	return b.input
}

// <p>If the state number is not known, this returns -1.</p>

// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was thrown.
//
// <p>If the set of expected tokens is not known and could not be computed,
// this method returns {@code nil}.</p>
//
// @return The set of token types that could potentially follow the current
// state in the ATN, or {@code nil} if the information is not available.
func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
	if b.recognizer != nil {
		return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
	}

	return nil
}

func (b *BaseRecognitionException) String() string {
	return b.message
}

type LexerNoViableAltException struct {
	*BaseRecognitionException

	startIndex     int
	deadEndConfigs ATNConfigSet
}

func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {

	l := new(LexerNoViableAltException)

	l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)

	l.startIndex = startIndex
	l.deadEndConfigs = deadEndConfigs

	return l
}

func (l *LexerNoViableAltException) String() string {
	symbol := ""
	if l.startIndex >= 0 && l.startIndex < l.input.Size() {
		symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
	}
	return "LexerNoViableAltException" + symbol
}

type NoViableAltException struct {
	*BaseRecognitionException

	startToken     Token
	offendingToken Token
	ctx            ParserRuleContext
	deadEndConfigs ATNConfigSet
}

// Indicates that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error occurred. Reported by
// ReportNoViableAlternative().
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {

	if ctx == nil {
		ctx = recognizer.GetParserRuleContext()
	}

	if offendingToken == nil {
		offendingToken = recognizer.GetCurrentToken()
	}

	if startToken == nil {
		startToken = recognizer.GetCurrentToken()
	}

	if input == nil {
		input = recognizer.GetInputStream().(TokenStream)
	}

	n := new(NoViableAltException)
	n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)

	// Which configurations did we try at input.Index() that couldn't Match
	// input.LT(1)?
	n.deadEndConfigs = deadEndConfigs
	// The token object at the start index; the input stream might
	// not be buffering tokens, so get a reference to it. (At the
	// time the error occurred, of course, the stream needs to keep a
	// buffer of all of the tokens, but later we might not have access to those.)
	n.startToken = startToken
	n.offendingToken = offendingToken

	return n
}

type InputMisMatchException struct {
	*BaseRecognitionException
}

// This signifies any kind of mismatched input exception, such as
// when the current input does not Match the expected token.
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {

	i := new(InputMisMatchException)
	i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())

	i.offendingToken = recognizer.GetCurrentToken()

	return i
}

// A semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative, just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.

type FailedPredicateException struct {
	*BaseRecognitionException

	ruleIndex      int
	predicateIndex int
	predicate      string
}

func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {

	f := new(FailedPredicateException)

	f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())

	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
	trans := s.GetTransitions()[0]
	if trans2, ok := trans.(*PredicateTransition); ok {
		f.ruleIndex = trans2.ruleIndex
		f.predicateIndex = trans2.predIndex
	} else {
		f.ruleIndex = 0
		f.predicateIndex = 0
	}
	f.predicate = predicate
	f.offendingToken = recognizer.GetCurrentToken()

	return f
}

func (f *FailedPredicateException) formatMessage(predicate, message string) string {
	if message != "" {
		return message
	}

	return "failed predicate: {" + predicate + "}?"
}

type ParseCancellationException struct {
}

func NewParseCancellationException() *ParseCancellationException {
	return new(ParseCancellationException)
}

@ -1,49 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bytes"
	"io"
	"os"
)

// This is an InputStream that is loaded from a file all at once
// when you construct the object.

type FileStream struct {
	*InputStream

	filename string
}

func NewFileStream(fileName string) (*FileStream, error) {

	buf := bytes.NewBuffer(nil)

	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	_, err = io.Copy(buf, f)
	if err != nil {
		return nil, err
	}

	fs := new(FileStream)

	fs.filename = fileName
	s := buf.String()

	fs.InputStream = NewInputStream(s)

	return fs, nil
}

func (f *FileStream) GetSourceName() string {
	return f.filename
}
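
// Illustrative usage sketch (not part of the original source):
//
//	fs, err := antlr.NewFileStream("input.txt")
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(fs.GetSourceName()) // "input.txt"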

@ -1,113 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type InputStream struct {
	name  string
	index int
	data  []rune
	size  int
}

func NewInputStream(data string) *InputStream {

	is := new(InputStream)

	is.name = "<empty>"
	is.index = 0
	is.data = []rune(data)
	is.size = len(is.data) // number of runes

	return is
}

func (is *InputStream) reset() {
	is.index = 0
}

func (is *InputStream) Consume() {
	if is.index >= is.size {
		// assert is.LA(1) == TokenEOF
		panic("cannot consume EOF")
	}
	is.index++
}

func (is *InputStream) LA(offset int) int {

	if offset == 0 {
		return 0 // nil
	}
	if offset < 0 {
		offset++ // e.g., translate LA(-1) to use offset=0
	}
	pos := is.index + offset - 1

	if pos < 0 || pos >= is.size { // invalid
		return TokenEOF
	}

	return int(is.data[pos])
}
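
// Illustrative example (not part of the original source): lookahead is
// 1-based, and negative offsets look backwards at consumed input.
//
//	is := NewInputStream("abc")
//	is.LA(1)  // 'a': the next rune, not yet consumed
//	is.Consume()
//	is.LA(1)  // 'b'
//	is.LA(-1) // 'a': the rune just consumed
//	is.LA(3)  // TokenEOF: past the end of the buffer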

func (is *InputStream) LT(offset int) int {
	return is.LA(offset)
}

func (is *InputStream) Index() int {
	return is.index
}

func (is *InputStream) Size() int {
	return is.size
}

// mark/release do nothing; we have the entire buffer
func (is *InputStream) Mark() int {
	return -1
}

func (is *InputStream) Release(marker int) {
}

func (is *InputStream) Seek(index int) {
	if index <= is.index {
		is.index = index // just jump; don't update stream state (line, ...)
		return
	}
	// seek forward
	is.index = intMin(index, is.size)
}

func (is *InputStream) GetText(start int, stop int) string {
	if stop >= is.size {
		stop = is.size - 1
	}
	if start >= is.size {
		return ""
	}

	return string(is.data[start : stop+1])
}

func (is *InputStream) GetTextFromTokens(start, stop Token) string {
	if start != nil && stop != nil {
		return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
	}

	return ""
}

func (is *InputStream) GetTextFromInterval(i *Interval) string {
	return is.GetText(i.Start, i.Stop)
}

func (*InputStream) GetSourceName() string {
	return "Obtained from string"
}

func (is *InputStream) String() string {
	return string(is.data)
}

@ -1,16 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type IntStream interface {
	Consume()
	LA(int) int
	Mark() int
	Release(marker int)
	Index() int
	Seek(index int)
	Size() int
	GetSourceName() string
}

@ -1,296 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
	"strings"
)

type Interval struct {
	Start int
	Stop  int
}

/* stop is not included! */
func NewInterval(start, stop int) *Interval {
	i := new(Interval)

	i.Start = start
	i.Stop = stop
	return i
}

func (i *Interval) Contains(item int) bool {
	return item >= i.Start && item < i.Stop
}
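
// Illustrative example (not part of the original source): intervals are
// half-open, so Stop itself is excluded.
//
//	iv := NewInterval(1, 3)
//	iv.Contains(1) // true
//	iv.Contains(2) // true
//	iv.Contains(3) // false
//	iv.String()    // "1..2"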

func (i *Interval) String() string {
	if i.Start == i.Stop-1 {
		return strconv.Itoa(i.Start)
	}

	return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}

func (i *Interval) length() int {
	return i.Stop - i.Start
}

type IntervalSet struct {
	intervals []*Interval
	readOnly  bool
}

func NewIntervalSet() *IntervalSet {

	i := new(IntervalSet)

	i.intervals = nil
	i.readOnly = false

	return i
}

func (i *IntervalSet) first() int {
	if len(i.intervals) == 0 {
		return TokenInvalidType
	}

	return i.intervals[0].Start
}

func (i *IntervalSet) addOne(v int) {
	i.addInterval(NewInterval(v, v+1))
}

func (i *IntervalSet) addRange(l, h int) {
	i.addInterval(NewInterval(l, h+1))
}

func (i *IntervalSet) addInterval(v *Interval) {
	if i.intervals == nil {
		i.intervals = make([]*Interval, 0)
		i.intervals = append(i.intervals, v)
	} else {
		// find the insert position
		for k, interval := range i.intervals {
			// distinct range -> insert
			if v.Stop < interval.Start {
				i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
				return
			} else if v.Stop == interval.Start {
				i.intervals[k].Start = v.Start
				return
			} else if v.Start <= interval.Stop {
				i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))

				// if not at the end, merge potential overlaps with the next interval
				if k < len(i.intervals)-1 {
					l := i.intervals[k]
					r := i.intervals[k+1]
					// if r is contained in l
					if l.Stop >= r.Stop {
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					} else if l.Stop >= r.Start { // partial overlap
						i.intervals[k] = NewInterval(l.Start, r.Stop)
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					}
				}
				return
			}
		}
		// greater than any existing interval
		i.intervals = append(i.intervals, v)
	}
}
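
// Illustrative example (not part of the original source): adjacent and
// overlapping intervals are merged as they are added.
//
//	s := NewIntervalSet()
//	s.addRange(5, 7) // {5..7}
//	s.addRange(1, 2) // {1..2, 5..7}
//	s.addRange(3, 4) // {1..7}: [3,5) bridges the gap and merges with [5,8)
//	s.contains(6)    // true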

func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
	if other.intervals != nil {
		for k := 0; k < len(other.intervals); k++ {
			i2 := other.intervals[k]
			i.addInterval(NewInterval(i2.Start, i2.Stop))
		}
	}
	return i
}

func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
	result := NewIntervalSet()
	result.addInterval(NewInterval(start, stop+1))
	for j := 0; j < len(i.intervals); j++ {
		result.removeRange(i.intervals[j])
	}
	return result
}

func (i *IntervalSet) contains(item int) bool {
	if i.intervals == nil {
		return false
	}
	for k := 0; k < len(i.intervals); k++ {
		if i.intervals[k].Contains(item) {
			return true
		}
	}
	return false
}

func (i *IntervalSet) length() int {
	n := 0

	for _, v := range i.intervals {
		n += v.length()
	}

	return n
}

func (i *IntervalSet) removeRange(v *Interval) {
	if v.Start == v.Stop-1 {
		i.removeOne(v.Start)
	} else if i.intervals != nil {
		k := 0
		for n := 0; n < len(i.intervals); n++ {
			ni := i.intervals[k]
			// intervals are ordered
			if v.Stop <= ni.Start {
				return
			} else if v.Start > ni.Start && v.Stop < ni.Stop {
				i.intervals[k] = NewInterval(ni.Start, v.Start)
				x := NewInterval(v.Stop, ni.Stop)
				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
				return
			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				k = k - 1 // need another pass
			} else if v.Start < ni.Stop {
				i.intervals[k] = NewInterval(ni.Start, v.Start)
			} else if v.Stop < ni.Stop {
				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
			}
			k++
		}
	}
}

func (i *IntervalSet) removeOne(v int) {
	if i.intervals != nil {
		for k := 0; k < len(i.intervals); k++ {
			ki := i.intervals[k]
			// intervals are ordered
			if v < ki.Start {
				return
			} else if v == ki.Start && v == ki.Stop-1 {
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				return
			} else if v == ki.Start {
				i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
				return
			} else if v == ki.Stop-1 {
				i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
				return
			} else if v < ki.Stop-1 {
				x := NewInterval(ki.Start, v)
				ki.Start = v + 1
				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
				return
			}
		}
	}
}

func (i *IntervalSet) String() string {
	return i.StringVerbose(nil, nil, false)
}

func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {

	if i.intervals == nil {
		return "{}"
	} else if literalNames != nil || symbolicNames != nil {
		return i.toTokenString(literalNames, symbolicNames)
	} else if elemsAreChar {
		return i.toCharString()
	}

	return i.toIndexString()
}

func (i *IntervalSet) toCharString() string {
	// NOTE: was make([]string, len(i.intervals)), which left that many empty
	// strings in front of the appends below; allocate length 0 instead.
	names := make([]string, 0, len(i.intervals))

	for j := 0; j < len(i.intervals); j++ {
		v := i.intervals[j]
		if v.Stop == v.Start+1 {
			if v.Start == TokenEOF {
				names = append(names, "<EOF>")
			} else {
				names = append(names, "'"+string(rune(v.Start))+"'")
			}
		} else {
			names = append(names, "'"+string(rune(v.Start))+"'..'"+string(rune(v.Stop-1))+"'")
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) toIndexString() string {

	names := make([]string, 0)
	for j := 0; j < len(i.intervals); j++ {
		v := i.intervals[j]
		if v.Stop == v.Start+1 {
			if v.Start == TokenEOF {
				names = append(names, "<EOF>")
			} else {
				names = append(names, strconv.Itoa(v.Start))
			}
		} else {
			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
	names := make([]string, 0)
	for _, v := range i.intervals {
		for j := v.Start; j < v.Stop; j++ {
			names = append(names, i.elementName(literalNames, symbolicNames, j))
		}
	}
	if len(names) > 1 {
		return "{" + strings.Join(names, ", ") + "}"
	}

	return names[0]
}

func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
	if a == TokenEOF {
		return "<EOF>"
	} else if a == TokenEpsilon {
		return "<EPSILON>"
	} else {
		if a < len(literalNames) && literalNames[a] != "" {
			return literalNames[a]
		}

		return symbolicNames[a]
	}
}

@ -1,418 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

// A lexer is a recognizer that draws input symbols from a character stream.
// Lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.

type Lexer interface {
	TokenSource
	Recognizer

	Emit() Token

	SetChannel(int)
	PushMode(int)
	PopMode() int
	SetType(int)
	SetMode(int)
}

type BaseLexer struct {
	*BaseRecognizer

	Interpreter         ILexerATNSimulator
	TokenStartCharIndex int
	TokenStartLine      int
	TokenStartColumn    int
	ActionType          int
	Virt                Lexer // The most derived lexer implementation. Allows virtual method calls.

	input                  CharStream
	factory                TokenFactory
	tokenFactorySourcePair *TokenSourceCharStreamPair
	token                  Token
	hitEOF                 bool
	channel                int
	thetype                int
	modeStack              IntStack
	mode                   int
	text                   string
}

func NewBaseLexer(input CharStream) *BaseLexer {

	lexer := new(BaseLexer)

	lexer.BaseRecognizer = NewBaseRecognizer()

	lexer.input = input
	lexer.factory = CommonTokenFactoryDEFAULT
	lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}

	lexer.Virt = lexer

	lexer.Interpreter = nil // child classes must populate it

	// The goal of all lexer rules/methods is to create a token object.
	// This is an instance variable as multiple rules may collaborate to
	// create a single token. NextToken will return this object after
	// Matching lexer rule(s). If you subclass to allow multiple token
	// emissions, then set this to the last token to be Matched or
	// something non-nil so that the auto token emit mechanism will not
	// emit another token.
	lexer.token = nil

	// What character index in the stream did the current token start at?
	// Needed, for example, to get the text for the current token. Set at
	// the start of NextToken.
	lexer.TokenStartCharIndex = -1

	// The line on which the first character of the token resides.
	lexer.TokenStartLine = -1

	// The character position of the first character within the line.
	lexer.TokenStartColumn = -1

	// Once we see EOF on the char stream, the next token will be EOF.
	// If you have DONE : EOF; then you see DONE EOF.
	lexer.hitEOF = false

	// The channel number for the current token.
	lexer.channel = TokenDefaultChannel

	// The token type for the current token.
	lexer.thetype = TokenInvalidType

	lexer.modeStack = make([]int, 0)
	lexer.mode = LexerDefaultMode

	// You can set the text for the current token to override what is in
	// the input char buffer. Use setText() or set this instance var directly.
	lexer.text = ""

	return lexer
}

const (
	LexerDefaultMode = 0
	LexerMore        = -2
	LexerSkip        = -3
)

const (
	LexerDefaultTokenChannel = TokenDefaultChannel
	LexerHidden              = TokenHiddenChannel
	LexerMinCharValue        = 0x0000
	LexerMaxCharValue        = 0x10FFFF
)

func (b *BaseLexer) reset() {
	// reset the Lexer state variables
	if b.input != nil {
		b.input.Seek(0) // rewind the input
	}
	b.token = nil
	b.thetype = TokenInvalidType
	b.channel = TokenDefaultChannel
	b.TokenStartCharIndex = -1
	b.TokenStartColumn = -1
	b.TokenStartLine = -1
	b.text = ""

	b.hitEOF = false
	b.mode = LexerDefaultMode
	b.modeStack = make([]int, 0)

	b.Interpreter.reset()
}

func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
	return b.Interpreter
}

func (b *BaseLexer) GetInputStream() CharStream {
	return b.input
}

func (b *BaseLexer) GetSourceName() string {
	return b.GrammarFileName
}

func (b *BaseLexer) SetChannel(v int) {
	b.channel = v
}

func (b *BaseLexer) GetTokenFactory() TokenFactory {
	return b.factory
}

func (b *BaseLexer) setTokenFactory(f TokenFactory) {
	b.factory = f
}

func (b *BaseLexer) safeMatch() (ret int) {
	defer func() {
		if e := recover(); e != nil {
			if re, ok := e.(RecognitionException); ok {
				b.notifyListeners(re) // Report the error
				b.Recover(re)
				ret = LexerSkip // default
			}
		}
	}()

	return b.Interpreter.Match(b.input, b.mode)
}

// Return a token from this source, i.e., Match a token on the char stream.
func (b *BaseLexer) NextToken() Token {
	if b.input == nil {
		panic("NextToken requires a non-nil input stream.")
	}

	tokenStartMarker := b.input.Mark()

	// previously in a finally block
	defer func() {
		// make sure we release the marker after the Match, or an
		// unbuffered char stream will keep buffering
		b.input.Release(tokenStartMarker)
	}()

	for {
		if b.hitEOF {
			b.EmitEOF()
			return b.token
		}
		b.token = nil
		b.channel = TokenDefaultChannel
		b.TokenStartCharIndex = b.input.Index()
		b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
		b.TokenStartLine = b.Interpreter.GetLine()
		b.text = ""
		continueOuter := false
		for {
			b.thetype = TokenInvalidType

			ttype := b.safeMatch()

			if b.input.LA(1) == TokenEOF {
				b.hitEOF = true
			}
			if b.thetype == TokenInvalidType {
				b.thetype = ttype
			}
			if b.thetype == LexerSkip {
				continueOuter = true
				break
			}
			if b.thetype != LexerMore {
				break
			}
		}

		if continueOuter {
			continue
		}
		if b.token == nil {
			b.Virt.Emit()
		}
		return b.token
	}
}

// Instruct the lexer to Skip creating a token for the current lexer rule
// and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with the token set to SKIPTOKEN. Recall that
// if token==nil at the end of any token rule, it creates one for you
// and emits it.
func (b *BaseLexer) Skip() {
	b.thetype = LexerSkip
}

func (b *BaseLexer) More() {
	b.thetype = LexerMore
}

func (b *BaseLexer) SetMode(m int) {
	b.mode = m
}

func (b *BaseLexer) PushMode(m int) {
	if LexerATNSimulatorDebug {
		fmt.Println("pushMode " + strconv.Itoa(m))
	}
	b.modeStack.Push(b.mode)
	b.mode = m
}

func (b *BaseLexer) PopMode() int {
	if len(b.modeStack) == 0 {
		panic("Empty Stack")
	}
	if LexerATNSimulatorDebug {
		fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
	}
	i, _ := b.modeStack.Pop()
	b.mode = i
	return b.mode
}
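
// Illustrative example (not part of the original source; the mode constant is
// hypothetical). PushMode/PopMode maintain a stack of lexical modes:
//
//	lex.PushMode(stringMode) // enter string mode, remembering the current one
//	// ... lex string contents ...
//	lex.PopMode()            // return to the previous mode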

func (b *BaseLexer) inputStream() CharStream {
	return b.input
}

// SetInputStream resets the lexer input stream and associated lexer state.
func (b *BaseLexer) SetInputStream(input CharStream) {
	b.input = nil
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
	b.reset()
	b.input = input
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}

func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
	return b.tokenFactorySourcePair
}

// By default, does not support multiple emits per NextToken invocation
// for efficiency reasons. Subclass and override this method, NextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as this implementation does).
func (b *BaseLexer) EmitToken(token Token) {
	b.token = token
}

// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override this method to emit
// custom Token objects or provide a new factory.
func (b *BaseLexer) Emit() Token {
	t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
	b.EmitToken(t)
	return t
}

func (b *BaseLexer) EmitEOF() Token {
	cpos := b.GetCharPositionInLine()
	lpos := b.GetLine()
	eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
	b.EmitToken(eof)
	return eof
}

func (b *BaseLexer) GetCharPositionInLine() int {
	return b.Interpreter.GetCharPositionInLine()
}

func (b *BaseLexer) GetLine() int {
	return b.Interpreter.GetLine()
}

func (b *BaseLexer) GetType() int {
	return b.thetype
}

func (b *BaseLexer) SetType(t int) {
	b.thetype = t
}

// What is the index of the current character of lookahead?
func (b *BaseLexer) GetCharIndex() int {
	return b.input.Index()
}

// Return the text Matched so far for the current token, or any text override.
func (b *BaseLexer) GetText() string {
	if b.text != "" {
		return b.text
	}

	return b.Interpreter.GetText(b.input)
}

// Set the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
	b.text = text
}

func (b *BaseLexer) GetATN() *ATN {
	return b.Interpreter.ATN()
}

// Return a list of all Token objects in the input char stream.
// Forces a load of all tokens. Does not include the EOF token.
func (b *BaseLexer) GetAllTokens() []Token {
	vl := b.Virt
	tokens := make([]Token, 0)
	t := vl.NextToken()
	for t.GetTokenType() != TokenEOF {
		tokens = append(tokens, t)
		t = vl.NextToken()
	}
	return tokens
}
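
// Illustrative usage sketch (not part of the original source; "NewMyLexer" is
// a hypothetical generated lexer constructor):
//
//	lex := NewMyLexer(antlr.NewInputStream("1 + 2"))
//	for _, tok := range lex.GetAllTokens() {
//		fmt.Println(tok.GetText())
//	}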

func (b *BaseLexer) notifyListeners(e RecognitionException) {
	start := b.TokenStartCharIndex
	stop := b.input.Index()
	text := b.input.GetTextFromInterval(NewInterval(start, stop))
	msg := "token recognition error at: '" + text + "'"
	listener := b.GetErrorListenerDispatch()
	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}

func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
	if c == TokenEOF {
		return "<EOF>"
	} else if c == '\n' {
		return "\\n"
	} else if c == '\t' {
		return "\\t"
	} else if c == '\r' {
		return "\\r"
	} else {
		return string(c)
	}
}

func (b *BaseLexer) getCharErrorDisplay(c rune) string {
	return "'" + b.getErrorDisplayForChar(c) + "'"
}

// Lexers can normally Match any char in their vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
func (b *BaseLexer) Recover(re RecognitionException) {
	if b.input.LA(1) != TokenEOF {
		if _, ok := re.(*LexerNoViableAltException); ok {
			// Skip a char and try again
			b.Interpreter.Consume(b.input)
		} else {
			// TODO: Do we lose character or line position information?
			b.input.Consume()
		}
	}
}

@ -1,431 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "strconv"

const (
	LexerActionTypeChannel  = 0 // The type of a {@link LexerChannelAction} action.
	LexerActionTypeCustom   = 1 // The type of a {@link LexerCustomAction} action.
	LexerActionTypeMode     = 2 // The type of a {@link LexerModeAction} action.
	LexerActionTypeMore     = 3 // The type of a {@link LexerMoreAction} action.
	LexerActionTypePopMode  = 4 // The type of a {@link LexerPopModeAction} action.
	LexerActionTypePushMode = 5 // The type of a {@link LexerPushModeAction} action.
	LexerActionTypeSkip     = 6 // The type of a {@link LexerSkipAction} action.
	LexerActionTypeType     = 7 // The type of a {@link LexerTypeAction} action.
)

type LexerAction interface {
	getActionType() int
	getIsPositionDependent() bool
	execute(lexer Lexer)
	hash() int
	equals(other LexerAction) bool
}

type BaseLexerAction struct {
	actionType          int
	isPositionDependent bool
}

func NewBaseLexerAction(action int) *BaseLexerAction {
	la := new(BaseLexerAction)

	la.actionType = action
	la.isPositionDependent = false

	return la
}

func (b *BaseLexerAction) execute(lexer Lexer) {
	panic("Not implemented")
}

func (b *BaseLexerAction) getActionType() int {
	return b.actionType
}

func (b *BaseLexerAction) getIsPositionDependent() bool {
	return b.isPositionDependent
}

func (b *BaseLexerAction) hash() int {
	return b.actionType
}

func (b *BaseLexerAction) equals(other LexerAction) bool {
	return b == other
}

// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// <p>The {@code Skip} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerSkipAction struct {
	*BaseLexerAction
}

func NewLexerSkipAction() *LexerSkipAction {
	la := new(LexerSkipAction)
	la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
	return la
}

// Provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()

func (l *LexerSkipAction) execute(lexer Lexer) {
	lexer.Skip()
}

func (l *LexerSkipAction) String() string {
	return "skip"
}

// Implements the {@code type} lexer action by calling {@link Lexer//setType}
// with the assigned type.
type LexerTypeAction struct {
	*BaseLexerAction

	thetype int
}

func NewLexerTypeAction(thetype int) *LexerTypeAction {
	l := new(LexerTypeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
	l.thetype = thetype
	return l
}

func (l *LexerTypeAction) execute(lexer Lexer) {
	lexer.SetType(l.thetype)
}

func (l *LexerTypeAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.thetype)
	return murmurFinish(h, 2)
}

func (l *LexerTypeAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerTypeAction); !ok {
		return false
	} else {
		return l.thetype == other.(*LexerTypeAction).thetype
	}
}

func (l *LexerTypeAction) String() string {
	return "actionType(" + strconv.Itoa(l.thetype) + ")"
}

// Implements the {@code pushMode} lexer action by calling
// {@link Lexer//pushMode} with the assigned mode.
type LexerPushModeAction struct {
	*BaseLexerAction

	mode int
}

func NewLexerPushModeAction(mode int) *LexerPushModeAction {

	l := new(LexerPushModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)

	l.mode = mode
	return l
}

// <p>This action is implemented by calling {@link Lexer//pushMode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerPushModeAction) execute(lexer Lexer) {
	lexer.PushMode(l.mode)
}

func (l *LexerPushModeAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.mode)
	return murmurFinish(h, 2)
}

func (l *LexerPushModeAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerPushModeAction); !ok {
		return false
	} else {
		return l.mode == other.(*LexerPushModeAction).mode
	}
}

func (l *LexerPushModeAction) String() string {
	return "pushMode(" + strconv.Itoa(l.mode) + ")"
}

// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
//
// <p>The {@code popMode} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerPopModeAction struct {
	*BaseLexerAction
}

func NewLexerPopModeAction() *LexerPopModeAction {
	l := new(LexerPopModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
	return l
}

var LexerPopModeActionINSTANCE = NewLexerPopModeAction()

// <p>This action is implemented by calling {@link Lexer//popMode}.</p>
func (l *LexerPopModeAction) execute(lexer Lexer) {
	lexer.PopMode()
}

func (l *LexerPopModeAction) String() string {
	return "popMode"
}

// Implements the {@code more} lexer action by calling {@link Lexer//more}.
//
// <p>The {@code more} command does not have any parameters, so this action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
type LexerMoreAction struct {
	*BaseLexerAction
}

func NewLexerMoreAction() *LexerMoreAction {
	l := new(LexerMoreAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
	return l
}

var LexerMoreActionINSTANCE = NewLexerMoreAction()

// <p>This action is implemented by calling {@link Lexer//more}.</p>
func (l *LexerMoreAction) execute(lexer Lexer) {
	lexer.More()
}

func (l *LexerMoreAction) String() string {
	return "more"
}

// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// the assigned mode.
type LexerModeAction struct {
	*BaseLexerAction

	mode int
}

func NewLexerModeAction(mode int) *LexerModeAction {
	l := new(LexerModeAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
	l.mode = mode
	return l
}

// <p>This action is implemented by calling {@link Lexer//mode} with the
// value provided by {@link //getMode}.</p>
func (l *LexerModeAction) execute(lexer Lexer) {
	lexer.SetMode(l.mode)
}

func (l *LexerModeAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.mode)
	return murmurFinish(h, 2)
}

func (l *LexerModeAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerModeAction); !ok {
		return false
	} else {
		return l.mode == other.(*LexerModeAction).mode
	}
}

func (l *LexerModeAction) String() string {
	return "mode(" + strconv.Itoa(l.mode) + ")"
}

// Executes a custom lexer action by calling {@link Recognizer//action} with the
// rule and action indexes assigned to the custom action. The implementation of
// a custom action is added to the generated code for the lexer in an override
// of {@link Recognizer//action} when the grammar is compiled.
//
// <p>This class may represent embedded actions created with the <code>{...}</code>
// syntax in ANTLR 4, as well as actions created for lexer commands where the
// command argument could not be evaluated when the grammar was compiled.</p>

// Constructs a custom lexer action with the specified rule and action
// indexes.
//
// @param ruleIndex The rule index to use for calls to
// {@link Recognizer//action}.
// @param actionIndex The action index to use for calls to
// {@link Recognizer//action}.
type LexerCustomAction struct {
	*BaseLexerAction
	ruleIndex, actionIndex int
}

func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
	l := new(LexerCustomAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
	l.ruleIndex = ruleIndex
	l.actionIndex = actionIndex
	l.isPositionDependent = true
	return l
}

// <p>Custom actions are implemented by calling {@link Lexer//action} with the
// appropriate rule and action indexes.</p>
func (l *LexerCustomAction) execute(lexer Lexer) {
	lexer.Action(nil, l.ruleIndex, l.actionIndex)
}

func (l *LexerCustomAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.ruleIndex)
	h = murmurUpdate(h, l.actionIndex)
	return murmurFinish(h, 3)
}

func (l *LexerCustomAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerCustomAction); !ok {
		return false
	} else {
		return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
			l.actionIndex == other.(*LexerCustomAction).actionIndex
	}
}

// Implements the {@code channel} lexer action by calling
// {@link Lexer//setChannel} with the assigned channel.
//
// Constructs a new {@code channel} action with the specified channel value.
// @param channel The channel value to pass to {@link Lexer//setChannel}.
type LexerChannelAction struct {
	*BaseLexerAction

	channel int
}

func NewLexerChannelAction(channel int) *LexerChannelAction {
	l := new(LexerChannelAction)
	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
	l.channel = channel
	return l
}

// <p>This action is implemented by calling {@link Lexer//setChannel} with the
// value provided by {@link //getChannel}.</p>
func (l *LexerChannelAction) execute(lexer Lexer) {
	lexer.SetChannel(l.channel)
}

func (l *LexerChannelAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.channel)
	return murmurFinish(h, 2)
}

func (l *LexerChannelAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerChannelAction); !ok {
		return false
	} else {
		return l.channel == other.(*LexerChannelAction).channel
	}
}

func (l *LexerChannelAction) String() string {
	return "channel(" + strconv.Itoa(l.channel) + ")"
}

// This implementation of {@link LexerAction} is used for tracking input offsets
// for position-dependent actions within a {@link LexerActionExecutor}.
//
// <p>This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>

// Constructs a new indexed custom action by associating a character offset
// with a {@link LexerAction}.
//
// <p>Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
//
// @param offset The offset into the input {@link CharStream}, relative to
// the token start index, at which the specified lexer action should be
// executed.
// @param action The lexer action to execute at a particular offset in the
// input {@link CharStream}.
type LexerIndexedCustomAction struct {
	*BaseLexerAction

	offset              int
	lexerAction         LexerAction
	isPositionDependent bool
}

func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
	l := new(LexerIndexedCustomAction)
	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
	l.offset = offset
	l.lexerAction = lexerAction
	l.isPositionDependent = true
	return l
}

// <p>This method calls {@link //execute} on the result of {@link //getAction}
// using the provided {@code lexer}.</p>
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
	// assume the input stream position was properly set by the calling code
	l.lexerAction.execute(lexer)
}

func (l *LexerIndexedCustomAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.offset)
	h = murmurUpdate(h, l.lexerAction.hash())
	return murmurFinish(h, 3)
}

func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
		return false
	} else {
		return l.offset == other.(*LexerIndexedCustomAction).offset &&
			l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
	}
}
@ -1,170 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// Represents an executor for a sequence of lexer actions which are traversed during
// the Matching operation of a lexer rule (token).
//
// <p>The executor tracks position information for position-dependent lexer actions
// efficiently, ensuring that actions appearing only at the end of the rule do
// not cause bloating of the {@link DFA} created for the lexer.</p>
type LexerActionExecutor struct {
	lexerActions []LexerAction
	cachedHash   int
}

func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
	if lexerActions == nil {
		lexerActions = make([]LexerAction, 0)
	}

	l := new(LexerActionExecutor)
	l.lexerActions = lexerActions

	// Caches the result of {@link //hashCode} since the hash code is an element
	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
	l.cachedHash = murmurInit(57)
	for _, a := range lexerActions {
		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
	}

	return l
}

// Creates a {@link LexerActionExecutor} which executes the actions for
// the input {@code lexerActionExecutor} followed by a specified
// {@code lexerAction}.
//
// @param lexerActionExecutor The executor for actions already traversed by
// the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions
// specified in {@code lexerActionExecutor}.
//
// @return A {@link LexerActionExecutor} for executing the combined actions
// of {@code lexerActionExecutor} and {@code lexerAction}.
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
	if lexerActionExecutor == nil {
		return NewLexerActionExecutor([]LexerAction{lexerAction})
	}

	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
}
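
One Go subtlety worth flagging in the append above: appending to a slice whose backing array has spare capacity aliases that array, so if two executors were ever extended from the same parent, the second append could overwrite the element the first one added. A minimal demonstration of the hazard, with plain ints standing in for LexerAction values:

package main

import "fmt"

func main() {
	parent := make([]int, 2, 4) // spare capacity, like a shared action list
	parent[0], parent[1] = 1, 2

	a := append(parent, 3) // writes 3 into parent's backing array
	b := append(parent, 4) // reuses the same slot, overwriting the 3

	fmt.Println(a, b) // [1 2 4] [1 2 4] — a's tail was clobbered
}

In practice each executor here is built once per traversal, which limits the exposure, but copying before appending would remove the aliasing entirely.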

// Creates a {@link LexerActionExecutor} which encodes the current offset
// for position-dependent lexer actions.
//
// <p>Normally, when the executor encounters lexer actions where
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
// {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p>
//
// <p>Prior to traversing a Match transition in the ATN, the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of
// lexer actions which appear in the middle of tokens remains efficient due
// to sharing among tokens of the same length, regardless of their absolute
// position in the input stream.</p>
//
// <p>If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns {@code this}.</p>
//
// @param offset The current offset to assign to all position-dependent
// lexer actions which do not already have offsets assigned.
//
// @return A {@link LexerActionExecutor} which stores input stream offsets
// for all position-dependent lexer actions.
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
	var updatedLexerActions []LexerAction
	for i := 0; i < len(l.lexerActions); i++ {
		_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
		if l.lexerActions[i].getIsPositionDependent() && !ok {
			if updatedLexerActions == nil {
				updatedLexerActions = make([]LexerAction, 0)
				for _, a := range l.lexerActions {
					updatedLexerActions = append(updatedLexerActions, a)
				}
			}
			updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
		}
	}
	if updatedLexerActions == nil {
		return l
	}

	return NewLexerActionExecutor(updatedLexerActions)
}
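
fixOffsetBeforeMatch is a lazy copy-on-write loop: the slice is duplicated only once the first element actually needs wrapping, so the common no-op case allocates nothing and returns the receiver unchanged. The same shape in isolation (wrapEvens and wrap are hypothetical stand-ins):

package main

import "fmt"

func wrap(v int) int { return -v } // stand-in for NewLexerIndexedCustomAction

// wrapEvens copies the input only if some element must change.
func wrapEvens(in []int) []int {
	var out []int
	for i, v := range in {
		if v%2 == 0 { // "needs rewriting", like a position-dependent action
			if out == nil {
				out = append([]int(nil), in...) // first change: copy everything
			}
			out[i] = wrap(v)
		}
	}
	if out == nil {
		return in // nothing changed: reuse the original slice
	}
	return out
}

func main() {
	fmt.Println(wrapEvens([]int{1, 2, 3})) // [1 -2 3] (fresh copy)
	fmt.Println(wrapEvens([]int{1, 3, 5})) // [1 3 5] (same slice back)
}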

// Execute the actions encapsulated by this executor within the context of a
// particular {@link Lexer}.
//
// <p>This method calls {@link IntStream//seek} to set the position of the
// {@code input} {@link CharStream} prior to calling
// {@link LexerAction//execute} on a position-dependent action. Before the
// method returns, the input position will be restored to the same position
// it was in when the method was invoked.</p>
//
// @param lexer The lexer instance.
// @param input The input stream which is the source for the current token.
// When this method is called, the current {@link IntStream//index} for
// {@code input} should be the start of the following token, i.e. 1
// character past the end of the current token.
// @param startIndex The token start index. This value may be passed to
// {@link IntStream//seek} to set the {@code input} position to the beginning
// of the token.
func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
	requiresSeek := false
	stopIndex := input.Index()

	defer func() {
		if requiresSeek {
			input.Seek(stopIndex)
		}
	}()

	for i := 0; i < len(l.lexerActions); i++ {
		lexerAction := l.lexerActions[i]
		if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
			offset := la.offset
			input.Seek(startIndex + offset)
			lexerAction = la.lexerAction
			requiresSeek = (startIndex + offset) != stopIndex
		} else if lexerAction.getIsPositionDependent() {
			input.Seek(stopIndex)
			requiresSeek = false
		}
		lexerAction.execute(lexer)
	}
}
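
The execute method above leans on a single deferred closure to restore the stream position however the loop exits, with requiresSeek toggled as indexed actions move the cursor. The pattern in miniature, using a hypothetical cursor type:

package main

import "fmt"

type cursor struct{ pos int }

func (c *cursor) seek(p int) { c.pos = p }

func process(c *cursor, offsets []int) {
	stop := c.pos
	requiresSeek := false
	defer func() {
		if requiresSeek {
			c.seek(stop) // restore only if the last action left us elsewhere
		}
	}()
	for _, off := range offsets {
		c.seek(off)
		requiresSeek = off != stop
	}
}

func main() {
	c := &cursor{pos: 10}
	process(c, []int{3, 7})
	fmt.Println(c.pos) // 10: the deferred seek put the cursor back
}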

func (l *LexerActionExecutor) hash() int {
	if l == nil {
		return 61
	}
	return l.cachedHash
}

func (l *LexerActionExecutor) equals(other interface{}) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerActionExecutor); !ok {
		return false
	} else {
		return l.cachedHash == other.(*LexerActionExecutor).cachedHash &&
			&l.lexerActions == &other.(*LexerActionExecutor).lexerActions
	}
}
@ -1,658 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

var (
	LexerATNSimulatorDebug    = false
	LexerATNSimulatorDFADebug = false

	LexerATNSimulatorMinDFAEdge = 0
	LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN

	LexerATNSimulatorMatchCalls = 0
)

type ILexerATNSimulator interface {
	IATNSimulator

	reset()
	Match(input CharStream, mode int) int
	GetCharPositionInLine() int
	GetLine() int
	GetText(input CharStream) string
	Consume(input CharStream)
}

type LexerATNSimulator struct {
	*BaseATNSimulator

	recog              Lexer
	predictionMode     int
	mergeCache         DoubleDict
	startIndex         int
	Line               int
	CharPositionInLine int
	mode               int
	prevAccept         *SimState
	MatchCalls         int
}

func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
	l := new(LexerATNSimulator)

	l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)

	l.decisionToDFA = decisionToDFA
	l.recog = recog
	// The current token's starting index into the character stream.
	// Shared across DFA to ATN simulation in case the ATN fails and the
	// DFA did not have a previous accept state. In this case, we use the
	// ATN-generated exception object.
	l.startIndex = -1
	// line number 1..n within the input
	l.Line = 1
	// The index of the character relative to the beginning of the line,
	// 0..n-1
	l.CharPositionInLine = 0
	l.mode = LexerDefaultMode
	// Used during DFA/ATN exec to record the most recent accept configuration
	// info
	l.prevAccept = NewSimState()

	return l
}

func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
	l.CharPositionInLine = simulator.CharPositionInLine
	l.Line = simulator.Line
	l.mode = simulator.mode
	l.startIndex = simulator.startIndex
}

func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
	l.MatchCalls++
	l.mode = mode
	mark := input.Mark()

	defer func() {
		input.Release(mark)
	}()

	l.startIndex = input.Index()
	l.prevAccept.reset()

	dfa := l.decisionToDFA[mode]

	if dfa.s0 == nil {
		return l.MatchATN(input)
	}

	return l.execATN(input, dfa.s0)
}
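
Match brackets the whole simulation in Mark/Release via defer, so buffered characters are released even when the simulation panics (as failOrAccept can). A sketch of that bracket with a hypothetical stream type; the recover is only there to keep the demo self-contained, since in the runtime the panic propagates to the lexer's error handling:

package main

import "fmt"

type stream struct{ marks int }

func (s *stream) Mark() int     { s.marks++; return s.marks }
func (s *stream) Release(m int) { s.marks-- }

func match(s *stream) (tok int) {
	mark := s.Mark()
	defer s.Release(mark) // runs even if the simulation panics
	defer func() {
		if r := recover(); r != nil {
			tok = -1 // demo only: map the panic to an error token
		}
	}()
	panic("no viable alternative") // stand-in for a failed match
}

func main() {
	s := &stream{}
	fmt.Println(match(s), s.marks) // -1 0: the mark was released
}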

func (l *LexerATNSimulator) reset() {
	l.prevAccept.reset()
	l.startIndex = -1
	l.Line = 1
	l.CharPositionInLine = 0
	l.mode = LexerDefaultMode
}

func (l *LexerATNSimulator) MatchATN(input CharStream) int {
	startState := l.atn.modeToStartState[l.mode]

	if LexerATNSimulatorDebug {
		fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
	}
	oldMode := l.mode
	s0Closure := l.computeStartState(input, startState)
	suppressEdge := s0Closure.hasSemanticContext
	s0Closure.hasSemanticContext = false

	next := l.addDFAState(s0Closure)

	if !suppressEdge {
		l.decisionToDFA[l.mode].setS0(next)
	}

	predict := l.execATN(input, next)

	if LexerATNSimulatorDebug {
		fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
	}
	return predict
}

func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
	if LexerATNSimulatorDebug {
		fmt.Println("start state closure=" + ds0.configs.String())
	}
	if ds0.isAcceptState {
		// allow zero-length tokens
		l.captureSimState(l.prevAccept, input, ds0)
	}
	t := input.LA(1)
	s := ds0 // s is current/from DFA state

	for { // while more work
		if LexerATNSimulatorDebug {
			fmt.Println("execATN loop starting closure: " + s.configs.String())
		}

		// As we move src->trg, src->trg, we keep track of the previous trg to
		// avoid looking up the DFA state again, which is expensive.
		// If the previous target was already part of the DFA, we might
		// be able to avoid doing a reach operation upon t. If s!=nil,
		// it means that semantic predicates didn't prevent us from
		// creating a DFA state. Once we know s!=nil, we check to see if
		// the DFA state has an edge already for t. If so, we can just reuse
		// its configuration set; there's no point in re-computing it.
		// This is kind of like doing DFA simulation within the ATN
		// simulation because DFA simulation is really just a way to avoid
		// computing reach/closure sets. Technically, once we know that
		// we have a previously added DFA state, we could jump over to
		// the DFA simulator. But, that would mean popping back and forth
		// a lot and making things more complicated algorithmically.
		// This optimization makes a lot of sense for loops within DFA.
		// A character will take us back to an existing DFA state
		// that already has lots of edges out of it. e.g., .* in comments.
		target := l.getExistingTargetState(s, t)
		if target == nil {
			target = l.computeTargetState(input, s, t)
		}
		if target == ATNSimulatorError {
			break
		}
		// If this is a consumable input element, make sure to consume before
		// capturing the accept state so the input index, line, and char
		// position accurately reflect the state of the interpreter at the
		// end of the token.
		if t != TokenEOF {
			l.Consume(input)
		}
		if target.isAcceptState {
			l.captureSimState(l.prevAccept, input, target)
			if t == TokenEOF {
				break
			}
		}
		t = input.LA(1)
		s = target // flip: the current DFA target becomes the new src/from state
	}

	return l.failOrAccept(l.prevAccept, input, s.configs, t)
}

// Get an existing target state for an edge in the DFA. If the target state
// for the edge has not yet been computed or is otherwise not available,
// this method returns {@code nil}.
//
// @param s The current DFA state
// @param t The next input symbol
// @return The existing target DFA state for the given input symbol
// {@code t}, or {@code nil} if the target state for this edge is not
// already cached
func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
	if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
		return nil
	}

	target := s.edges[t-LexerATNSimulatorMinDFAEdge]
	if LexerATNSimulatorDebug && target != nil {
		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
	}
	return target
}

// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, this method
// returns {@link //ERROR}.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
	reach := NewOrderedATNConfigSet()

	// if we don't find an existing DFA state,
	// fill reach starting from closure, following t transitions
	l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)

	if len(reach.configs) == 0 { // we got nowhere on t from s
		if !reach.hasSemanticContext {
			// we got nowhere on t, but don't throw out this knowledge; it'd
			// cause a failover from DFA later.
			l.addDFAEdge(s, t, ATNSimulatorError, nil)
		}
		// stop when we can't Match any more characters
		return ATNSimulatorError
	}
	// Add an edge from s to the target DFA state found/created for reach
	return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
}

func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
	if l.prevAccept.dfaState != nil {
		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
		return prevAccept.dfaState.prediction
	}

	// if no accept and EOF is first char, return EOF
	if t == TokenEOF && input.Index() == l.startIndex {
		return TokenEOF
	}

	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}

// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
	// this is used to Skip processing for configs which have a lower priority
	// than a config that already reached an accept state for the same rule
	SkipAlt := ATNInvalidAltNumber

	for _, cfg := range closure.GetItems() {
		currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
		if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
			continue
		}

		if LexerATNSimulatorDebug {
			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
		}

		for _, trans := range cfg.GetState().GetTransitions() {
			target := l.getReachableTarget(trans, t)
			if target != nil {
				lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
				if lexerActionExecutor != nil {
					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
				}
				treatEOFAsEpsilon := (t == TokenEOF)
				config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
				if l.closure(input, config, reach,
					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
					// any remaining configs for this alt have a lower priority
					// than the one that just reached an accept state.
					SkipAlt = cfg.GetAlt()
				}
			}
		}
	}
}

func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
	if LexerATNSimulatorDebug {
		fmt.Printf("ACTION %s\n", lexerActionExecutor)
	}
	// seek to after last char in token
	input.Seek(index)
	l.Line = line
	l.CharPositionInLine = charPos
	if lexerActionExecutor != nil && l.recog != nil {
		lexerActionExecutor.execute(l.recog, input, startIndex)
	}
}

func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
	if trans.Matches(t, 0, LexerMaxCharValue) {
		return trans.getTarget()
	}

	return nil
}

func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
	configs := NewOrderedATNConfigSet()
	for i := 0; i < len(p.GetTransitions()); i++ {
		target := p.GetTransitions()[i].getTarget()
		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
		l.closure(input, cfg, configs, false, false, false)
	}

	return configs
}

// Since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {

	if LexerATNSimulatorDebug {
		fmt.Println("closure(" + config.String() + ")")
	}

	_, ok := config.state.(*RuleStopState)
	if ok {
		if LexerATNSimulatorDebug {
			if l.recog != nil {
				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
			} else {
				fmt.Printf("closure at rule stop %s\n", config)
			}
		}

		if config.context == nil || config.context.hasEmptyPath() {
			if config.context == nil || config.context.isEmpty() {
				configs.Add(config, nil)
				return true
			}

			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
			currentAltReachedAcceptState = true
		}
		if config.context != nil && !config.context.isEmpty() {
			for i := 0; i < config.context.length(); i++ {
				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
					newContext := config.context.GetParent(i) // "pop" return state
					returnState := l.atn.states[config.context.getReturnState(i)]
					cfg := NewLexerATNConfig2(config, returnState, newContext)
					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
				}
			}
		}
		return currentAltReachedAcceptState
	}
	// optimization
	if !config.state.GetEpsilonOnlyTransitions() {
		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
			configs.Add(config, nil)
		}
	}
	for j := 0; j < len(config.state.GetTransitions()); j++ {
		trans := config.state.GetTransitions()[j]
		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
		if cfg != nil {
			currentAltReachedAcceptState = l.closure(input, cfg, configs,
				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
		}
	}
	return currentAltReachedAcceptState
}

// side-effect: can alter configs.hasSemanticContext
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
	configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {

	var cfg *LexerATNConfig

	if trans.getSerializationType() == TransitionRULE {
		rt := trans.(*RuleTransition)
		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
	} else if trans.getSerializationType() == TransitionPRECEDENCE {
		panic("Precedence predicates are not supported in lexers.")
	} else if trans.getSerializationType() == TransitionPREDICATE {
		// Track traversing semantic predicates. If we traverse,
		// we cannot add a DFA state for this "reach" computation
		// because the DFA would not test the predicate again in the
		// future. Rather than creating collections of semantic predicates
		// like v3 and testing them on prediction, v4 will test them on the
		// fly all the time using the ATN not the DFA. This is slower but
		// semantically it's not used that often. One of the key elements to
		// this predicate mechanism is not adding DFA states that see
		// predicates immediately afterwards in the ATN. For example,

		// a : ID {p1}? | ID {p2}?

		// should create the start state for rule 'a' (to save start state
		// competition), but should not create target of ID state. The
		// collection of ATN states the following ID references includes
		// states reached by traversing predicates. Since this is when we
		// test them, we cannot cache the DFA state target of ID.

		pt := trans.(*PredicateTransition)

		if LexerATNSimulatorDebug {
			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
		}
		configs.SetHasSemanticContext(true)
		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionACTION {
		if config.context == nil || config.context.hasEmptyPath() {
			// execute actions anywhere in the start rule for a token.
			//
			// TODO: if the entry rule is invoked recursively, some
			// actions may be executed during the recursive call. The
			// problem can appear when hasEmptyPath() is true but
			// isEmpty() is false. In this case, the config needs to be
			// split into two contexts - one with just the empty path
			// and another with everything but the empty path.
			// Unfortunately, the current algorithm does not allow
			// getEpsilonTarget to return two configurations, so
			// additional modifications are needed before we can support
			// the split operation.
			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
		} else {
			// ignore actions in referenced rules
			cfg = NewLexerATNConfig4(config, trans.getTarget())
		}
	} else if trans.getSerializationType() == TransitionEPSILON {
		cfg = NewLexerATNConfig4(config, trans.getTarget())
	} else if trans.getSerializationType() == TransitionATOM ||
		trans.getSerializationType() == TransitionRANGE ||
		trans.getSerializationType() == TransitionSET {
		if treatEOFAsEpsilon {
			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
				cfg = NewLexerATNConfig4(config, trans.getTarget())
			}
		}
	}
	return cfg
}

// Evaluate a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, this method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
// call to {@link //consume}).</p>
//
// @param input The input stream.
// @param ruleIndex The rule containing the predicate.
// @param predIndex The index of the predicate within the rule.
// @param speculative {@code true} if the current index in {@code input} is
// one character before the predicate's location.
//
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
	// assume true if no recognizer was provided
	if l.recog == nil {
		return true
	}
	if !speculative {
		return l.recog.Sempred(nil, ruleIndex, predIndex)
	}
	savedcolumn := l.CharPositionInLine
	savedLine := l.Line
	index := input.Index()
	marker := input.Mark()

	defer func() {
		l.CharPositionInLine = savedcolumn
		l.Line = savedLine
		input.Seek(index)
		input.Release(marker)
	}()

	l.Consume(input)
	return l.recog.Sempred(nil, ruleIndex, predIndex)
}

func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
	settings.index = input.Index()
	settings.line = l.Line
	settings.column = l.CharPositionInLine
	settings.dfaState = dfaState
}

func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
	if to == nil && cfgs != nil {
		// leading to this call, ATNConfigSet.hasSemanticContext is used as a
		// marker indicating dynamic predicate evaluation makes this edge
		// dependent on the specific input sequence, so the static edge in the
		// DFA should be omitted. The target DFAState is still created since
		// execATN has the ability to resynchronize with the DFA state cache
		// following the predicate evaluation step.
		//
		// TJP notes: next time through the DFA, we see a pred again and eval.
		// If that gets us to a previously created (but dangling) DFA
		// state, we can continue in pure DFA mode from there.
		suppressEdge := cfgs.HasSemanticContext()
		cfgs.SetHasSemanticContext(false)

		to = l.addDFAState(cfgs)

		if suppressEdge {
			return to
		}
	}
	// add the edge
	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
		// Only track edges within the DFA bounds
		return to
	}
	if LexerATNSimulatorDebug {
		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
	}
	if from.edges == nil {
		// make room for tokens 1..n and -1 masquerading as index 0
		from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)
	}
	from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect

	return to
}
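
The edge cache written by addDFAEdge is just a lazily allocated array of MaxDFAEdge-MinDFAEdge+1 slots indexed by tk-MinDFAEdge; symbols outside the [MinDFAEdge, MaxDFAEdge] window are never cached and always fall back to ATN simulation, which is how non-ASCII input is kept out of the DFA. The indexing scheme in isolation:

package main

import "fmt"

const minEdge, maxEdge = 0, 127 // mirrors the Min/MaxDFAEdge bounds above

type state struct{ edges []*state }

func addEdge(from *state, tk int, to *state) {
	if tk < minEdge || tk > maxEdge {
		return // out-of-range symbols are not cached
	}
	if from.edges == nil {
		from.edges = make([]*state, maxEdge-minEdge+1) // lazy allocation
	}
	from.edges[tk-minEdge] = to
}

func main() {
	a, b := &state{}, &state{}
	addEdge(a, 'x', b)
	addEdge(a, 0x1F600, b) // a non-ASCII rune: silently skipped
	fmt.Println(a.edges['x'-minEdge] == b) // true
}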

// Add a new DFA state if there isn't one with this set of
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
	proposed := NewDFAState(-1, configs)
	var firstConfigWithRuleStopState ATNConfig

	for _, cfg := range configs.GetItems() {
		_, ok := cfg.GetState().(*RuleStopState)

		if ok {
			firstConfigWithRuleStopState = cfg
			break
		}
	}
	if firstConfigWithRuleStopState != nil {
		proposed.isAcceptState = true
		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
	}
	hash := proposed.hash()
	dfa := l.decisionToDFA[l.mode]
	existing, ok := dfa.getState(hash)
	if ok {
		return existing
	}
	newState := proposed
	newState.stateNumber = dfa.numStates()
	configs.SetReadOnly(true)
	newState.configs = configs
	dfa.setState(hash, newState)
	return newState
}

func (l *LexerATNSimulator) getDFA(mode int) *DFA {
	return l.decisionToDFA[mode]
}

// Get the text Matched so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
	// index is first lookahead char, don't include.
	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
}

func (l *LexerATNSimulator) Consume(input CharStream) {
	curChar := input.LA(1)
	if curChar == int('\n') {
		l.Line++
		l.CharPositionInLine = 0
	} else {
		l.CharPositionInLine++
	}
	input.Consume()
}
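
Consume keeps Line and CharPositionInLine in lock-step with the cursor: a newline bumps the line and zeroes the column, anything else bumps the column. The same bookkeeping standalone, computing the position of an index within a string:

package main

import "fmt"

func position(input string, index int) (line, col int) {
	line = 1
	for _, ch := range input[:index] {
		if ch == '\n' {
			line++
			col = 0
		} else {
			col++
		}
	}
	return line, col
}

func main() {
	fmt.Println(position("ab\ncd", 4)) // 2 1: second line, column 1
}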

func (l *LexerATNSimulator) GetCharPositionInLine() int {
	return l.CharPositionInLine
}

func (l *LexerATNSimulator) GetLine() int {
	return l.Line
}

func (l *LexerATNSimulator) GetTokenName(tt int) string {
	if tt == -1 {
		return "EOF"
	}

	// render the code point itself, not its decimal representation
	return "'" + string(rune(tt)) + "'"
}

func resetSimState(sim *SimState) {
	sim.index = -1
	sim.line = 0
	sim.column = -1
	sim.dfaState = nil
}

type SimState struct {
	index    int
	line     int
	column   int
	dfaState *DFAState
}

func NewSimState() *SimState {
	s := new(SimState)
	resetSimState(s)
	return s
}

func (s *SimState) reset() {
	resetSimState(s)
}
@ -1,215 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type LL1Analyzer struct {
	atn *ATN
}

func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
	la := new(LL1Analyzer)
	la.atn = atn
	return la
}

// Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
const (
	LL1AnalyzerHitPred = TokenInvalidType
)

// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
// <em>i</em> leads to a semantic predicate before Matching a symbol, the
// element at index <em>i</em> of the result will be {@code nil}.
//
// @param s the ATN state
// @return the expected symbols for each outgoing transition of {@code s}.
func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
	if s == nil {
		return nil
	}
	count := len(s.GetTransitions())
	look := make([]*IntervalSet, count)
	for alt := 0; alt < count; alt++ {
		look[alt] = NewIntervalSet()
		lookBusy := NewSet(nil, nil)
		seeThruPreds := false // fail to get lookahead upon pred
		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
		// Wipe out lookahead for this alternative if we found nothing
		// or we had a predicate when we !seeThruPreds
		if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
			look[alt] = nil
		}
	}
	return look
}

// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx the complete parser context, or {@code nil} if the context
// should be ignored
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
	r := NewIntervalSet()
	seeThruPreds := true // ignore preds; get all lookahead
	var lookContext PredictionContext
	if ctx != nil {
		lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
	}
	la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
	return r
}

// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
// <p>If {@code ctx} is {@code nil} and {@code stopState} or the end of the
// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
// {@code true} and {@code stopState} or the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
//
// @param s the ATN state.
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx The outer context, or {@code nil} if the outer context should
// not be used.
// @param look The result lookahead set.
// @param lookBusy A set used for preventing epsilon closures in the ATN
// from causing a stack overflow. Outside code should pass
// {@code NewSet<ATNConfig>} for this argument.
// @param calledRuleStack A set used for preventing left recursion in the
// ATN from causing a stack overflow. Outside code should pass
// {@code NewBitSet()} for this argument.
// @param seeThruPreds {@code true} to treat semantic predicates as
// implicitly {@code true} and "see through them", otherwise {@code false}
// to treat semantic predicates as opaque and add {@link //HitPred} to the
// result if one is encountered.
// @param addEOF Add {@link Token//EOF} to the result if the end of the
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
	returnState := la.atn.states[ctx.getReturnState(i)]

	removed := calledRuleStack.contains(returnState.GetRuleIndex())

	defer func() {
		if removed {
			calledRuleStack.add(returnState.GetRuleIndex())
		}
	}()

	calledRuleStack.remove(returnState.GetRuleIndex())
	la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}

func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
	c := NewBaseATNConfig6(s, 0, ctx)

	if lookBusy.contains(c) {
		return
	}

	lookBusy.add(c)

	if s == stopState {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}
	}

	_, ok := s.(*RuleStopState)

	if ok {
		if ctx == nil {
			look.addOne(TokenEpsilon)
			return
		} else if ctx.isEmpty() && addEOF {
			look.addOne(TokenEOF)
			return
		}

		if ctx != BasePredictionContextEMPTY {
			// run thru all possible stack tops in ctx
			for i := 0; i < ctx.length(); i++ {
				returnState := la.atn.states[ctx.getReturnState(i)]
				la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
			}
			return
		}
	}

	n := len(s.GetTransitions())

	for i := 0; i < n; i++ {
		t := s.GetTransitions()[i]

		if t1, ok := t.(*RuleTransition); ok {
			if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
				continue
			}

			newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
			la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
		} else if t2, ok := t.(AbstractPredicateTransition); ok {
			if seeThruPreds {
				la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
			} else {
				look.addOne(LL1AnalyzerHitPred)
			}
		} else if t.getIsEpsilon() {
			la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
		} else if _, ok := t.(*WildcardTransition); ok {
			look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
		} else {
			set := t.getLabel()
			if set != nil {
				if _, ok := t.(*NotSetTransition); ok {
					set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
				}
				look.addSet(set)
			}
		}
	}
}
|
|
||||||
|
|
||||||
func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {

	newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())

	defer func() {
		calledRuleStack.remove(t1.getTarget().GetRuleIndex())
	}()

	calledRuleStack.add(t1.getTarget().GetRuleIndex())
	la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)

}
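
// Illustrative sketch (hypothetical helper, not part of the upstream file):
// the contains/remove/deferred-restore sequence in look2 and look3 above is a
// reentrancy guard on calledRuleStack that is undone on every exit path. The
// same pattern with a plain map instead of the runtime's BitSet:
func withRuleGuard(called map[int]bool, ruleIndex int, body func()) {
	if called[ruleIndex] {
		return // already exploring this rule; avoid infinite recursion
	}
	called[ruleIndex] = true
	defer delete(called, ruleIndex) // restored even on panic or early return
	body()
}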
@@ -1,718 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

type Parser interface {
	Recognizer

	GetInterpreter() *ParserATNSimulator

	GetTokenStream() TokenStream
	GetTokenFactory() TokenFactory
	GetParserRuleContext() ParserRuleContext
	SetParserRuleContext(ParserRuleContext)
	Consume() Token
	GetParseListeners() []ParseTreeListener

	GetErrorHandler() ErrorStrategy
	SetErrorHandler(ErrorStrategy)
	GetInputStream() IntStream
	GetCurrentToken() Token
	GetExpectedTokens() *IntervalSet
	NotifyErrorListeners(string, Token, RecognitionException)
	IsExpectedToken(int) bool
	GetPrecedence() int
	GetRuleInvocationStack(ParserRuleContext) []string
}

type BaseParser struct {
	*BaseRecognizer

	Interpreter     *ParserATNSimulator
	BuildParseTrees bool

	input           TokenStream
	errHandler      ErrorStrategy
	precedenceStack IntStack
	ctx             ParserRuleContext

	tracer         *TraceListener
	parseListeners []ParseTreeListener
	_SyntaxErrors  int
}

// This is all the parsing support code; essentially, most of it is error
// recovery support.
func NewBaseParser(input TokenStream) *BaseParser {

	p := new(BaseParser)

	p.BaseRecognizer = NewBaseRecognizer()

	// The input stream.
	p.input = nil
	// The error handling strategy for the parser. The default value is a new
	// instance of {@link DefaultErrorStrategy}.
	p.errHandler = NewDefaultErrorStrategy()
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)
	// The {@link ParserRuleContext} object for the currently executing rule.
	// This is always non-nil during the parsing process.
	p.ctx = nil
	// Specifies whether or not the parser should construct a parse tree during
	// the parsing process. The default value is {@code true}.
	p.BuildParseTrees = true
	// When {@link //setTrace}{@code (true)} is called, a reference to the
	// {@link TraceListener} is stored here so it can be easily removed in a
	// later call to {@link //setTrace}{@code (false)}. The listener itself is
	// implemented as a parser listener, so this field is not directly used by
	// other parser methods.
	p.tracer = nil
	// The list of {@link ParseTreeListener} listeners registered to receive
	// events during the parse.
	p.parseListeners = nil
	// The number of syntax errors reported during parsing. This value is
	// incremented each time {@link //NotifyErrorListeners} is called.
	p._SyntaxErrors = 0
	p.SetInputStream(input)

	return p
}

// This field maps from the serialized ATN string to the deserialized {@link
// ATN} with bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
//
var bypassAltsAtnCache = make(map[string]int)

// reset the parser's state
func (p *BaseParser) reset() {
	if p.input != nil {
		p.input.Seek(0)
	}
	p.errHandler.reset(p)
	p.ctx = nil
	p._SyntaxErrors = 0
	p.SetTrace(nil)
	p.precedenceStack = make([]int, 0)
	p.precedenceStack.Push(0)
	if p.Interpreter != nil {
		p.Interpreter.reset()
	}
}

func (p *BaseParser) GetErrorHandler() ErrorStrategy {
	return p.errHandler
}

func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
	p.errHandler = e
}

// Match current input symbol against {@code ttype}. If the symbol type
// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
// called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @param ttype the token type to Match
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// {@code ttype} and the error strategy could not recover from the
// mismatched symbol

func (p *BaseParser) Match(ttype int) Token {

	t := p.GetCurrentToken()

	if t.GetTokenType() == ttype {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a new token during single-token
			// insertion, if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}

	return t
}

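// Illustrative sketch (not part of the upstream file): generated rule methods
// call Match for each terminal in an alternative and let the error strategy
// recover inline on a mismatch. All "My..." names below are hypothetical
// stand-ins for identifiers a generated parser would define.
type MyParser struct{ *BaseParser }

func (p *MyParser) idList() {
	p.Match(MyParserID) // first ID is required
	for p.GetCurrentToken().GetTokenType() == MyParserComma {
		p.Match(MyParserComma) // ',' ID pairs repeat
		p.Match(MyParserID)
	}
}
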
// Match current input symbol as a wildcard. If the symbol type Matches
// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
// and {@link //consume} are called to complete the Match process.
//
// <p>If the symbol type does not Match,
// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
// strategy to attempt recovery. If {@link //getBuildParseTree} is
// {@code true} and the token index of the symbol returned by
// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
// the parse tree by calling {@link ParserRuleContext//addErrorNode}.</p>
//
// @return the Matched symbol
// @panics RecognitionException if the current input symbol did not Match
// a wildcard and the error strategy could not recover from the mismatched
// symbol

func (p *BaseParser) MatchWildcard() Token {
	t := p.GetCurrentToken()
	if t.GetTokenType() > 0 {
		p.errHandler.ReportMatch(p)
		p.Consume()
	} else {
		t = p.errHandler.RecoverInline(p)
		if p.BuildParseTrees && t.GetTokenIndex() == -1 {
			// we must have conjured up a new token during single-token
			// insertion, if it's not the current symbol
			p.ctx.AddErrorNode(t)
		}
	}
	return t
}

func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
	return p.ctx
}

func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
	p.ctx = v
}

func (p *BaseParser) GetParseListeners() []ParseTreeListener {
	if p.parseListeners == nil {
		return make([]ParseTreeListener, 0)
	}
	return p.parseListeners
}

// Registers {@code listener} to receive events during the parsing process.
//
// <p>To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
// rule entry methods may be omitted.</p>
//
// <p>With the following specific exceptions, calls to listener events are
// <em>deterministic</em>, i.e. for identical input the calls to listener
// methods will be the same.</p>
//
// <ul>
// <li>Alterations to the grammar used to generate code may change the
// behavior of the listener calls.</li>
// <li>Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.</li>
// <li>Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.</li>
// </ul>
//
// @param listener the listener to add
//
// @panics nilPointerException if {@code listener} is {@code nil}
//
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
	if listener == nil {
		panic("listener")
	}
	if p.parseListeners == nil {
		p.parseListeners = make([]ParseTreeListener, 0)
	}
	p.parseListeners = append(p.parseListeners, listener)
}

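// Illustrative sketch (not part of the upstream file): a minimal listener
// that counts rule entries. Embedding *BaseParseTreeListener (assumed to be
// the runtime's no-op base listener) inherits empty implementations of the
// remaining callbacks. Register it with p.AddParseListener(&ruleCounter{})
// before invoking the start rule; RemoveParseListener detaches it again.
type ruleCounter struct {
	*BaseParseTreeListener
	entered int
}

func (r *ruleCounter) EnterEveryRule(ctx ParserRuleContext) {
	r.entered++ // fires for every rule entry during the parse
}
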
// Remove {@code listener} from the list of parse listeners.
//
// <p>If {@code listener} is {@code nil} or has not been added as a parse
// listener, this method does nothing.</p>
// @param listener the listener to remove
//
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {

	if p.parseListeners != nil {

		idx := -1
		for i, v := range p.parseListeners {
			if v == listener {
				idx = i
				break
			}
		}

		if idx == -1 {
			return
		}

		// remove the listener from the slice
		p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)

		if len(p.parseListeners) == 0 {
			p.parseListeners = nil
		}
	}
}

// Remove all parse listeners.
func (p *BaseParser) removeParseListeners() {
	p.parseListeners = nil
}

// Notify any parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
	if p.parseListeners != nil {
		ctx := p.ctx
		for _, listener := range p.parseListeners {
			listener.EnterEveryRule(ctx)
			ctx.EnterRule(listener)
		}
	}
}

// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
//
func (p *BaseParser) TriggerExitRuleEvent() {
	if p.parseListeners != nil {
		// reverse order walk of listeners
		ctx := p.ctx
		l := len(p.parseListeners) - 1

		for i := range p.parseListeners {
			listener := p.parseListeners[l-i]
			ctx.ExitRule(listener)
			listener.ExitEveryRule(ctx)
		}
	}
}

func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
	return p.Interpreter
}

func (p *BaseParser) GetATN() *ATN {
	return p.Interpreter.atn
}

func (p *BaseParser) GetTokenFactory() TokenFactory {
	return p.input.GetTokenSource().GetTokenFactory()
}

// Tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
	p.input.GetTokenSource().setTokenFactory(factory)
}

// The ATN with bypass alternatives is expensive to create, so we create it
// lazily.
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
//
func (p *BaseParser) GetATNWithBypassAlts() {

	// TODO
	panic("Not implemented!")

	// serializedAtn := p.getSerializedATN()
	// if (serializedAtn == nil) {
	//     panic("The current parser does not support an ATN with bypass alternatives.")
	// }
	// result := p.bypassAltsAtnCache[serializedAtn]
	// if (result == nil) {
	//     deserializationOptions := NewATNDeserializationOptions(nil)
	//     deserializationOptions.generateRuleBypassTransitions = true
	//     result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
	//     p.bypassAltsAtnCache[serializedAtn] = result
	// }
	// return result
}

// The preferred method of getting a tree pattern. For example, here's a
// sample use:
//
// <pre>
// ParseTree t = parser.expr()
// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
// MyParser.RULE_expr)
// ParseTreeMatch m = p.Match(t)
// String id = m.Get("ID")
// </pre>

func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {

	panic("NewParseTreePatternMatcher not implemented!")
	//
	// if (lexer == nil) {
	//     if (p.GetTokenStream() != nil) {
	//         tokenSource := p.GetTokenStream().GetTokenSource()
	//         if _, ok := tokenSource.(ILexer); ok {
	//             lexer = tokenSource
	//         }
	//     }
	// }
	// if (lexer == nil) {
	//     panic("Parser can't discover a lexer to use")
	// }

	// m := NewParseTreePatternMatcher(lexer, p)
	// return m.compile(pattern, patternRuleIndex)
}

func (p *BaseParser) GetInputStream() IntStream {
	return p.GetTokenStream()
}

func (p *BaseParser) SetInputStream(input TokenStream) {
	p.SetTokenStream(input)
}

func (p *BaseParser) GetTokenStream() TokenStream {
	return p.input
}

// Set the token stream and reset the parser.
func (p *BaseParser) SetTokenStream(input TokenStream) {
	p.input = nil
	p.reset()
	p.input = input
}

// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref; e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
	return p.input.LT(1)
}

func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
	if offendingToken == nil {
		offendingToken = p.GetCurrentToken()
	}
	p._SyntaxErrors++
	line := offendingToken.GetLine()
	column := offendingToken.GetColumn()
	listener := p.GetErrorListenerDispatch()
	listener.SyntaxError(p, offendingToken, line, column, msg, err)
}

func (p *BaseParser) Consume() Token {
	o := p.GetCurrentToken()
	if o.GetTokenType() != TokenEOF {
		p.GetInputStream().Consume()
	}
	hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
	if p.BuildParseTrees || hasListener {
		if p.errHandler.inErrorRecoveryMode(p) {
			node := p.ctx.AddErrorNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitErrorNode(node)
				}
			}

		} else {
			node := p.ctx.AddTokenNode(o)
			if p.parseListeners != nil {
				for _, l := range p.parseListeners {
					l.VisitTerminal(node)
				}
			}
		}
		// node.invokingState = p.state
	}

	return o
}

func (p *BaseParser) addContextToParseTree() {
	// add current context to parent if we have a parent
	if p.ctx.GetParent() != nil {
		p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
	}
}

func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
	p.SetState(state)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.BuildParseTrees {
		p.addContextToParseTree()
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent()
	}
}

func (p *BaseParser) ExitRule() {
	p.ctx.SetStop(p.input.LT(-1))
	// trigger event on ctx, before it reverts to parent
	if p.parseListeners != nil {
		p.TriggerExitRuleEvent()
	}
	p.SetState(p.ctx.GetInvokingState())
	if p.ctx.GetParent() != nil {
		p.ctx = p.ctx.GetParent().(ParserRuleContext)
	} else {
		p.ctx = nil
	}
}

func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
	localctx.SetAltNumber(altNum)
	// if we have a new localctx, make sure we replace the existing ctx
	// that is the previous child of the parse tree
	if p.BuildParseTrees && p.ctx != localctx {
		if p.ctx.GetParent() != nil {
			p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
			p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
		}
	}
	p.ctx = localctx
}

// Get the precedence level for the top-most precedence rule.
//
// @return The precedence level for the top-most precedence rule, or -1 if
// the parser context is not nested within a precedence rule.

func (p *BaseParser) GetPrecedence() int {
	if len(p.precedenceStack) == 0 {
		return -1
	}

	return p.precedenceStack[len(p.precedenceStack)-1]
}

func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
	p.SetState(state)
	p.precedenceStack.Push(precedence)
	p.ctx = localctx
	p.ctx.SetStart(p.input.LT(1))
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

// Like {@link //EnterRule} but for recursive rules.

func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
	previous := p.ctx
	previous.SetParent(localctx)
	previous.SetInvokingState(state)
	previous.SetStop(p.input.LT(-1))

	p.ctx = localctx
	p.ctx.SetStart(previous.GetStart())
	if p.BuildParseTrees {
		p.ctx.AddChild(previous)
	}
	if p.parseListeners != nil {
		p.TriggerEnterRuleEvent() // simulates rule entry for
		// left-recursive rules
	}
}

func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
	p.precedenceStack.Pop()
	p.ctx.SetStop(p.input.LT(-1))
	retCtx := p.ctx // save current ctx (return value)
	// unroll so ctx is as it was before call to recursive method
	if p.parseListeners != nil {
		for p.ctx != parentCtx {
			p.TriggerExitRuleEvent()
			p.ctx = p.ctx.GetParent().(ParserRuleContext)
		}
	} else {
		p.ctx = parentCtx
	}
	// hook into tree
	retCtx.SetParent(parentCtx)
	if p.BuildParseTrees && parentCtx != nil {
		// add return ctx into invoking rule's tree
		parentCtx.AddChild(retCtx)
	}
}

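// Illustrative sketch (not part of the upstream file): for a left-recursive
// rule such as  expr : expr '*' expr | INT ;  the generated code brackets the
// loop with the three methods above, roughly as below. Every identifier with
// a "my"/"My" prefix is a hypothetical stand-in for generated code.
type MyExprParser struct{ *BaseParser }

func (p *MyExprParser) expr(precedence int) {
	parentCtx := p.GetParserRuleContext()
	localctx := myNewExprContext(p, parentCtx)
	p.EnterRecursionRule(localctx, myExprStartState, myRuleExpr, precedence)
	p.Match(MyExprParserINT)          // primary (non-recursive) alternative
	for p.Precpred(localctx, 2) {     // loop while the precedence floor permits
		localctx = myNewExprContext(p, parentCtx)
		p.PushNewRecursionContext(localctx, myExprStartState, myRuleExpr)
		p.Match(MyExprParserStar)
		p.expr(3) // recurse with a higher precedence floor
	}
	p.UnrollRecursionContexts(parentCtx)
}
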
func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
	ctx := p.ctx
	for ctx != nil {
		if ctx.GetRuleIndex() == ruleIndex {
			return ctx
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	return nil
}

func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}

func (p *BaseParser) inContext(context ParserRuleContext) bool {
	// TODO: useful in parser?
	return false
}

// Checks whether or not {@code symbol} can follow the current state in the
// ATN. The behavior of this method is equivalent to the following, but is
// implemented such that the complete context-sensitive follow set does not
// need to be explicitly constructed.
//
// <pre>
// return getExpectedTokens().contains(symbol)
// </pre>
//
// @param symbol the symbol type to check
// @return {@code true} if {@code symbol} can follow the current state in
// the ATN, otherwise {@code false}.

func (p *BaseParser) IsExpectedToken(symbol int) bool {
	atn := p.Interpreter.atn
	ctx := p.ctx
	s := atn.states[p.state]
	following := atn.NextTokens(s, nil)
	if following.contains(symbol) {
		return true
	}
	if !following.contains(TokenEpsilon) {
		return false
	}
	for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
		invokingState := atn.states[ctx.GetInvokingState()]
		rt := invokingState.GetTransitions()[0]
		following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
		if following.contains(symbol) {
			return true
		}
		ctx = ctx.GetParent().(ParserRuleContext)
	}
	if following.contains(TokenEpsilon) && symbol == TokenEOF {
		return true
	}

	return false
}

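// Illustrative sketch (hypothetical helper, not part of the upstream file):
// IsExpectedToken answers the same question as the expensive form quoted in
// the doc comment above, so a recovery routine could probe, for example,
// whether EOF may legally follow the current state:
func myCanStopHere(p *BaseParser) bool {
	return p.IsExpectedToken(TokenEOF)
}
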
// Computes the set of input symbols which could follow the current parser
// state and context, as given by {@link //GetState} and {@link //GetContext},
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
//
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
	return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}

func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
	atn := p.Interpreter.atn
	s := atn.states[p.state]
	return atn.NextTokens(s, nil)
}

// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.
func (p *BaseParser) GetRuleIndex(ruleName string) int {
	var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
	if ok {
		return ruleIndex
	}

	return -1
}

// Return List<String> of the rule names in your parser instance
// leading up to a call to the current rule. You could override if
// you want more details such as the file/line info of where
// in the ATN a rule is invoked.
//
// This is very useful for error messages.

func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
	if c == nil {
		c = p.ctx
	}
	stack := make([]string, 0)
	for c != nil {
		// compute what follows who invoked us
		ruleIndex := c.GetRuleIndex()
		if ruleIndex < 0 {
			stack = append(stack, "n/a")
		} else {
			stack = append(stack, p.GetRuleNames()[ruleIndex])
		}

		vp := c.GetParent()

		if vp == nil {
			break
		}

		c = vp.(ParserRuleContext)
	}
	return stack
}

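// Illustrative sketch (hypothetical helper, not part of the upstream file):
// the invocation stack reads innermost-rule-first, which is handy in error
// reports. The "strings" package is assumed to be imported for this sketch.
func myDescribeWhereWeAre(p *BaseParser) string {
	stack := p.GetRuleInvocationStack(nil) // nil means "start from the current ctx"
	return "in rule " + strings.Join(stack, " <- ")
}
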
// For debugging and other purposes.
func (p *BaseParser) GetDFAStrings() string {
	return fmt.Sprint(p.Interpreter.decisionToDFA)
}

// For debugging and other purposes.
func (p *BaseParser) DumpDFA() {
	seenOne := false
	for _, dfa := range p.Interpreter.decisionToDFA {
		if dfa.numStates() > 0 {
			if seenOne {
				fmt.Println()
			}
			fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
			fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
			seenOne = true
		}
	}
}

func (p *BaseParser) GetSourceName() string {
	return p.GrammarFileName
}

// During a parse it is sometimes useful to listen in on the rule entry and
// exit events as well as token matches. This is for quick-and-dirty debugging.
func (p *BaseParser) SetTrace(trace *TraceListener) {
	if trace == nil {
		p.RemoveParseListener(p.tracer)
		p.tracer = nil
	} else {
		if p.tracer != nil {
			p.RemoveParseListener(p.tracer)
		}
		p.tracer = NewTraceListener(p)
		p.AddParseListener(p.tracer)
	}
}
File diff suppressed because it is too large
@@ -1,362 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"reflect"
	"strconv"
)

type ParserRuleContext interface {
	RuleContext

	SetException(RecognitionException)

	AddTokenNode(token Token) *TerminalNodeImpl
	AddErrorNode(badToken Token) *ErrorNodeImpl

	EnterRule(listener ParseTreeListener)
	ExitRule(listener ParseTreeListener)

	SetStart(Token)
	GetStart() Token

	SetStop(Token)
	GetStop() Token

	AddChild(child RuleContext) RuleContext
	RemoveLastChild()
}

type BaseParserRuleContext struct {
	*BaseRuleContext

	start, stop Token
	exception   RecognitionException
	children    []Tree
}

func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
	prc := new(BaseParserRuleContext)

	prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = -1
	// If we are debugging or building a parse tree for a Visitor,
	// we need to track all of the tokens and rule invocations associated
	// with this rule's context. This is empty for parsing w/o tree construction
	// because we don't need to track the details about how we parse this rule.
	prc.children = nil
	prc.start = nil
	prc.stop = nil
	// The exception that forced this rule to return. If the rule successfully
	// completed, this is {@code nil}.
	prc.exception = nil

	return prc
}

func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
	prc.exception = e
}

func (prc *BaseParserRuleContext) GetChildren() []Tree {
	return prc.children
}

func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
	// from RuleContext
	prc.parentCtx = ctx.parentCtx
	prc.invokingState = ctx.invokingState
	prc.children = nil
	prc.start = ctx.start
	prc.stop = ctx.stop
}

func (prc *BaseParserRuleContext) GetText() string {
	if prc.GetChildCount() == 0 {
		return ""
	}

	var s string
	for _, child := range prc.children {
		s += child.(ParseTree).GetText()
	}

	return s
}

// Double dispatch methods for listeners
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
}

func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
}

// Does not set parent link; other add methods do that.
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
	if prc.children == nil {
		prc.children = make([]Tree, 0)
	}
	if child == nil {
		panic("Child may not be null")
	}
	prc.children = append(prc.children, child)
	return child
}

// Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have a # label, we will need to remove the
// generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
	if prc.children != nil && len(prc.children) > 0 {
		prc.children = prc.children[0 : len(prc.children)-1]
	}
}

func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {

	node := NewTerminalNodeImpl(token)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node

}

func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
	node := NewErrorNodeImpl(badToken)
	prc.addTerminalNodeChild(node)
	node.parentCtx = prc
	return node
}

func (prc *BaseParserRuleContext) GetChild(i int) Tree {
	if prc.children != nil && len(prc.children) >= i {
		return prc.children[i]
	}

	return nil
}

func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
	if childType == nil {
		return prc.GetChild(i).(RuleContext)
	}

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if reflect.TypeOf(child) == childType {
			if i == 0 {
				return child.(RuleContext)
			}

			i--
		}
	}

	return nil
}

func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
	return TreesStringTree(prc, ruleNames, recog)
}

func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
	return prc
}

func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
	return visitor.VisitChildren(prc)
}

func (prc *BaseParserRuleContext) SetStart(t Token) {
	prc.start = t
}

func (prc *BaseParserRuleContext) GetStart() Token {
	return prc.start
}

func (prc *BaseParserRuleContext) SetStop(t Token) {
	prc.stop = t
}

func (prc *BaseParserRuleContext) GetStop() Token {
	return prc.stop
}

func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if c2, ok := child.(TerminalNode); ok {
			if c2.GetSymbol().GetTokenType() == ttype {
				if i == 0 {
					return c2
				}

				i--
			}
		}
	}
	return nil
}

func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
	if prc.children == nil {
		return make([]TerminalNode, 0)
	}

	tokens := make([]TerminalNode, 0)

	for j := 0; j < len(prc.children); j++ {
		child := prc.children[j]
		if tchild, ok := child.(TerminalNode); ok {
			if tchild.GetSymbol().GetTokenType() == ttype {
				tokens = append(tokens, tchild)
			}
		}
	}

	return tokens
}

func (prc *BaseParserRuleContext) GetPayload() interface{} {
	return prc
}

func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
	if prc.children == nil || i < 0 || i >= len(prc.children) {
		return nil
	}

	j := -1 // what element have we found with ctxType?
	for _, o := range prc.children {

		childType := reflect.TypeOf(o)

		if childType.Implements(ctxType) {
			j++
			if j == i {
				return o.(RuleContext)
			}
		}
	}
	return nil
}

// Go lacks generics, so it's not possible for us to return the child with the
// correct type, but we do check for convertibility.

func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
	return prc.getChild(ctxType, i)
}

func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
	if prc.children == nil {
		return make([]RuleContext, 0)
	}

	contexts := make([]RuleContext, 0)

	for _, child := range prc.children {
		childType := reflect.TypeOf(child)

		if childType.ConvertibleTo(ctxType) {
			contexts = append(contexts, child.(RuleContext))
		}
	}
	return contexts
}

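// Illustrative sketch (not part of the upstream file): callers obtain the
// reflect.Type of a child context interface from a typed nil pointer, then
// filter the children with GetTypedRuleContexts. IMyExprContext and
// myAllExprs are hypothetical names standing in for generated code.
func myAllExprs(prc *BaseParserRuleContext) []RuleContext {
	ctxType := reflect.TypeOf((*IMyExprContext)(nil)).Elem() // interface type, not *interface
	return prc.GetTypedRuleContexts(ctxType)
}
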
func (prc *BaseParserRuleContext) GetChildCount() int {
	if prc.children == nil {
		return 0
	}

	return len(prc.children)
}

func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
	if prc.start == nil || prc.stop == nil {
		return TreeInvalidInterval
	}

	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
}

// need to manage circular dependencies, so export now

// Print out a whole tree, not just a node, in LISP format
// (root child1 .. childN). Print just a node if this is a leaf.
//

func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {

	var p ParserRuleContext = prc
	s := "["
	for p != nil && p != stop {
		if ruleNames == nil {
			if !p.IsEmpty() {
				s += strconv.Itoa(p.GetInvokingState())
			}
		} else {
			ri := p.GetRuleIndex()
			var ruleName string
			if ri >= 0 && ri < len(ruleNames) {
				ruleName = ruleNames[ri]
			} else {
				ruleName = strconv.Itoa(ri)
			}
			s += ruleName
		}
		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
			s += " "
		}
		pi := p.GetParent()
		if pi != nil {
			p = pi.(ParserRuleContext)
		} else {
			p = nil
		}
	}
	s += "]"
	return s
}

var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)

type InterpreterRuleContext interface {
	ParserRuleContext
}

type BaseInterpreterRuleContext struct {
	*BaseParserRuleContext
}

func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {

	prc := new(BaseInterpreterRuleContext)

	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)

	prc.RuleIndex = ruleIndex

	return prc
}
@@ -1,756 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
)

// Represents {@code $} in local context prediction, which means wildcard:
// {@code $ + x = $}.
const (
	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)

// Represents {@code $} in an array in full context mode, when {@code $}
// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
// {@code $} = {@link //EmptyReturnState}.

var (
	BasePredictionContextglobalNodeCount = 1
	BasePredictionContextid              = BasePredictionContextglobalNodeCount
)

type PredictionContext interface {
	hash() int
	GetParent(int) PredictionContext
	getReturnState(int) int
	equals(PredictionContext) bool
	length() int
	isEmpty() bool
	hasEmptyPath() bool
	String() string
}

type BasePredictionContext struct {
	cachedHash int
}

func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
	pc := new(BasePredictionContext)
	pc.cachedHash = cachedHash

	return pc
}

func (b *BasePredictionContext) isEmpty() bool {
	return false
}

func calculateHash(parent PredictionContext, returnState int) int {
	h := murmurInit(1)
	h = murmurUpdate(h, parent.hash())
	h = murmurUpdate(h, returnState)
	return murmurFinish(h, 2)
}

func calculateEmptyHash() int {
	h := murmurInit(1)
	return murmurFinish(h, 0)
}

// Used to cache {@link BasePredictionContext} objects. It's used for the
// shared context cache associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.

type PredictionContextCache struct {
	cache map[PredictionContext]PredictionContext
}

func NewPredictionContextCache() *PredictionContextCache {
	t := new(PredictionContextCache)
	t.cache = make(map[PredictionContext]PredictionContext)
	return t
}

// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect shared cache from unsafe thread access.
//
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
	if ctx == BasePredictionContextEMPTY {
		return BasePredictionContextEMPTY
	}
	existing := p.cache[ctx]
	if existing != nil {
		return existing
	}
	p.cache[ctx] = ctx
	return ctx
}

func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
	return p.cache[ctx]
}

func (p *PredictionContextCache) length() int {
	return len(p.cache)
}

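// Illustrative sketch (hypothetical helper, not part of the upstream file):
// typical use of the cache is interning — create a context, then swap it for
// the instance the cache already holds when this exact context value was
// added before.
func myIntern(cache *PredictionContextCache, parent PredictionContext, returnState int) PredictionContext {
	ctx := SingletonBasePredictionContextCreate(parent, returnState)
	return cache.add(ctx) // hands back the previously added instance if present
}
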
type SingletonPredictionContext interface {
	PredictionContext
}

type BaseSingletonPredictionContext struct {
	*BasePredictionContext

	parentCtx   PredictionContext
	returnState int
}

func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {

	s := new(BaseSingletonPredictionContext)
	s.BasePredictionContext = NewBasePredictionContext(37)

	if parent != nil {
		s.cachedHash = calculateHash(parent, returnState)
	} else {
		s.cachedHash = calculateEmptyHash()
	}

	s.parentCtx = parent
	s.returnState = returnState

	return s
}

func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
	if returnState == BasePredictionContextEmptyReturnState && parent == nil {
		// someone can pass in the bits of an array ctx that mean $
		return BasePredictionContextEMPTY
	}

	return NewBaseSingletonPredictionContext(parent, returnState)
}

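// Illustrative sketch (hypothetical helper, not part of the upstream file):
// the factory above normalizes the empty case, so "$" has one canonical
// representation rather than many structurally equal copies.
func myIsCanonicalEmpty() bool {
	ctx := SingletonBasePredictionContextCreate(nil, BasePredictionContextEmptyReturnState)
	return ctx == BasePredictionContextEMPTY // true: the shared EMPTY is reused
}
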
func (b *BaseSingletonPredictionContext) length() int {
	return 1
}

func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
	return b.parentCtx
}

func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
	return b.returnState
}

func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
	return b.returnState == BasePredictionContextEmptyReturnState
}

func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
	if b == other {
		return true
	} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
		return false
	} else if b.hash() != other.hash() {
		return false // can't be same if hash is different
	}

	otherP := other.(*BaseSingletonPredictionContext)

	if b.returnState != other.getReturnState(0) {
		return false
	} else if b.parentCtx == nil {
		return otherP.parentCtx == nil
	}

	return b.parentCtx.equals(otherP.parentCtx)
}

func (b *BaseSingletonPredictionContext) hash() int {
	h := murmurInit(1)

	if b.parentCtx == nil {
		return murmurFinish(h, 0)
	}

	h = murmurUpdate(h, b.parentCtx.hash())
	h = murmurUpdate(h, b.returnState)
	return murmurFinish(h, 2)
}

func (b *BaseSingletonPredictionContext) String() string {
	var up string

	if b.parentCtx == nil {
		up = ""
	} else {
		up = b.parentCtx.String()
	}

	if len(up) == 0 {
		if b.returnState == BasePredictionContextEmptyReturnState {
			return "$"
		}

		return strconv.Itoa(b.returnState)
	}

	return strconv.Itoa(b.returnState) + " " + up
}

var BasePredictionContextEMPTY = NewEmptyPredictionContext()

type EmptyPredictionContext struct {
	*BaseSingletonPredictionContext
}

func NewEmptyPredictionContext() *EmptyPredictionContext {

	p := new(EmptyPredictionContext)

	p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)

	return p
}

func (e *EmptyPredictionContext) isEmpty() bool {
	return true
}

func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
	return nil
}

func (e *EmptyPredictionContext) getReturnState(index int) int {
	return e.returnState
}

func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
	return e == other
}

func (e *EmptyPredictionContext) String() string {
	return "$"
}

type ArrayPredictionContext struct {
	*BasePredictionContext

	parents      []PredictionContext
	returnStates []int
}

func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
	// Parent can be nil only if full ctx mode and we make an array
	// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
	// nil parent and
	// returnState == {@link //EmptyReturnState}.

	c := new(ArrayPredictionContext)
	c.BasePredictionContext = NewBasePredictionContext(37)

	for i := range parents {
		c.cachedHash += calculateHash(parents[i], returnStates[i])
	}

	c.parents = parents
	c.returnStates = returnStates

	return c
}

func (a *ArrayPredictionContext) GetReturnStates() []int {
	return a.returnStates
}

func (a *ArrayPredictionContext) hasEmptyPath() bool {
	return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
}

func (a *ArrayPredictionContext) isEmpty() bool {
	// since EmptyReturnState can only appear in the last position, we
	// don't need to verify that size==1
	return a.returnStates[0] == BasePredictionContextEmptyReturnState
}

func (a *ArrayPredictionContext) length() int {
	return len(a.returnStates)
}

func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
	return a.parents[index]
}

func (a *ArrayPredictionContext) getReturnState(index int) int {
	return a.returnStates[index]
}

func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
	if _, ok := other.(*ArrayPredictionContext); !ok {
		return false
	} else if a.cachedHash != other.hash() {
		return false // can't be same if hash is different
	} else {
		otherP := other.(*ArrayPredictionContext)
		return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
	}
}

func (a *ArrayPredictionContext) hash() int {
	h := murmurInit(1)

	for _, p := range a.parents {
		h = murmurUpdate(h, p.hash())
	}

	for _, r := range a.returnStates {
		h = murmurUpdate(h, r)
	}

	return murmurFinish(h, 2*len(a.parents))
}

func (a *ArrayPredictionContext) String() string {
	if a.isEmpty() {
		return "[]"
	}

	s := "["
	for i := 0; i < len(a.returnStates); i++ {
		if i > 0 {
			s = s + ", "
		}
		if a.returnStates[i] == BasePredictionContextEmptyReturnState {
			s = s + "$"
			continue
		}
		s = s + strconv.Itoa(a.returnStates[i])
		if a.parents[i] != nil {
			s = s + " " + a.parents[i].String()
		} else {
			s = s + "nil"
		}
	}

	return s + "]"
}

// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
	if outerContext == nil {
		outerContext = RuleContextEmpty
	}
	// if we are in RuleContext of start rule, s, then BasePredictionContext
	// is EMPTY. Nobody called us. (if we are empty, return empty)
	if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
		return BasePredictionContextEMPTY
	}
	// If we have a parent, convert it to a BasePredictionContext graph
	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
	state := a.states[outerContext.GetInvokingState()]
	transition := state.GetTransitions()[0]

	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}

func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	// share same graph if both same
	if a == b {
		return a
	}

	ac, ok1 := a.(*BaseSingletonPredictionContext)
	bc, ok2 := b.(*BaseSingletonPredictionContext)

	if ok1 && ok2 {
		return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
	}
	// At least one of a or b is array.
	// If one is $ and rootIsWildcard, return $ as wildcard.
	if rootIsWildcard {
		if _, ok := a.(*EmptyPredictionContext); ok {
			return a
		}
		if _, ok := b.(*EmptyPredictionContext); ok {
			return b
		}
	}
	// convert singleton so both are arrays to normalize
	if _, ok := a.(*BaseSingletonPredictionContext); ok {
		a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
	}
	if _, ok := b.(*BaseSingletonPredictionContext); ok {
		b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
	}
	return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
}

// Merge two {@link SingletonBasePredictionContext} instances.
//
// <p>Stack tops equal, parents merge is same; return left graph.<br>
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ; merge parents, giving an array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both elements in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
//
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
|
|
||||||
if mergeCache != nil {
|
|
||||||
previous := mergeCache.Get(a.hash(), b.hash())
|
|
||||||
if previous != nil {
|
|
||||||
return previous.(PredictionContext)
|
|
||||||
}
|
|
||||||
previous = mergeCache.Get(b.hash(), a.hash())
|
|
||||||
if previous != nil {
|
|
||||||
return previous.(PredictionContext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rootMerge := mergeRoot(a, b, rootIsWildcard)
|
|
||||||
if rootMerge != nil {
|
|
||||||
if mergeCache != nil {
|
|
||||||
mergeCache.set(a.hash(), b.hash(), rootMerge)
|
|
||||||
}
|
|
||||||
return rootMerge
|
|
||||||
}
|
|
||||||
if a.returnState == b.returnState {
|
|
||||||
parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
|
|
||||||
// if parent is same as existing a or b parent or reduced to a parent,
|
|
||||||
// return it
|
|
||||||
if parent == a.parentCtx {
|
|
||||||
return a // ax + bx = ax, if a=b
|
|
||||||
}
|
|
||||||
if parent == b.parentCtx {
|
|
||||||
return b // ax + bx = bx, if a=b
|
|
||||||
}
|
|
||||||
// else: ax + ay = a'[x,y]
|
|
||||||
// merge parents x and y, giving array node with x,y then remainders
|
|
||||||
// of those graphs. dup a, a' points at merged array
|
|
||||||
// Newjoined parent so create Newsingleton pointing to it, a'
|
|
||||||
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
|
|
||||||
if mergeCache != nil {
|
|
||||||
mergeCache.set(a.hash(), b.hash(), spc)
|
|
||||||
}
|
|
||||||
return spc
|
|
||||||
}
|
|
||||||
// a != b payloads differ
|
|
||||||
// see if we can collapse parents due to $+x parents if local ctx
|
|
||||||
var singleParent PredictionContext
|
|
||||||
if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
|
|
||||||
// bx =
|
|
||||||
// [a,b]x
|
|
||||||
singleParent = a.parentCtx
|
|
||||||
}
|
|
||||||
if singleParent != nil { // parents are same
|
|
||||||
// sort payloads and use same parent
|
|
||||||
payloads := []int{a.returnState, b.returnState}
|
|
||||||
if a.returnState > b.returnState {
|
|
||||||
payloads[0] = b.returnState
|
|
||||||
payloads[1] = a.returnState
|
|
||||||
}
|
|
||||||
parents := []PredictionContext{singleParent, singleParent}
|
|
||||||
apc := NewArrayPredictionContext(parents, payloads)
|
|
||||||
if mergeCache != nil {
|
|
||||||
mergeCache.set(a.hash(), b.hash(), apc)
|
|
||||||
}
|
|
||||||
return apc
|
|
||||||
}
|
|
||||||
// parents differ and can't merge them. Just pack together
|
|
||||||
// into array can't merge.
|
|
||||||
// ax + by = [ax,by]
|
|
||||||
payloads := []int{a.returnState, b.returnState}
|
|
||||||
parents := []PredictionContext{a.parentCtx, b.parentCtx}
|
|
||||||
if a.returnState > b.returnState { // sort by payload
|
|
||||||
payloads[0] = b.returnState
|
|
||||||
payloads[1] = a.returnState
|
|
||||||
parents = []PredictionContext{b.parentCtx, a.parentCtx}
|
|
||||||
}
|
|
||||||
apc := NewArrayPredictionContext(parents, payloads)
|
|
||||||
if mergeCache != nil {
|
|
||||||
mergeCache.set(a.hash(), b.hash(), apc)
|
|
||||||
}
|
|
||||||
return apc
|
|
||||||
}
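
// A hedged, self-contained sketch (not part of the runtime) of the payload
// ordering rule used above: when two singletons with different return states
// are packed into an array context, the smaller return state must come first
// so that array contexts stay sorted and mergeArrays can walk them like a
// merge sort. examplePayloadOrder is a hypothetical name.
func examplePayloadOrder(aState, bState int) []int {
	payloads := []int{aState, bState}
	if aState > bState { // sort by payload, mirroring mergeSingletons
		payloads[0], payloads[1] = bState, aState
	}
	return payloads
}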

//
// Handle the case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is a superset of any graph; return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
// {@code //EMPTY}; return left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise {@code false} to indicate a full-context merge
//
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
	if rootIsWildcard {
		if a == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // * + b = *
		}
		if b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // a + * = *
		}
	} else {
		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}
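
// A hedged, self-contained sketch (not part of the runtime) restating the
// root-merge cases above over plain booleans; exampleRootMergeCase is a
// hypothetical name. aEmpty/bEmpty mean "that operand is the EMPTY context".
func exampleRootMergeCase(aEmpty, bEmpty, rootIsWildcard bool) string {
	switch {
	case rootIsWildcard && (aEmpty || bEmpty):
		return "wildcard root: EMPTY absorbs the other operand"
	case aEmpty && bEmpty:
		return "$ + $ = $"
	case aEmpty || bEmpty:
		return "$ + x = [$,x]: pack x together with the empty return state"
	default:
		return "no root case applies; mergeRoot returns nil"
	}
}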

//
// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
//
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.hash(), b.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.hash(), a.hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax -> ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position; trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.set(a.hash(), b.hash(), pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created same array as a or b, return that instead
	// TODO: track whether this is possible above during merge sort for speed
	if M == a {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), a)
		}
		return a
	}
	if M == b {
		if mergeCache != nil {
			mergeCache.set(a.hash(), b.hash(), b)
		}
		return b
	}
	combineCommonParents(mergedParents)

	if mergeCache != nil {
		mergeCache.set(a.hash(), b.hash(), M)
	}
	return M
}
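
// A hedged, self-contained sketch (not part of the runtime) of the
// two-pointer walk above: merge two sorted int slices, collapsing equal
// heads into a single slot the way equal returnStates are collapsed.
// exampleMergeSorted is a hypothetical name.
func exampleMergeSorted(a, b []int) []int {
	merged := make([]int, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]: // equal stack tops collapse into one entry
			merged = append(merged, a[i])
			i++
			j++
		case a[i] < b[j]:
			merged = append(merged, a[i])
			i++
		default:
			merged = append(merged, b[j])
			j++
		}
	}
	merged = append(merged, a[i:]...) // at most one of these two appends
	merged = append(merged, b[j:]...) // actually copies anything
	return merged
}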

//
// Make a pass over all <em>M</em> {@code parents} and merge any that are
// {@code equals()}.
//
func combineCommonParents(parents []PredictionContext) {
	uniqueParents := make(map[PredictionContext]PredictionContext)

	for p := 0; p < len(parents); p++ {
		parent := parents[p]
		if uniqueParents[parent] == nil {
			uniqueParents[parent] = parent
		}
	}
	for q := 0; q < len(parents); q++ {
		parents[q] = uniqueParents[parents[q]]
	}
}

func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
	if context.isEmpty() {
		return context
	}
	existing := visited[context]
	if existing != nil {
		return existing
	}
	existing = contextCache.Get(context)
	if existing != nil {
		visited[context] = existing
		return existing
	}
	changed := false
	parents := make([]PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || parent != context.GetParent(i) {
			if !changed {
				parents = make([]PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	if !changed {
		contextCache.add(context)
		visited[context] = context
		return context
	}
	var updated PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
	}
	contextCache.add(updated)
	visited[updated] = updated
	visited[context] = updated

	return updated
}

@@ -1,553 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.

const (
	//
	// The SLL(*) prediction mode. This prediction mode ignores the current
	// parser context when making predictions. This is the fastest prediction
	// mode, and provides correct results for many grammars. This prediction
	// mode is more powerful than the prediction mode provided by ANTLR 3, but
	// may result in syntax errors for grammar and input combinations which are
	// not SLL.
	//
	// <p>
	// When using this prediction mode, the parser will either return a correct
	// parse tree (i.e. the same parse tree that would be returned with the
	// {@link //LL} prediction mode), or it will Report a syntax error. If a
	// syntax error is encountered when using the {@link //SLL} prediction mode,
	// it may be due to either an actual syntax error in the input or indicate
	// that the particular combination of grammar and input requires the more
	// powerful {@link //LL} prediction abilities to complete successfully.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeSLL = 0
	//
	// The LL(*) prediction mode. This prediction mode allows the current parser
	// context to be used for resolving SLL conflicts that occur during
	// prediction. This is the fastest prediction mode that guarantees correct
	// parse results for all combinations of grammars with syntactically correct
	// inputs.
	//
	// <p>
	// When using this prediction mode, the parser will make correct decisions
	// for all syntactically-correct grammar and input combinations. However, in
	// cases where the grammar is truly ambiguous this prediction mode might not
	// Report a precise answer for <em>exactly which</em> alternatives are
	// ambiguous.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeLL = 1
	//
	// The LL(*) prediction mode with exact ambiguity detection. In addition to
	// the correctness guarantees provided by the {@link //LL} prediction mode,
	// this prediction mode instructs the prediction algorithm to determine the
	// complete and exact set of ambiguous alternatives for every ambiguous
	// decision encountered while parsing.
	//
	// <p>
	// This prediction mode may be used for diagnosing ambiguities during
	// grammar development. Due to the performance overhead of calculating sets
	// of ambiguous alternatives, this prediction mode should be avoided when
	// the exact results are not necessary.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeLLExactAmbigDetection = 2
)
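
// A hedged usage sketch: how a caller would typically select one of these
// modes on a generated parser. MyParser and NewMyParser are hypothetical
// generated names, and the exact accessor for the ATN simulator may differ
// by runtime version.
//
//	p := NewMyParser(tokens)
//	p.GetInterpreter().SetPredictionMode(PredictionModeSLL) // fast path first
//	// ...parse; on a syntax error, rewind and retry with PredictionModeLL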

//
// Computes the SLL prediction termination condition.
//
// <p>
// This method computes the SLL prediction termination condition for both of
// the following cases.</p>
//
// <ul>
// <li>The usual SLL+LL fallback upon SLL conflict</li>
// <li>Pure SLL without LL fallback</li>
// </ul>
//
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
//
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.</p>
//
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative (e.g.
// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
// configuration, SLL could continue with the hopes that more lookahead will
// resolve via one of those non-conflicting configurations.</p>
//
// <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.</p>
//
// <p><strong>HEURISTIC</strong></p>
//
// <p>As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):</p>
//
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
//
// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative two has another way to continue,
// via {@code [6|2|[]]}.</p>
//
// <p>It also lets us continue for this rule:</p>
//
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
//
// <p>After matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.</p>
//
// <p><strong>PURE SLL PARSING</strong></p>
//
// <p>To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.</p>
//
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
//
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
// states because they need to create the DFA cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.</p>
//
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
// semantic predicate contexts, so we might see two configurations like the
// following.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
//
// <p>Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
// For example, we test {@code (x+x')==x''} when looking for conflicts in
// the following configurations.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
//
// <p>If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
//
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
	// Configs in rule stop states indicate reaching the end of the decision
	// rule (local context) or end of start rule (full context). If all
	// configs meet this condition, then none of the configurations is able
	// to Match additional input, so we terminate prediction.
	//
	if PredictionModeallConfigsInRuleStopStates(configs) {
		return true
	}
	// pure SLL mode parsing
	if mode == PredictionModeSLL {
		// Don't bother with combining configs from different semantic
		// contexts if we can fail over to full LL; it costs more time
		// since we'll often fail over anyway.
		if configs.HasSemanticContext() {
			// dup configs, tossing out semantic predicates
			dup := NewBaseATNConfigSet(false)
			for _, c := range configs.GetItems() {
				// NewBaseATNConfig({semanticContext:}, c)
				c = NewBaseATNConfig2(c, SemanticContextNone)
				dup.Add(c, nil)
			}
			configs = dup
		}
		// now we have combined contexts for configs with dissimilar preds
	}
	// pure SLL or combined SLL+LL mode parsing
	altsets := PredictionModegetConflictingAltSubsets(configs)
	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}
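
// A hedged, self-contained sketch (not part of the runtime) of the stop rule
// above, with each alternative subset reduced to a plain []int. Note the
// real second test is per ATN state (hasStateAssociatedWithOneAlt); a
// single-alt subset approximates it here. exampleSLLStop is hypothetical.
func exampleSLLStop(altSubsets [][]int) bool {
	conflicting := false
	singleAlt := false
	for _, alts := range altSubsets {
		if len(alts) > 1 {
			conflicting = true // some subset still has competing alts
		}
		if len(alts) == 1 {
			singleAlt = true // some subset pins prediction to one alt
		}
	}
	return conflicting && !singleAlt
}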

// Checks if any configuration in {@code configs} is in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if any configuration in {@code configs} is in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); ok {
			return true
		}
	}
	return false
}

// Checks if all configurations in {@code configs} are in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if all configurations in {@code configs} are in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); !ok {
			return false
		}
	}
	return true
}

//
// Full LL prediction termination.
//
// <p>Can we stop looking ahead during ATN simulation or is there some
// uncertainty as to which alternative we will ultimately pick, after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more look ahead to decide which of those
// alternatives we should predict.</p>
//
// <p>The basic idea is to split the set of configurations {@code C} into
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
// non-conflicting configurations. Two configurations conflict if they have
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
//
// <p>Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:</p>
//
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
//
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
//
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
// {@code s} and {@code ctx}.</p>
//
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// more lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.</p>
//
// <p>The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.</p>
//
// <p>No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.</p>
//
// <p><strong>CONFLICTING CONFIGS</strong></p>
//
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
// when {@code i!=j} but {@code x=x'}. Because we merge all
// {@code (s, i, _)} configurations together, that means that there are at
// most {@code n} configurations associated with state {@code s} for
// {@code n} possible alternatives in the decision. The merged stacks
// complicate the comparison of configuration contexts {@code x} and
// {@code x'}. Sam checks to see if one is a subset of the other by calling
// merge and checking to see if the merged result is either {@code x} or
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
// is the superset, then {@code i} is the only possible prediction since the
// others resolve to {@code min(i)} as well. However, if {@code x} is
// associated with {@code j>i} then at least one stack configuration for
// {@code j} is not in conflict with alternative {@code i}. The algorithm
// should keep going, looking for more lookahead due to the uncertainty.</p>
//
// <p>For simplicity, I'm doing an equality check between {@code x} and
// {@code x'} that lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity, but also because that is the test you need to detect the
// alternatives that are actually in conflict.</p>
//
// <p><strong>CONTINUE/STOP RULE</strong></p>
//
// <p>Continue if union of resolved alternative sets from non-conflicting and
// conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.</p>
//
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
// alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.</p>
//
// <p><strong>CASES</strong></p>
//
// <ul>
//
// <li>no conflicts and more than 1 alternative in set => continue</li>
//
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1,3}} => continue
// </li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} => stop and predict 1</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {1}} = {@code {1}} => stop and predict 1, can announce
// ambiguity {@code {1,2}}</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {2}} = {@code {1,2}} => continue</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {3}} = {@code {1,3}} => continue</li>
//
// </ul>
//
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
//
// <p>If all states Report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set.</p>
//
// <p><code>|A_<em>i</em>|>1</code> and
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
//
// <p>In other words, we continue examining lookahead until all {@code A_i}
// have more than one alternative and all {@code A_i} are the same. If
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
// because the resolved set is {@code {1}}. To determine what the real
// ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three, so we keep going. When we need exact ambiguity
// detection, we can only stop prediction when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
	return PredictionModegetSingleViableAlt(altsets)
}

//
// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
	return !PredictionModehasNonConflictingAltSet(altsets)
}

//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() == 1 {
			return true
		}
	}
	return false
}

//
// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() > 1 {
			return true
		}
	}
	return false
}

//
// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
	var first *BitSet

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if first == nil {
			first = alts
		} else if alts != first {
			return false
		}
	}

	return true
}

//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
	all := PredictionModeGetAlts(altsets)
	if all.length() == 1 {
		return all.minValue()
	}

	return ATNInvalidAltNumber
}

// Gets the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each {@link BitSet}
// in {@code altsets}.
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
	all := NewBitSet()
	for _, alts := range altsets {
		all.or(alts)
	}
	return all
}

//
// This func gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
	configToAlts := make(map[int]*BitSet)

	for _, c := range configs.GetItems() {
		key := 31*c.GetState().GetStateNumber() + c.GetContext().hash()

		alts, ok := configToAlts[key]
		if !ok {
			alts = NewBitSet()
			configToAlts[key] = alts
		}
		alts.add(c.GetAlt())
	}

	values := make([]*BitSet, 0, 10)
	for _, v := range configToAlts {
		values = append(values, v)
	}
	return values
}
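
// A hedged, self-contained sketch (not part of the runtime) of the grouping
// step above: bucket alternatives by a (state, context) key so each bucket
// becomes one alternative subset. A BitSet would deduplicate alts; a slice
// keeps the sketch short. exampleConfig and exampleGroupAlts are hypothetical.
type exampleConfig struct {
	state, alt, ctxHash int
}

func exampleGroupAlts(configs []exampleConfig) map[int][]int {
	buckets := make(map[int][]int)
	for _, c := range configs {
		key := 31*c.state + c.ctxHash // same key shape as the code above
		buckets[key] = append(buckets[key], c.alt)
	}
	return buckets
}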

//
// Get a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
// <pre>
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
	m := NewAltDict()

	for _, c := range configs.GetItems() {
		alts := m.Get(c.GetState().String())
		if alts == nil {
			alts = NewBitSet()
			m.put(c.GetState().String(), alts)
		}
		alts.(*BitSet).add(c.GetAlt())
	}
	return m
}

func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
	values := PredictionModeGetStateToAltMap(configs).values()
	for i := 0; i < len(values); i++ {
		if values[i].(*BitSet).length() == 1 {
			return true
		}
	}
	return false
}

func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
	result := ATNInvalidAltNumber

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		minAlt := alts.minValue()
		if result == ATNInvalidAltNumber {
			result = minAlt
		} else if result != minAlt { // more than 1 viable alt
			return ATNInvalidAltNumber
		}
	}
	return result
}

@@ -1,217 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strings"

	"strconv"
)

type Recognizer interface {
	GetLiteralNames() []string
	GetSymbolicNames() []string
	GetRuleNames() []string

	Sempred(RuleContext, int, int) bool
	Precpred(RuleContext, int) bool

	GetState() int
	SetState(int)
	Action(RuleContext, int, int)
	AddErrorListener(ErrorListener)
	RemoveErrorListeners()
	GetATN() *ATN
	GetErrorListenerDispatch() ErrorListener
}

type BaseRecognizer struct {
	listeners []ErrorListener
	state     int

	RuleNames       []string
	LiteralNames    []string
	SymbolicNames   []string
	GrammarFileName string
}

func NewBaseRecognizer() *BaseRecognizer {
	rec := new(BaseRecognizer)
	rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
	rec.state = -1
	return rec
}

var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)

func (b *BaseRecognizer) checkVersion(toolVersion string) {
	runtimeVersion := "4.9.1"
	if runtimeVersion != toolVersion {
		fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
	}
}

func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
	panic("action not implemented on Recognizer!")
}

func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
	b.listeners = append(b.listeners, listener)
}

func (b *BaseRecognizer) RemoveErrorListeners() {
	b.listeners = make([]ErrorListener, 0)
}

func (b *BaseRecognizer) GetRuleNames() []string {
	return b.RuleNames
}

func (b *BaseRecognizer) GetTokenNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetSymbolicNames() []string {
	return b.SymbolicNames
}

func (b *BaseRecognizer) GetLiteralNames() []string {
	return b.LiteralNames
}

func (b *BaseRecognizer) GetState() int {
	return b.state
}

func (b *BaseRecognizer) SetState(v int) {
	b.state = v
}

//func (b *Recognizer) GetTokenTypeMap() {
//	var tokenNames = b.GetTokenNames()
//	if (tokenNames==nil) {
//		panic("The current recognizer does not provide a list of token names.")
//	}
//	var result = tokenTypeMapCache[tokenNames]
//	if(result==nil) {
//		result = tokenNames.reduce(function(o, k, i) { o[k] = i })
//		result.EOF = TokenEOF
//		tokenTypeMapCache[tokenNames] = result
//	}
//	return result
//}

// Get a map from rule names to rule indexes.
//
// <p>Used for XPath and tree pattern compilation.</p>
//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
	panic("Method not defined!")
	// var ruleNames = b.GetRuleNames()
	// if (ruleNames==nil) {
	//	panic("The current recognizer does not provide a list of rule names.")
	// }
	//
	// var result = ruleIndexMapCache[ruleNames]
	// if(result==nil) {
	//	result = ruleNames.reduce(function(o, k, i) { o[k] = i })
	//	ruleIndexMapCache[ruleNames] = result
	// }
	// return result
}

func (b *BaseRecognizer) GetTokenType(tokenName string) int {
	panic("Method not defined!")
	// var ttype = b.GetTokenTypeMap()[tokenName]
	// if (ttype !=nil) {
	//	return ttype
	// } else {
	//	return TokenInvalidType
	// }
}

//func (b *Recognizer) GetTokenTypeMap() map[string]int {
//	Vocabulary vocabulary = getVocabulary()
//
//	Synchronized (tokenTypeMapCache) {
//		Map<String, Integer> result = tokenTypeMapCache.Get(vocabulary)
//		if (result == null) {
//			result = new HashMap<String, Integer>()
//			for (int i = 0; i < GetATN().maxTokenType; i++) {
//				String literalName = vocabulary.getLiteralName(i)
//				if (literalName != null) {
//					result.put(literalName, i)
//				}
//
//				String symbolicName = vocabulary.GetSymbolicName(i)
//				if (symbolicName != null) {
//					result.put(symbolicName, i)
//				}
//			}
//
//			result.put("EOF", Token.EOF)
//			result = Collections.unmodifiableMap(result)
//			tokenTypeMapCache.put(vocabulary, result)
//		}
//
//		return result
//	}
//}

// What is the error header, normally line/character position information?
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
	line := e.GetOffendingToken().GetLine()
	column := e.GetOffendingToken().GetColumn()
	return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}

// How should a token be displayed in an error message? The default
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
	if t == nil {
		return "<no token>"
	}
	s := t.GetText()
	if s == "" {
		if t.GetTokenType() == TokenEOF {
			s = "<EOF>"
		} else {
			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
		}
	}
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)

	return "'" + s + "'"
}

func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
	return NewProxyErrorListener(b.listeners)
}

// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
	return true
}

func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
	return true
}

@@ -1,114 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack. We actually carry no
// information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the ATN submachine that invoked this context. Contrast this with the s
// pointer inside ParserRuleContext that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the subclass
// ParserRuleContext.
//
// @see ParserRuleContext
//

type RuleContext interface {
	RuleNode

	GetInvokingState() int
	SetInvokingState(int)

	GetRuleIndex() int
	IsEmpty() bool

	GetAltNumber() int
	SetAltNumber(altNumber int)

	String([]string, RuleContext) string
}

type BaseRuleContext struct {
	parentCtx     RuleContext
	invokingState int
	RuleIndex     int
}

func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
	rn := new(BaseRuleContext)

	// What context invoked this rule?
	rn.parentCtx = parent

	// What state invoked the rule associated with this context?
	// The "return address" is the followState of invokingState.
	// If parent is nil, this should be -1.
	if parent == nil {
		rn.invokingState = -1
	} else {
		rn.invokingState = invokingState
	}

	return rn
}

func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
	return b
}

func (b *BaseRuleContext) SetParent(v Tree) {
	if v == nil {
		b.parentCtx = nil
	} else {
		b.parentCtx = v.(RuleContext)
	}
}

func (b *BaseRuleContext) GetInvokingState() int {
	return b.invokingState
}

func (b *BaseRuleContext) SetInvokingState(t int) {
	b.invokingState = t
}

func (b *BaseRuleContext) GetRuleIndex() int {
	return b.RuleIndex
}

func (b *BaseRuleContext) GetAltNumber() int {
	return ATNInvalidAltNumber
}

func (b *BaseRuleContext) SetAltNumber(altNumber int) {}

// A context is empty if there is no invoking state, meaning nobody called
// the current context.
func (b *BaseRuleContext) IsEmpty() bool {
	return b.invokingState == -1
}

// Return the combined text of all child nodes. This method only considers
// tokens which have been added to the parse tree.
// <p>
// Since tokens on hidden channels (e.g. whitespace or comments) are not
// added to the parse trees, they will not appear in the output of this
// method.
//

func (b *BaseRuleContext) GetParent() Tree {
	return b.parentCtx
}

@@ -1,455 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

// A tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
//
// <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
// {@link SemanticContext} within the scope of this outer class.</p>
//

type SemanticContext interface {
	comparable

	evaluate(parser Recognizer, outerContext RuleContext) bool
	evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext

	hash() int
	String() string
}

func SemanticContextandContext(a, b SemanticContext) SemanticContext {
	if a == nil || a == SemanticContextNone {
		return b
	}
	if b == nil || b == SemanticContextNone {
		return a
	}
	result := NewAND(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}

func SemanticContextorContext(a, b SemanticContext) SemanticContext {
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	if a == SemanticContextNone || b == SemanticContextNone {
		return SemanticContextNone
	}
	result := NewOR(a, b)
	if len(result.opnds) == 1 {
		return result.opnds[0]
	}

	return result
}
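
// A hedged, self-contained sketch (not part of the runtime) of the
// simplification rules above, modeling a semantic context as a plain
// evaluation function and None as the always-true predicate.
// examplePred and exampleAndContext are hypothetical names.
type examplePred func() bool

func exampleAndContext(a, b examplePred, aIsNone, bIsNone bool) examplePred {
	if aIsNone {
		return b // None && b == b
	}
	if bIsNone {
		return a // a && None == a
	}
	return func() bool { return a() && b() }
}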

type Predicate struct {
	ruleIndex      int
	predIndex      int
	isCtxDependent bool
}

func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
	p := new(Predicate)

	p.ruleIndex = ruleIndex
	p.predIndex = predIndex
	p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
	return p
}

// The default {@link SemanticContext}, which is semantically equivalent to
// a predicate of the form {@code {true}?}.
var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)

func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	return p
}

func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
	var localctx RuleContext

	if p.isCtxDependent {
		localctx = outerContext
	}

	return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}

func (p *Predicate) equals(other interface{}) bool {
	if p == other {
		return true
	} else if _, ok := other.(*Predicate); !ok {
		return false
	} else {
		return p.ruleIndex == other.(*Predicate).ruleIndex &&
			p.predIndex == other.(*Predicate).predIndex &&
			p.isCtxDependent == other.(*Predicate).isCtxDependent
	}
}

func (p *Predicate) hash() int {
	return p.ruleIndex*43 + p.predIndex*47
}

func (p *Predicate) String() string {
	return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
}

type PrecedencePredicate struct {
	precedence int
}

func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
	p := new(PrecedencePredicate)
	p.precedence = precedence

	return p
}

func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
	return parser.Precpred(outerContext, p.precedence)
}

func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	if parser.Precpred(outerContext, p.precedence) {
		return SemanticContextNone
	}

	return nil
}

func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
	return p.precedence - other.precedence
}

func (p *PrecedencePredicate) equals(other interface{}) bool {
	if p == other {
		return true
	} else if _, ok := other.(*PrecedencePredicate); !ok {
		return false
	} else {
		return p.precedence == other.(*PrecedencePredicate).precedence
	}
}

func (p *PrecedencePredicate) hash() int {
	return p.precedence * 51
}

func (p *PrecedencePredicate) String() string {
	return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}

func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate {
	result := make([]*PrecedencePredicate, 0)

	for _, v := range set.values() {
		if c2, ok := v.(*PrecedencePredicate); ok {
			result = append(result, c2)
		}
	}

	return result
}

// A semantic context which is true whenever none of the contained contexts
// is false.
|
|
||||||
|
|
||||||
type AND struct {
|
|
||||||
opnds []SemanticContext
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAND(a, b SemanticContext) *AND {
|
|
||||||
|
|
||||||
operands := NewSet(nil, nil)
|
|
||||||
if aa, ok := a.(*AND); ok {
|
|
||||||
for _, o := range aa.opnds {
|
|
||||||
operands.add(o)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
operands.add(a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if ba, ok := b.(*AND); ok {
|
|
||||||
for _, o := range ba.opnds {
|
|
||||||
operands.add(o)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
operands.add(b)
|
|
||||||
}
|
|
||||||
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
|
|
||||||
if len(precedencePredicates) > 0 {
|
|
||||||
// interested in the transition with the lowest precedence
|
|
||||||
var reduced *PrecedencePredicate
|
|
||||||
|
|
||||||
for _, p := range precedencePredicates {
|
|
||||||
if reduced == nil || p.precedence < reduced.precedence {
|
|
||||||
reduced = p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
operands.add(reduced)
|
|
||||||
}
|
|
||||||
|
|
||||||
vs := operands.values()
|
|
||||||
opnds := make([]SemanticContext, len(vs))
|
|
||||||
for i, v := range vs {
|
|
||||||
opnds[i] = v.(SemanticContext)
|
|
||||||
}
|
|
||||||
|
|
||||||
and := new(AND)
|
|
||||||
and.opnds = opnds
|
|
||||||
|
|
||||||
return and
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *AND) equals(other interface{}) bool {
|
|
||||||
if a == other {
|
|
||||||
return true
|
|
||||||
} else if _, ok := other.(*AND); !ok {
|
|
||||||
return false
|
|
||||||
} else {
|
|
||||||
for i, v := range other.(*AND).opnds {
|
|
||||||
if !a.opnds[i].equals(v) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// {@inheritDoc}
|
|
||||||
//
|
|
||||||
// <p>
|
|
||||||
// The evaluation of predicates by a context is short-circuiting, but
|
|
||||||
// unordered.</p>
|
|
||||||
//
|
|
||||||
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
|
|
||||||
for i := 0; i < len(a.opnds); i++ {
|
|
||||||
if !a.opnds[i].evaluate(parser, outerContext) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
|
|
||||||
differs := false
|
|
||||||
operands := make([]SemanticContext, 0)
|
|
||||||
|
|
||||||
for i := 0; i < len(a.opnds); i++ {
|
|
||||||
context := a.opnds[i]
|
|
||||||
evaluated := context.evalPrecedence(parser, outerContext)
|
|
||||||
differs = differs || (evaluated != context)
|
|
||||||
if evaluated == nil {
|
|
||||||
// The AND context is false if any element is false
|
|
||||||
return nil
|
|
||||||
} else if evaluated != SemanticContextNone {
|
|
||||||
// Reduce the result by Skipping true elements
|
|
||||||
operands = append(operands, evaluated)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !differs {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(operands) == 0 {
|
|
||||||
// all elements were true, so the AND context is true
|
|
||||||
return SemanticContextNone
|
|
||||||
}
|
|
||||||
|
|
||||||
var result SemanticContext
|
|
||||||
|
|
||||||
for _, o := range operands {
|
|
||||||
if result == nil {
|
|
||||||
result = o
|
|
||||||
} else {
|
|
||||||
result = SemanticContextandContext(result, o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
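
// Editor's note: a sketch (not runtime code) of how partial evaluation prunes
// an AND during a precedence decision. Assuming the parser's Precpred(ctx, 2)
// reports true, the precedence operand reduces to SemanticContextNone and is
// dropped, leaving only the ordinary predicate.
func exampleReduceAND(parser Recognizer, ctx RuleContext) SemanticContext {
	pred := NewPredicate(0, 0, false)
	and := NewAND(NewPrecedencePredicate(2), pred)
	return and.evalPrecedence(parser, ctx) // pred itself when Precpred holds
}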

func (a *AND) hash() int {
	h := murmurInit(37) // Init with a value different from OR
	for _, op := range a.opnds {
		h = murmurUpdate(h, op.hash())
	}
	return murmurFinish(h, len(a.opnds))
}

func (a *OR) hash() int {
	h := murmurInit(41) // Init with a value different from AND
	for _, op := range a.opnds {
		h = murmurUpdate(h, op.hash())
	}
	return murmurFinish(h, len(a.opnds))
}

func (a *AND) String() string {
	s := ""

	for _, o := range a.opnds {
		s += "&& " + fmt.Sprint(o)
	}

	if len(s) > 3 {
		return s[3:] // strip the leading "&& "
	}

	return s
}

//
// A semantic context which is true whenever at least one of the contained
// contexts is true.
//

type OR struct {
	opnds []SemanticContext
}

func NewOR(a, b SemanticContext) *OR {

	operands := NewSet(nil, nil)
	if aa, ok := a.(*OR); ok {
		for _, o := range aa.opnds {
			operands.add(o)
		}
	} else {
		operands.add(a)
	}

	if ba, ok := b.(*OR); ok {
		for _, o := range ba.opnds {
			operands.add(o)
		}
	} else {
		operands.add(b)
	}
	precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
	if len(precedencePredicates) > 0 {
		// interested in the transition with the highest precedence
		var reduced *PrecedencePredicate

		for _, p := range precedencePredicates {
			if reduced == nil || p.precedence > reduced.precedence {
				reduced = p
			}
		}

		operands.add(reduced)
	}

	vs := operands.values()

	opnds := make([]SemanticContext, len(vs))
	for i, v := range vs {
		opnds[i] = v.(SemanticContext)
	}

	o := new(OR)
	o.opnds = opnds

	return o
}

func (o *OR) equals(other interface{}) bool {
	if o == other {
		return true
	} else if _, ok := other.(*OR); !ok {
		return false
	} else {
		for i, v := range other.(*OR).opnds {
			if !o.opnds[i].equals(v) {
				return false
			}
		}
		return true
	}
}

// <p>
// The evaluation of predicates by this context is short-circuiting, but
// unordered.</p>
//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
	for i := 0; i < len(o.opnds); i++ {
		if o.opnds[i].evaluate(parser, outerContext) {
			return true
		}
	}
	return false
}

func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
	differs := false
	operands := make([]SemanticContext, 0)
	for i := 0; i < len(o.opnds); i++ {
		context := o.opnds[i]
		evaluated := context.evalPrecedence(parser, outerContext)
		differs = differs || (evaluated != context)
		if evaluated == SemanticContextNone {
			// The OR context is true if any element is true
			return SemanticContextNone
		} else if evaluated != nil {
			// Reduce the result by skipping false elements
			operands = append(operands, evaluated)
		}
	}
	if !differs {
		return o
	}
	if len(operands) == 0 {
		// all elements were false, so the OR context is false
		return nil
	}
	var result SemanticContext

	for _, o := range operands {
		if result == nil {
			result = o
		} else {
			result = SemanticContextorContext(result, o)
		}
	}

	return result
}

func (o *OR) String() string {
	s := ""

	for _, o := range o.opnds {
		s += "|| " + fmt.Sprint(o)
	}

	if len(s) > 3 {
		return s[3:] // strip the leading "|| "
	}

	return s
}

@@ -1,210 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"strconv"
	"strings"
)

type TokenSourceCharStreamPair struct {
	tokenSource TokenSource
	charStream  CharStream
}

// A token has properties: text, type, line, character position in the line
// (so we can ignore tabs), token channel, index, and source from which
// we obtained this token.

type Token interface {
	GetSource() *TokenSourceCharStreamPair
	GetTokenType() int
	GetChannel() int
	GetStart() int
	GetStop() int
	GetLine() int
	GetColumn() int

	GetText() string
	SetText(s string)

	GetTokenIndex() int
	SetTokenIndex(v int)

	GetTokenSource() TokenSource
	GetInputStream() CharStream
}

type BaseToken struct {
	source     *TokenSourceCharStreamPair
	tokenType  int    // token type of the token
	channel    int    // The parser ignores everything not on DEFAULT_CHANNEL
	start      int    // optional; return -1 if not implemented.
	stop       int    // optional; return -1 if not implemented.
	tokenIndex int    // from 0..n-1 of the token object in the input stream
	line       int    // line=1..n of the 1st character
	column     int    // beginning of the line at which it occurs, 0..n-1
	text       string // text of the token.
	readOnly   bool
}

const (
	TokenInvalidType = 0

	// During lookahead operations, this "token" signifies we hit rule end ATN state
	// and did not follow it despite needing to.
	TokenEpsilon = -2

	TokenMinUserTokenType = 1

	TokenEOF = -1

	// All tokens go to the parser (unless Skip() is called in that rule)
	// on a particular "channel". The parser tunes to a particular channel
	// so that whitespace etc... can go to the parser on a "hidden" channel.

	TokenDefaultChannel = 0

	// Anything on a different channel than DEFAULT_CHANNEL is not parsed
	// by the parser.

	TokenHiddenChannel = 1
)

func (b *BaseToken) GetChannel() int {
	return b.channel
}

func (b *BaseToken) GetStart() int {
	return b.start
}

func (b *BaseToken) GetStop() int {
	return b.stop
}

func (b *BaseToken) GetLine() int {
	return b.line
}

func (b *BaseToken) GetColumn() int {
	return b.column
}

func (b *BaseToken) GetTokenType() int {
	return b.tokenType
}

func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
	return b.source
}

func (b *BaseToken) GetTokenIndex() int {
	return b.tokenIndex
}

func (b *BaseToken) SetTokenIndex(v int) {
	b.tokenIndex = v
}

func (b *BaseToken) GetTokenSource() TokenSource {
	return b.source.tokenSource
}

func (b *BaseToken) GetInputStream() CharStream {
	return b.source.charStream
}

type CommonToken struct {
	*BaseToken
}

func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {

	t := new(CommonToken)

	t.BaseToken = new(BaseToken)

	t.source = source
	t.tokenType = tokenType
	t.channel = channel
	t.start = start
	t.stop = stop
	t.tokenIndex = -1
	if t.source.tokenSource != nil {
		t.line = source.tokenSource.GetLine()
		t.column = source.tokenSource.GetCharPositionInLine()
	} else {
		t.column = -1
	}
	return t
}

// An empty {@link Pair} which is used as the default value of
// {@link //source} for tokens that do not have a source.

//CommonToken.EMPTY_SOURCE = [ nil, nil ]

// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
//
// <p>
// If {@code oldToken} is also a {@link CommonToken} instance, the newly
// constructed token will share a reference to the {@link //text} field and
// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
// be assigned the result of calling {@link //GetText}, and {@link //source}
// will be constructed from the result of {@link Token//GetTokenSource} and
// {@link Token//GetInputStream}.</p>
//
// @param oldToken The token to copy.
//
func (c *CommonToken) clone() *CommonToken {
	t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
	t.tokenIndex = c.GetTokenIndex()
	t.line = c.GetLine()
	t.column = c.GetColumn()
	t.text = c.GetText()
	return t
}

func (c *CommonToken) GetText() string {
	if c.text != "" {
		return c.text
	}
	input := c.GetInputStream()
	if input == nil {
		return ""
	}
	n := input.Size()
	if c.start < n && c.stop < n {
		return input.GetTextFromInterval(NewInterval(c.start, c.stop))
	}
	return "<EOF>"
}

func (c *CommonToken) SetText(text string) {
	c.text = text
}

func (c *CommonToken) String() string {
	txt := c.GetText()
	if txt != "" {
		txt = strings.Replace(txt, "\n", "\\n", -1)
		txt = strings.Replace(txt, "\r", "\\r", -1)
		txt = strings.Replace(txt, "\t", "\\t", -1)
	} else {
		txt = "<no text>"
	}

	var ch string
	if c.channel > 0 {
		ch = ",channel=" + strconv.Itoa(c.channel)
	} else {
		ch = ""
	}

	return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
		txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
		ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
}
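
// Editor's note: a small sketch of building a token by hand with the
// constructor above. The empty source pair takes the else branch in
// NewCommonToken, so line stays 0 and column becomes -1.
func exampleToken() string {
	src := &TokenSourceCharStreamPair{}
	t := NewCommonToken(src, TokenMinUserTokenType, TokenDefaultChannel, 0, 4)
	t.SetText("hello")
	return t.String() // [@-1,0:4='hello',<1>,0:-1]
}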

@@ -1,17 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenSource interface {
	NextToken() Token
	Skip()
	More()
	GetLine() int
	GetCharPositionInLine() int
	GetInputStream() CharStream
	GetSourceName() string
	setTokenFactory(factory TokenFactory)
	GetTokenFactory() TokenFactory
}

@@ -1,20 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type TokenStream interface {
	IntStream

	LT(k int) Token

	Get(index int) Token
	GetTokenSource() TokenSource
	SetTokenSource(TokenSource)

	GetAllText() string
	GetTextFromInterval(*Interval) string
	GetTextFromRuleContext(RuleContext) string
	GetTextFromTokens(Token, Token) string
}
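
// Editor's note: a sketch of driving the interface above; Size comes from the
// embedded IntStream, Get is declared here, and GetText on Token in token.go.
func exampleDumpTokens(ts TokenStream) []string {
	out := make([]string, 0, ts.Size())
	for i := 0; i < ts.Size(); i++ {
		out = append(out, ts.Get(i).GetText())
	}
	return out
}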

@@ -1,649 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr

import (
	"bytes"
	"fmt"
)

//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.

// <p>
// You can insert stuff, replace, and delete chunks. Note that the operations
// are done lazily--only if you convert the buffer to a {@link String} with
// {@link TokenStream#getText()}. This is very efficient because you are not
// moving data around all the time. As the buffer of tokens is converted to
// strings, the {@link #getText()} method(s) scan the input token stream and
// check to see if there is an operation at the current index. If so, the
// operation is done and then normal {@link String} rendering continues on the
// buffer. This is like having multiple Turing machine instruction streams
// (programs) operating on a single input tape. :)</p>

// <p>
// This rewriter makes no modifications to the token stream. It does not ask the
// stream to fill itself up nor does it advance the input cursor. The token
// stream {@link TokenStream#index()} will return the same value before and
// after any {@link #getText()} call.</p>

// <p>
// The rewriter only works on tokens that you have in the buffer and ignores the
// current input cursor. If you are buffering tokens on-demand, calling
// {@link #getText()} halfway through the input will only do rewrites for those
// tokens in the first half of the file.</p>

// <p>
// Since the operations are done lazily at {@link #getText}-time, operations do
// not screw up the token index values. That is, an insert operation at token
// index {@code i} does not change the index values for tokens
// {@code i}+1..n-1.</p>

// <p>
// Because operations never actually alter the buffer, you may always get the
// original token stream back without undoing anything. Since the instructions
// are queued up, you can easily simulate transactions and roll back any changes
// if there is an error just by removing instructions. For example,</p>

// <pre>
// CharStream input = new ANTLRFileStream("input");
// TLexer lex = new TLexer(input);
// CommonTokenStream tokens = new CommonTokenStream(lex);
// T parser = new T(tokens);
// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
// parser.startRule();
// </pre>

// <p>
// Then in the rules, you can execute (assuming rewriter is visible):</p>

// <pre>
// Token t,u;
// ...
// rewriter.insertAfter(t, "text to put after t");}
// rewriter.insertAfter(u, "text after u");}
// System.out.println(rewriter.getText());
// </pre>

// <p>
// You can also have multiple "instruction streams" and get multiple rewrites
// from a single pass over the input. Just name the instruction streams and use
// that name again when printing the buffer. This could be useful for generating
// a C file and also its header file--all from the same buffer:</p>

// <pre>
// rewriter.insertAfter("pass1", t, "text to put after t");}
// rewriter.insertAfter("pass2", u, "text after u");}
// System.out.println(rewriter.getText("pass1"));
// System.out.println(rewriter.getText("pass2"));
// </pre>

// <p>
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.</p>

const (
	Default_Program_Name = "default"
	Program_Init_Size    = 100
	Min_Token_Index      = 0
)

// Define the rewrite operation hierarchy

type RewriteOperation interface {
	// Execute the rewrite operation by possibly adding to the buffer.
	// Return the index of the next token to operate on.
	Execute(buffer *bytes.Buffer) int
	String() string
	GetInstructionIndex() int
	GetIndex() int
	GetText() string
	GetOpName() string
	GetTokens() TokenStream
	SetInstructionIndex(val int)
	SetIndex(int)
	SetText(string)
	SetOpName(string)
	SetTokens(TokenStream)
}

type BaseRewriteOperation struct {
	// Current index of rewrites list
	instruction_index int
	// Token buffer index
	index int
	// Substitution text
	text string
	// Actual operation name
	op_name string
	// Pointer to token stream
	tokens TokenStream
}

func (op *BaseRewriteOperation) GetInstructionIndex() int {
	return op.instruction_index
}

func (op *BaseRewriteOperation) GetIndex() int {
	return op.index
}

func (op *BaseRewriteOperation) GetText() string {
	return op.text
}

func (op *BaseRewriteOperation) GetOpName() string {
	return op.op_name
}

func (op *BaseRewriteOperation) GetTokens() TokenStream {
	return op.tokens
}

func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
	op.instruction_index = val
}

func (op *BaseRewriteOperation) SetIndex(val int) {
	op.index = val
}

func (op *BaseRewriteOperation) SetText(val string) {
	op.text = val
}

func (op *BaseRewriteOperation) SetOpName(val string) {
	op.op_name = val
}

func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
	op.tokens = val
}

func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
	return op.index
}

func (op *BaseRewriteOperation) String() string {
	return fmt.Sprintf("<%s@%d:\"%s\">",
		op.op_name,
		op.tokens.Get(op.GetIndex()),
		op.text,
	)
}

type InsertBeforeOp struct {
	BaseRewriteOperation
}

func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
	return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
		index:   index,
		text:    text,
		op_name: "InsertBeforeOp",
		tokens:  stream,
	}}
}

func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
	buffer.WriteString(op.text)
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
		buffer.WriteString(op.tokens.Get(op.index).GetText())
	}
	return op.index + 1
}

func (op *InsertBeforeOp) String() string {
	return op.BaseRewriteOperation.String()
}

// Distinguish between insert after/before to do the "insert afters"
// first and then the "insert befores" at the same index. Implementation
// of "insert after" is "insert before index+1".

type InsertAfterOp struct {
	BaseRewriteOperation
}

func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
	return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
		index:  index + 1,
		text:   text,
		tokens: stream,
	}}
}

func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
	buffer.WriteString(op.text)
	if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
		buffer.WriteString(op.tokens.Get(op.index).GetText())
	}
	return op.index + 1
}

func (op *InsertAfterOp) String() string {
	return op.BaseRewriteOperation.String()
}

// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
	BaseRewriteOperation
	LastIndex int
}

func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
	return &ReplaceOp{
		BaseRewriteOperation: BaseRewriteOperation{
			index:   from,
			text:    text,
			op_name: "ReplaceOp",
			tokens:  stream,
		},
		LastIndex: to,
	}
}

func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
	if op.text != "" {
		buffer.WriteString(op.text)
	}
	return op.LastIndex + 1
}

func (op *ReplaceOp) String() string {
	if op.text == "" {
		return fmt.Sprintf("<DeleteOP@%d..%d>",
			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
	}
	return fmt.Sprintf("<ReplaceOp@%d..%d:\"%s\">",
		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}

type TokenStreamRewriter struct {
	// Our source stream
	tokens TokenStream
	// You may have multiple, named streams of rewrite operations.
	// I'm calling these things "programs."
	// Maps String (name) → rewrite (List)
	programs                   map[string][]RewriteOperation
	last_rewrite_token_indexes map[string]int
}

func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
	return &TokenStreamRewriter{
		tokens: tokens,
		programs: map[string][]RewriteOperation{
			Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
		},
		last_rewrite_token_indexes: map[string]int{},
	}
}
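
// Editor's note: a Go counterpart to the Java usage in the doc comment above,
// as a sketch only. It assumes a generated lexer satisfying the runtime's
// Lexer interface and the CommonTokenStream/Fill helpers defined elsewhere in
// this package.
func exampleRewrite(lexer Lexer) string {
	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
	tokens.Fill() // buffer all tokens; the rewriter only sees buffered ones
	rewriter := NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// header\n")
	rewriter.ReplaceDefaultPos(3, "replacement") // replace token 3 in place
	return rewriter.GetTextDefault()
}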

func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
	return tsr.tokens
}

// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
	is, ok := tsr.programs[program_name]
	if ok {
		tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
	}
}

func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
	tsr.Rollback(Default_Program_Name, instruction_index)
}

// Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
	tsr.Rollback(program_name, Min_Token_Index) // TODO: double test on that cause lower bound is not included
}

func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
	tsr.DeleteProgram(Default_Program_Name)
}

func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
	// to insert after, just insert before next index (even if past end)
	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
	tsr.InsertAfter(Default_Program_Name, index, text)
}

func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
	tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
	tsr.InsertBefore(Default_Program_Name, index, text)
}

func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
	tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
	if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
		panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
			from, to, tsr.tokens.Size()))
	}
	var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
	rewrites := tsr.GetProgram(program_name)
	op.SetInstructionIndex(len(rewrites))
	tsr.AddToProgram(program_name, op)
}

func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
	tsr.Replace(Default_Program_Name, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
	tsr.ReplaceDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
	tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
	tsr.ReplaceToken(Default_Program_Name, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
	tsr.ReplaceTokenDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
	tsr.Replace(program_name, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
	tsr.Delete(Default_Program_Name, from, to)
}

func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
	tsr.DeleteDefault(index, index)
}

func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
	tsr.ReplaceToken(program_name, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
	tsr.DeleteToken(Default_Program_Name, from, to)
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
	i, ok := tsr.last_rewrite_token_indexes[program_name]
	if !ok {
		return -1
	}
	return i
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
	return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}

func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
	tsr.last_rewrite_token_indexes[program_name] = i
}

func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
	is := make([]RewriteOperation, 0, Program_Init_Size)
	tsr.programs[name] = is
	return is
}

func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
	is := tsr.GetProgram(name)
	is = append(is, op)
	tsr.programs[name] = is
}

func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
	is, ok := tsr.programs[name]
	if !ok {
		is = tsr.InitializeProgram(name)
	}
	return is
}

// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
	return tsr.GetText(
		Default_Program_Name,
		NewInterval(0, tsr.tokens.Size()-1))
}

// Return the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
	rewrites := tsr.programs[program_name]
	start := interval.Start
	stop := interval.Stop
	// ensure start/end are in range
	stop = min(stop, tsr.tokens.Size()-1)
	start = max(start, 0)
	if len(rewrites) == 0 {
		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
	}
	buf := bytes.Buffer{}
	// First, optimize instruction stream
	indexToOp := reduceToSingleOperationPerIndex(rewrites)
	// Walk buffer, executing instructions and emitting tokens
	for i := start; i <= stop && i < tsr.tokens.Size(); {
		op := indexToOp[i]
		delete(indexToOp, i) // remove so any left have index size-1
		t := tsr.tokens.Get(i)
		if op == nil {
			// no operation at that index, just dump token
			if t.GetTokenType() != TokenEOF {
				buf.WriteString(t.GetText())
			}
			i++ // move to next token
		} else {
			i = op.Execute(&buf) // execute operation and skip
		}
	}
	// include stuff after end if it's last index in buffer
	// So, if they did an insertAfter(lastValidIndex, "foo"), include
	// foo if end==lastValidIndex.
	if stop == tsr.tokens.Size()-1 {
		// Scan any remaining operations after last token;
		// they should be included (they will be inserts).
		for _, op := range indexToOp {
			if op.GetIndex() >= tsr.tokens.Size()-1 {
				buf.WriteString(op.GetText())
			}
		}
	}
	return buf.String()
}

// We need to combine operations and report invalid operations (like
// overlapping replaces that are not completely nested). Inserts to
// the same index need to be combined etc... Here are the cases:
//
// I.i.u I.j.v							leave alone, nonoverlapping
// I.i.u I.i.v							combine: Iivu
//
// R.i-j.u R.x-y.v	| i-j in x-y			delete first R
// R.i-j.u R.i-j.v						delete first R
// R.i-j.u R.x-y.v	| x-y in i-j			ERROR
// R.i-j.u R.x-y.v	| boundaries overlap		ERROR
//
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v	| boundaries overlap		combine to max(min)..max(right)
//
// I.i.u R.x-y.v	| i in (x+1)-y			delete I (since insert before
//							we're not deleting i)
// I.i.u R.x-y.v	| i not in (x+1)-y		leave alone, nonoverlapping
// R.x-y.v I.i.u	| i in x-y			ERROR
// R.x-y.v I.x.u					R.x-y.uv (combine, delete I)
// R.x-y.v I.i.u	| i not in x-y			leave alone, nonoverlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
//
// 1. wipe out any insertions before op within that range.
// 2. Drop any replace op before that is contained completely within
//    that range.
// 3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
//
// 1. for any inserts to the same index, combine even if not adjacent.
// 2. for any prior replace with the same left boundary, combine this
//    insert with replace and delete this replace.
// 3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to the index → op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the replace range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// Return a map from token index to operation.
//
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
	// WALK REPLACES
	for i := 0; i < len(rewrites); i++ {
		op := rewrites[i]
		if op == nil {
			continue
		}
		rop, ok := op.(*ReplaceOp)
		if !ok {
			continue
		}
		// Wipe prior inserts within range
		for j := 0; j < i && j < len(rewrites); j++ {
			if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
				if iop.index == rop.index {
					// E.g., insert before 2, delete 2..2; update replace
					// text to include insert before, kill insert
					rewrites[iop.instruction_index] = nil
					if rop.text != "" {
						rop.text = iop.text + rop.text
					} else {
						rop.text = iop.text
					}
				} else if iop.index > rop.index && iop.index <= rop.LastIndex {
					// delete insert as it's a no-op.
					rewrites[iop.instruction_index] = nil
				}
			}
		}
		// Drop any prior replaces contained within
		for j := 0; j < i && j < len(rewrites); j++ {
			if prevop, ok := rewrites[j].(*ReplaceOp); ok {
				if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
					// delete replace as it's a no-op.
					rewrites[prevop.instruction_index] = nil
					continue
				}
				// throw exception unless disjoint or identical
				disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
				// Delete special case of replace (text==null):
				// D.i-j.u D.x-y.v	| boundaries overlap	combine to max(min)..max(right)
				if prevop.text == "" && rop.text == "" && !disjoint {
					rewrites[prevop.instruction_index] = nil
					rop.index = min(prevop.index, rop.index)
					rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
					println("new rop" + rop.String()) // TODO: remove console write, taken from Java version
				} else if !disjoint {
					panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
				}
			}
		}
	}
	// WALK INSERTS
	for i := 0; i < len(rewrites); i++ {
		op := rewrites[i]
		if op == nil {
			continue
		}
		// hack to replicate inheritance in composition
		_, iok := rewrites[i].(*InsertBeforeOp)
		_, aok := rewrites[i].(*InsertAfterOp)
		if !iok && !aok {
			continue
		}
		iop := rewrites[i]
		// combine current insert with prior if any at same index
		// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
		for j := 0; j < i && j < len(rewrites); j++ {
			if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
				if nextIop.index == iop.GetIndex() {
					iop.SetText(nextIop.text + iop.GetText())
					rewrites[j] = nil
				}
			}
			if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
				if prevIop.index == iop.GetIndex() {
					iop.SetText(iop.GetText() + prevIop.text)
					rewrites[prevIop.instruction_index] = nil
				}
			}
		}
		// look for replaces where iop.index is in range; error
		for j := 0; j < i && j < len(rewrites); j++ {
			if rop, ok := rewrites[j].(*ReplaceOp); ok {
				if iop.GetIndex() == rop.index {
					rop.text = iop.GetText() + rop.text
					rewrites[i] = nil
					continue
				}
				if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
					panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
				}
			}
		}
	}
	m := map[int]RewriteOperation{}
	for i := 0; i < len(rewrites); i++ {
		op := rewrites[i]
		if op == nil {
			continue
		}
		if _, ok := m[op.GetIndex()]; ok {
			panic("should only be one op per index")
		}
		m[op.GetIndex()] = op
	}
	return m
}
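
// Editor's note: a sketch of the I-then-R collapse described in the comment
// above. An InsertBeforeOp at index 2 followed by a ReplaceOp over 2..2 folds
// into a single ReplaceOp whose text is insert-text + replace-text ("AB").
func exampleReduce(tokens TokenStream) map[int]RewriteOperation {
	ins := NewInsertBeforeOp(2, "A", tokens)
	rep := NewReplaceOp(2, 2, "B", tokens)
	ins.SetInstructionIndex(0)
	rep.SetInstructionIndex(1)
	return reduceToSingleOperationPerIndex([]RewriteOperation{ins, rep})
}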

/*
Quick fix for Go's lack of overloads
*/

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

@@ -1,32 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

type TraceListener struct {
	parser *BaseParser
}

func NewTraceListener(parser *BaseParser) *TraceListener {
	tl := new(TraceListener)
	tl.parser = parser
	return tl
}

func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
}

func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
	fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}

func (t *TraceListener) VisitTerminal(node TerminalNode) {
	fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
}

func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
	fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
}
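
// Editor's note: TraceListener satisfies the runtime's ParseTreeListener
// interface, so a sketch of wiring it up manually (this assumes the
// AddParseListener helper on BaseParser defined elsewhere in this package):
func exampleEnableTracing(p *BaseParser) {
	p.AddParseListener(NewTraceListener(p))
}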

@@ -1,421 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
	"strconv"
)

// atom, set, epsilon, action, predicate, rule transitions.
//
// <p>This is a one way link. It emanates from a state (usually via a list of
// transitions) and has a target state.</p>
//
// <p>Since we never have to change the ATN transitions once we construct it,
// we can fix these transitions as specific classes. The DFA transitions on the
// other hand need to update the labels as it adds transitions to the states.
// We'll use the term Edge for the DFA to distinguish them from ATN
// transitions.</p>

type Transition interface {
	getTarget() ATNState
	setTarget(ATNState)
	getIsEpsilon() bool
	getLabel() *IntervalSet
	getSerializationType() int
	Matches(int, int, int) bool
}

type BaseTransition struct {
	target            ATNState
	isEpsilon         bool
	label             int
	intervalSet       *IntervalSet
	serializationType int
}

func NewBaseTransition(target ATNState) *BaseTransition {

	if target == nil {
		panic("target cannot be nil.")
	}

	t := new(BaseTransition)

	t.target = target
	// Are we epsilon, action, sempred?
	t.isEpsilon = false
	t.intervalSet = nil

	return t
}

func (t *BaseTransition) getTarget() ATNState {
	return t.target
}

func (t *BaseTransition) setTarget(s ATNState) {
	t.target = s
}

func (t *BaseTransition) getIsEpsilon() bool {
	return t.isEpsilon
}

func (t *BaseTransition) getLabel() *IntervalSet {
	return t.intervalSet
}

func (t *BaseTransition) getSerializationType() int {
	return t.serializationType
}

func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	panic("Not implemented")
}

const (
	TransitionEPSILON    = 1
	TransitionRANGE      = 2
	TransitionRULE       = 3
	TransitionPREDICATE  = 4 // e.g., {isType(input.LT(1))}?
	TransitionATOM       = 5
	TransitionACTION     = 6
	TransitionSET        = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
	TransitionNOTSET     = 8
	TransitionWILDCARD   = 9
	TransitionPRECEDENCE = 10
)

var TransitionserializationNames = []string{
	"INVALID",
	"EPSILON",
	"RANGE",
	"RULE",
	"PREDICATE",
	"ATOM",
	"ACTION",
	"SET",
	"NOT_SET",
	"WILDCARD",
	"PRECEDENCE",
}

//var TransitionserializationTypes struct {
//	EpsilonTransition int
//	RangeTransition int
//	RuleTransition int
//	PredicateTransition int
//	AtomTransition int
//	ActionTransition int
//	SetTransition int
//	NotSetTransition int
//	WildcardTransition int
//	PrecedencePredicateTransition int
//}{
//	TransitionEPSILON,
//	TransitionRANGE,
//	TransitionRULE,
//	TransitionPREDICATE,
//	TransitionATOM,
//	TransitionACTION,
//	TransitionSET,
//	TransitionNOTSET,
//	TransitionWILDCARD,
//	TransitionPRECEDENCE
//}

// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
	*BaseTransition
}

func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {

	t := new(AtomTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.label = intervalSet // The token type or character value; or, signifies special intervalSet.
	t.intervalSet = t.makeLabel()
	t.serializationType = TransitionATOM

	return t
}

func (t *AtomTransition) makeLabel() *IntervalSet {
	s := NewIntervalSet()
	s.addOne(t.label)
	return s
}

func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return t.label == symbol
}

func (t *AtomTransition) String() string {
	return strconv.Itoa(t.label)
}

type RuleTransition struct {
	*BaseTransition

	followState           ATNState
	ruleIndex, precedence int
}

func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {

	t := new(RuleTransition)
	t.BaseTransition = NewBaseTransition(ruleStart)

	t.ruleIndex = ruleIndex
	t.precedence = precedence
	t.followState = followState
	t.serializationType = TransitionRULE
	t.isEpsilon = true

	return t
}

func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

type EpsilonTransition struct {
	*BaseTransition

	outermostPrecedenceReturn int
}

func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {

	t := new(EpsilonTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionEPSILON
	t.isEpsilon = true
	t.outermostPrecedenceReturn = outermostPrecedenceReturn
	return t
}

func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return false
}

func (t *EpsilonTransition) String() string {
	return "epsilon"
}

type RangeTransition struct {
	*BaseTransition

	start, stop int
}

func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {

	t := new(RangeTransition)
	t.BaseTransition = NewBaseTransition(target)

	t.serializationType = TransitionRANGE
	t.start = start
	t.stop = stop
	t.intervalSet = t.makeLabel()
	return t
}

func (t *RangeTransition) makeLabel() *IntervalSet {
	s := NewIntervalSet()
	s.addRange(t.start, t.stop)
	return s
}

func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
	return symbol >= t.start && symbol <= t.stop
}

func (t *RangeTransition) String() string {
	return "'" + string(rune(t.start)) + "'..'" + string(rune(t.stop)) + "'"
}
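
// Editor's note: a sketch of label matching on a range transition; the min
// and max vocabulary bounds are ignored by this implementation.
func exampleRangeMatch(target ATNState) bool {
	t := NewRangeTransition(target, 'a', 'z')
	return t.Matches('m', 0, 0x10FFFF) // true: 'm' lies inside 'a'..'z'
}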
|
|
||||||
|
|
||||||
type AbstractPredicateTransition interface {
|
|
||||||
Transition
|
|
||||||
IAbstractPredicateTransitionFoo()
|
|
||||||
}
|
|
||||||
|
|
||||||
type BaseAbstractPredicateTransition struct {
|
|
||||||
*BaseTransition
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
|
|
||||||
|
|
||||||
t := new(BaseAbstractPredicateTransition)
|
|
||||||
t.BaseTransition = NewBaseTransition(target)
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
|
|
||||||
|
|
||||||
type PredicateTransition struct {
|
|
||||||
*BaseAbstractPredicateTransition
|
|
||||||
|
|
||||||
isCtxDependent bool
|
|
||||||
ruleIndex, predIndex int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
|
|
||||||
|
|
||||||
t := new(PredicateTransition)
|
|
||||||
t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
|
|
||||||
|
|
||||||
t.serializationType = TransitionPREDICATE
|
|
||||||
t.ruleIndex = ruleIndex
|
|
||||||
t.predIndex = predIndex
|
|
||||||
t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
|
|
||||||
t.isEpsilon = true
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PredicateTransition) getPredicate() *Predicate {
|
|
||||||
return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PredicateTransition) String() string {
|
|
||||||
return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ActionTransition struct {
|
|
||||||
*BaseTransition
|
|
||||||
|
|
||||||
isCtxDependent bool
|
|
||||||
ruleIndex, actionIndex, predIndex int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
|
|
||||||
|
|
||||||
t := new(ActionTransition)
|
|
||||||
t.BaseTransition = NewBaseTransition(target)
|
|
||||||
|
|
||||||
t.serializationType = TransitionACTION
|
|
||||||
t.ruleIndex = ruleIndex
|
|
||||||
t.actionIndex = actionIndex
|
|
||||||
t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
|
|
||||||
t.isEpsilon = true
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *ActionTransition) String() string {
|
|
||||||
return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
type SetTransition struct {
|
|
||||||
*BaseTransition
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
|
|
||||||
|
|
||||||
t := new(SetTransition)
|
|
||||||
t.BaseTransition = NewBaseTransition(target)
|
|
||||||
|
|
||||||
t.serializationType = TransitionSET
|
|
||||||
if set != nil {
|
|
||||||
t.intervalSet = set
|
|
||||||
} else {
|
|
||||||
t.intervalSet = NewIntervalSet()
|
|
||||||
t.intervalSet.addOne(TokenInvalidType)
|
|
||||||
}
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return t.intervalSet.contains(symbol)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *SetTransition) String() string {
|
|
||||||
return t.intervalSet.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
type NotSetTransition struct {
|
|
||||||
*SetTransition
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
|
|
||||||
|
|
||||||
t := new(NotSetTransition)
|
|
||||||
|
|
||||||
t.SetTransition = NewSetTransition(target, set)
|
|
||||||
|
|
||||||
t.serializationType = TransitionNOTSET
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *NotSetTransition) String() string {
|
|
||||||
return "~" + t.intervalSet.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
type WildcardTransition struct {
|
|
||||||
*BaseTransition
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewWildcardTransition(target ATNState) *WildcardTransition {
|
|
||||||
|
|
||||||
t := new(WildcardTransition)
|
|
||||||
t.BaseTransition = NewBaseTransition(target)
|
|
||||||
|
|
||||||
t.serializationType = TransitionWILDCARD
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *WildcardTransition) String() string {
|
|
||||||
return "."
|
|
||||||
}
|
|
||||||
|
|
||||||
type PrecedencePredicateTransition struct {
|
|
||||||
*BaseAbstractPredicateTransition
|
|
||||||
|
|
||||||
precedence int
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
|
|
||||||
|
|
||||||
t := new(PrecedencePredicateTransition)
|
|
||||||
t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
|
|
||||||
|
|
||||||
t.serializationType = TransitionPRECEDENCE
|
|
||||||
t.precedence = precedence
|
|
||||||
t.isEpsilon = true
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
|
|
||||||
return NewPrecedencePredicate(t.precedence)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *PrecedencePredicateTransition) String() string {
|
|
||||||
return fmt.Sprint(t.precedence) + " >= _p"
|
|
||||||
}
|
|
||||||
|
|
@ -1,256 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// The basic notion of a tree has a parent, a payload, and a list of children.
// It is the most abstract interface for all the trees used by ANTLR.

var TreeInvalidInterval = NewInterval(-1, -2)

type Tree interface {
	GetParent() Tree
	SetParent(Tree)
	GetPayload() interface{}
	GetChild(i int) Tree
	GetChildCount() int
	GetChildren() []Tree
}

type SyntaxTree interface {
	Tree

	GetSourceInterval() *Interval
}

type ParseTree interface {
	SyntaxTree

	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string

	ToStringTree([]string, Recognizer) string
}

type RuleNode interface {
	ParseTree

	GetRuleContext() RuleContext
	GetBaseRuleContext() *BaseRuleContext
}

type TerminalNode interface {
	ParseTree

	GetSymbol() Token
}

type ErrorNode interface {
	TerminalNode

	errorNode()
}

type ParseTreeVisitor interface {
	Visit(tree ParseTree) interface{}
	VisitChildren(node RuleNode) interface{}
	VisitTerminal(node TerminalNode) interface{}
	VisitErrorNode(node ErrorNode) interface{}
}

type BaseParseTreeVisitor struct{}

var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return nil }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }

// TODO
//func (this ParseTreeVisitor) Visit(ctx) {
//	if (Utils.isArray(ctx)) {
//		self := this
//		return ctx.map(function(child) { return VisitAtom(self, child)})
//	} else {
//		return VisitAtom(this, ctx)
//	}
//}
//
//func VisitAtom(Visitor, ctx) {
//	if (ctx.parser == nil) { //is terminal
//		return
//	}
//
//	name := ctx.parser.ruleNames[ctx.ruleIndex]
//	funcName := "Visit" + Utils.titleCase(name)
//
//	return Visitor[funcName](ctx)
//}

type ParseTreeListener interface {
	VisitTerminal(node TerminalNode)
	VisitErrorNode(node ErrorNode)
	EnterEveryRule(ctx ParserRuleContext)
	ExitEveryRule(ctx ParserRuleContext)
}

type BaseParseTreeListener struct{}

var _ ParseTreeListener = &BaseParseTreeListener{}

func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}

type TerminalNodeImpl struct {
	parentCtx RuleContext

	symbol Token
}

var _ TerminalNode = &TerminalNodeImpl{}

func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
	tn := new(TerminalNodeImpl)

	tn.parentCtx = nil
	tn.symbol = symbol

	return tn
}

func (t *TerminalNodeImpl) GetChild(i int) Tree {
	return nil
}

func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
	panic("Cannot set children on terminal node")
}

func (t *TerminalNodeImpl) GetSymbol() Token {
	return t.symbol
}

func (t *TerminalNodeImpl) GetParent() Tree {
	return t.parentCtx
}

func (t *TerminalNodeImpl) SetParent(tree Tree) {
	t.parentCtx = tree.(RuleContext)
}

func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
	tokenIndex := t.symbol.GetTokenIndex()
	return NewInterval(tokenIndex, tokenIndex)
}

func (t *TerminalNodeImpl) GetChildCount() int {
	return 0
}

func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitTerminal(t)
}

func (t *TerminalNodeImpl) GetText() string {
	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) String() string {
	if t.symbol.GetTokenType() == TokenEOF {
		return "<EOF>"
	}

	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
	return t.String()
}

// Represents a token that was consumed during resynchronization
// rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion
// and deletion as well as during "consume until error recovery set"
// upon no viable alternative exceptions.

type ErrorNodeImpl struct {
	*TerminalNodeImpl
}

var _ ErrorNode = &ErrorNodeImpl{}

func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
	en := new(ErrorNodeImpl)
	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
	return en
}

func (e *ErrorNodeImpl) errorNode() {}

func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitErrorNode(e)
}

type ParseTreeWalker struct {
}

func NewParseTreeWalker() *ParseTreeWalker {
	return new(ParseTreeWalker)
}

// Performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, EnterRule is called before
// recursively walking down into child nodes, then
// ExitRule is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	case ErrorNode:
		listener.VisitErrorNode(tt)
	case TerminalNode:
		listener.VisitTerminal(tt)
	default:
		p.EnterRule(listener, t.(RuleNode))
		for i := 0; i < t.GetChildCount(); i++ {
			child := t.GetChild(i)
			p.Walk(listener, child)
		}
		p.ExitRule(listener, t.(RuleNode))
	}
}

//
// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// then by triggering the event specific to the given parse tree node
//
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	listener.EnterEveryRule(ctx)
	ctx.EnterRule(listener)
}

// Exits a grammar rule by first triggering the event specific to the given parse tree node
// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
//
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

var ParseTreeWalkerDefault = NewParseTreeWalker()
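
// Editor's illustrative sketch (not part of the original file): a minimal
// listener driven by the walker above, counting rule entries. It uses only
// types declared in this package.
//
//	type ruleCounter struct {
//		BaseParseTreeListener
//		count int
//	}
//
//	func (r *ruleCounter) EnterEveryRule(ctx ParserRuleContext) { r.count++ }
//
//	// walk the whole tree depth-first:
//	// counter := &ruleCounter{}
//	// ParseTreeWalkerDefault.Walk(counter, tree)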
@ -1,137 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import "fmt"

/** A set of utility routines useful for all kinds of ANTLR trees. */

// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
// node payloads to get the text for the nodes. Detect
// parse trees and extract data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	s := TreesGetNodeText(tree, ruleNames, nil)

	s = EscapeWhitespace(s, false)
	c := tree.GetChildCount()
	if c == 0 {
		return s
	}
	res := "(" + s + " "
	if c > 0 {
		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
		res += s
	}
	for i := 1; i < c; i++ {
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
		res += (" " + s)
	}
	res += ")"
	return res
}
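
// Editor's note (illustrative, inferred from the function above): a node with
// children renders as "(" + node text + " " + children... + ")", so a rule "r"
// with terminal children "a" and "b" yields "(r a b)"; a childless node
// renders as just its escaped text.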

func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	if ruleNames != nil {
		switch t2 := t.(type) {
		case RuleNode:
			t3 := t2.GetRuleContext()
			altNumber := t3.GetAltNumber()

			if altNumber != ATNInvalidAltNumber {
				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
			}
			return ruleNames[t3.GetRuleIndex()]
		case ErrorNode:
			return fmt.Sprint(t2)
		case TerminalNode:
			if t2.GetSymbol() != nil {
				return t2.GetSymbol().GetText()
			}
		}
	}

	// no recog for rule names
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
	}

	return fmt.Sprint(t.GetPayload())
}

// Return ordered list of all children of this node
func TreesGetChildren(t Tree) []Tree {
	list := make([]Tree, 0)
	for i := 0; i < t.GetChildCount(); i++ {
		list = append(list, t.GetChild(i))
	}
	return list
}

// Return a list of all ancestors of this node. The first node of
// list is the root and the last is the parent of this node.
//
func TreesgetAncestors(t Tree) []Tree {
	ancestors := make([]Tree, 0)
	t = t.GetParent()
	for t != nil {
		f := []Tree{t}
		ancestors = append(f, ancestors...)
		t = t.GetParent()
	}
	return ancestors
}

func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}

func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
	nodes := make([]ParseTree, 0)
	treesFindAllNodes(t, index, findTokens, &nodes)
	return nodes
}

func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
	// check this node (the root) first

	t2, ok := t.(TerminalNode)
	t3, ok2 := t.(ParserRuleContext)

	if findTokens && ok {
		if t2.GetSymbol().GetTokenType() == index {
			*nodes = append(*nodes, t2)
		}
	} else if !findTokens && ok2 {
		if t3.GetRuleIndex() == index {
			*nodes = append(*nodes, t3)
		}
	}
	// check children
	for i := 0; i < t.GetChildCount(); i++ {
		treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
	}
}

func TreesDescendants(t ParseTree) []ParseTree {
	nodes := []ParseTree{t}
	for i := 0; i < t.GetChildCount(); i++ {
		nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
	}
	return nodes
}
@ -1,417 +0,0 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"bytes"
	"errors"
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func intMax(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// A simple integer stack

type IntStack []int

var ErrEmptyStack = errors.New("Stack is empty")

func (s *IntStack) Pop() (int, error) {
	l := len(*s) - 1
	if l < 0 {
		return 0, ErrEmptyStack
	}
	v := (*s)[l]
	*s = (*s)[0:l]
	return v, nil
}

func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}

type Set struct {
	data             map[int][]interface{}
	hashcodeFunction func(interface{}) int
	equalsFunction   func(interface{}, interface{}) bool
}

func NewSet(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(interface{}, interface{}) bool) *Set {

	s := new(Set)

	s.data = make(map[int][]interface{})

	if hashcodeFunction != nil {
		s.hashcodeFunction = hashcodeFunction
	} else {
		s.hashcodeFunction = standardHashFunction
	}

	if equalsFunction == nil {
		s.equalsFunction = standardEqualsFunction
	} else {
		s.equalsFunction = equalsFunction
	}

	return s
}

func standardEqualsFunction(a interface{}, b interface{}) bool {
	ac, oka := a.(comparable)
	bc, okb := b.(comparable)

	if !oka || !okb {
		panic("Not Comparable")
	}

	return ac.equals(bc)
}

func standardHashFunction(a interface{}) int {
	if h, ok := a.(hasher); ok {
		return h.hash()
	}

	panic("Not Hasher")
}

type hasher interface {
	hash() int
}

func (s *Set) length() int {
	return len(s.data)
}

func (s *Set) add(value interface{}) interface{} {
	key := s.hashcodeFunction(value)

	values := s.data[key]

	if s.data[key] != nil {
		for i := 0; i < len(values); i++ {
			if s.equalsFunction(value, values[i]) {
				return values[i]
			}
		}

		s.data[key] = append(s.data[key], value)
		return value
	}

	v := make([]interface{}, 1, 10)
	v[0] = value
	s.data[key] = v

	return value
}

func (s *Set) contains(value interface{}) bool {
	key := s.hashcodeFunction(value)

	values := s.data[key]

	if s.data[key] != nil {
		for i := 0; i < len(values); i++ {
			if s.equalsFunction(value, values[i]) {
				return true
			}
		}
	}
	return false
}

func (s *Set) values() []interface{} {
	var l []interface{}

	for _, v := range s.data {
		l = append(l, v...)
	}

	return l
}

func (s *Set) String() string {
	r := ""

	for _, av := range s.data {
		for _, v := range av {
			r += fmt.Sprint(v)
		}
	}

	return r
}

type BitSet struct {
	data map[int]bool
}

func NewBitSet() *BitSet {
	b := new(BitSet)
	b.data = make(map[int]bool)
	return b
}

func (b *BitSet) add(value int) {
	b.data[value] = true
}

func (b *BitSet) clear(index int) {
	delete(b.data, index)
}

func (b *BitSet) or(set *BitSet) {
	for k := range set.data {
		b.add(k)
	}
}

func (b *BitSet) remove(value int) {
	delete(b.data, value)
}

func (b *BitSet) contains(value int) bool {
	return b.data[value]
}

func (b *BitSet) values() []int {
	ks := make([]int, len(b.data))
	i := 0
	for k := range b.data {
		ks[i] = k
		i++
	}
	sort.Ints(ks)
	return ks
}

func (b *BitSet) minValue() int {
	min := 2147483647

	for k := range b.data {
		if k < min {
			min = k
		}
	}

	return min
}

func (b *BitSet) equals(other interface{}) bool {
	otherBitSet, ok := other.(*BitSet)
	if !ok {
		return false
	}

	if len(b.data) != len(otherBitSet.data) {
		return false
	}

	for k, v := range b.data {
		if otherBitSet.data[k] != v {
			return false
		}
	}

	return true
}

func (b *BitSet) length() int {
	return len(b.data)
}

func (b *BitSet) String() string {
	vals := b.values()
	valsS := make([]string, len(vals))

	for i, val := range vals {
		valsS[i] = strconv.Itoa(val)
	}
	return "{" + strings.Join(valsS, ", ") + "}"
}

type AltDict struct {
	data map[string]interface{}
}

func NewAltDict() *AltDict {
	d := new(AltDict)
	d.data = make(map[string]interface{})
	return d
}

func (a *AltDict) Get(key string) interface{} {
	key = "k-" + key
	return a.data[key]
}

func (a *AltDict) put(key string, value interface{}) {
	key = "k-" + key
	a.data[key] = value
}

func (a *AltDict) values() []interface{} {
	vs := make([]interface{}, len(a.data))
	i := 0
	for _, v := range a.data {
		vs[i] = v
		i++
	}
	return vs
}

type DoubleDict struct {
	data map[int]map[int]interface{}
}

func NewDoubleDict() *DoubleDict {
	dd := new(DoubleDict)
	dd.data = make(map[int]map[int]interface{})
	return dd
}

func (d *DoubleDict) Get(a, b int) interface{} {
	data := d.data[a]

	if data == nil {
		return nil
	}

	return data[b]
}

func (d *DoubleDict) set(a, b int, o interface{}) {
	data := d.data[a]

	if data == nil {
		data = make(map[int]interface{})
		d.data[a] = data
	}

	data[b] = o
}

func EscapeWhitespace(s string, escapeSpaces bool) string {
	s = strings.Replace(s, "\t", "\\t", -1)
	s = strings.Replace(s, "\n", "\\n", -1)
	s = strings.Replace(s, "\r", "\\r", -1)
	if escapeSpaces {
		s = strings.Replace(s, " ", "\u00B7", -1)
	}
	return s
}

func TerminalNodeToStringArray(sa []TerminalNode) []string {
	st := make([]string, len(sa))

	for i, s := range sa {
		st[i] = fmt.Sprintf("%v", s)
	}

	return st
}

func PrintArrayJavaStyle(sa []string) string {
	var buffer bytes.Buffer

	buffer.WriteString("[")

	for i, s := range sa {
		buffer.WriteString(s)
		if i != len(sa)-1 {
			buffer.WriteString(", ")
		}
	}

	buffer.WriteString("]")

	return buffer.String()
}

// The following routines were lifted from bits.rotate* available in Go 1.9.

const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64

// rotateLeft returns the value of x rotated left by (k mod UintSize) bits.
// To rotate x right by k bits, call RotateLeft(x, -k).
func rotateLeft(x uint, k int) uint {
	if uintSize == 32 {
		return uint(rotateLeft32(uint32(x), k))
	}
	return uint(rotateLeft64(uint64(x), k))
}

// rotateLeft32 returns the value of x rotated left by (k mod 32) bits.
func rotateLeft32(x uint32, k int) uint32 {
	const n = 32
	s := uint(k) & (n - 1)
	return x<<s | x>>(n-s)
}

// rotateLeft64 returns the value of x rotated left by (k mod 64) bits.
func rotateLeft64(x uint64, k int) uint64 {
	const n = 64
	s := uint(k) & (n - 1)
	return x<<s | x>>(n-s)
}

// murmur hash
const (
	c1_32 uint = 0xCC9E2D51
	c2_32 uint = 0x1B873593
	n1_32 uint = 0xE6546B64
)

func murmurInit(seed int) int {
	return seed
}

func murmurUpdate(h1 int, k1 int) int {
	var k1u uint
	k1u = uint(k1) * c1_32
	k1u = rotateLeft(k1u, 15)
	k1u *= c2_32

	var h1u = uint(h1) ^ k1u
	// murmur3 rotates the accumulator h1 at this step, not the mixed key k1
	h1u = rotateLeft(h1u, 13)
	h1u = h1u*5 + 0xe6546b64
	return int(h1u)
}

func murmurFinish(h1 int, numberOfWords int) int {
	var h1u uint = uint(h1)
	h1u ^= uint(numberOfWords * 4)
	h1u ^= h1u >> 16
	h1u *= uint(0x85ebca6b)
	h1u ^= h1u >> 13
	h1u *= 0xc2b2ae35
	h1u ^= h1u >> 16

	return int(h1u)
}
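
// Editor's illustrative sketch (not part of the original file): the murmur
// helpers above are meant to be used as init/update/finish over a sequence
// of words, e.g.
//
//	h := murmurInit(0)
//	for _, w := range []int{1, 2, 3} {
//		h = murmurUpdate(h, w)
//	}
//	h = murmurFinish(h, 3) // 3 words were mixed into the hash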
@ -1,79 +0,0 @@
lexer grammar CESQLLexer;

// NOTE:
// This grammar is case-sensitive, although CESQL keywords are case-insensitive.
// In order to implement case-insensitivity, check out
// https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach

// Skip tab, carriage return and newlines

SPACE: [ \t\r\n]+ -> skip;

// Fragments for Literal primitives

fragment ID_LITERAL: [a-zA-Z0-9]+;
fragment DQUOTA_STRING: '"' ( '\\'. | '""' | ~('"'| '\\') )* '"';
fragment SQUOTA_STRING: '\'' ('\\'. | '\'\'' | ~('\'' | '\\'))* '\'';
fragment INT_DIGIT: [0-9];
fragment FN_LITERAL: [A-Z] [A-Z_]*;

// Constructor symbols

LR_BRACKET: '(';
RR_BRACKET: ')';
COMMA: ',';
SINGLE_QUOTE_SYMB: '\'';
DOUBLE_QUOTE_SYMB: '"';

fragment QUOTE_SYMB
    : SINGLE_QUOTE_SYMB | DOUBLE_QUOTE_SYMB
    ;

// Operators
// - Logic

AND: 'AND';
OR: 'OR';
XOR: 'XOR';
NOT: 'NOT';

// - Arithmetics

STAR: '*';
DIVIDE: '/';
MODULE: '%';
PLUS: '+';
MINUS: '-';

// - Comparison

EQUAL: '=';
NOT_EQUAL: '!=';
GREATER: '>';
GREATER_OR_EQUAL: '>=';
LESS: '<';
LESS_GREATER: '<>';
LESS_OR_EQUAL: '<=';

// Like, exists, in

LIKE: 'LIKE';
EXISTS: 'EXISTS';
IN: 'IN';

// Booleans

TRUE: 'TRUE';
FALSE: 'FALSE';

// Literals

DQUOTED_STRING_LITERAL: DQUOTA_STRING;
SQUOTED_STRING_LITERAL: SQUOTA_STRING;
INTEGER_LITERAL: INT_DIGIT+;

// Identifiers

IDENTIFIER: [a-zA-Z]+;
IDENTIFIER_WITH_NUMBER: [a-zA-Z0-9]+;
FUNCTION_IDENTIFIER_WITH_UNDERSCORE: [A-Z] [A-Z_]*;
@ -1,62 +0,0 @@
grammar CESQLParser;

import CESQLLexer;

// Entrypoint
cesql: expression EOF;

// Structure of operations, function invocations and expression
expression
    : functionIdentifier functionParameterList #functionInvocationExpression
    // unary operators are the highest priority
    | NOT expression #unaryLogicExpression
    | MINUS expression #unaryNumericExpression
    // LIKE, EXISTS and IN take precedence over all the other binary operators
    | expression NOT? LIKE stringLiteral #likeExpression
    | EXISTS identifier #existsExpression
    | expression NOT? IN setExpression #inExpression
    // Numeric operations
    | expression (STAR | DIVIDE | MODULE) expression #binaryMultiplicativeExpression
    | expression (PLUS | MINUS) expression #binaryAdditiveExpression
    // Comparison operations
    | expression (EQUAL | NOT_EQUAL | LESS_GREATER | GREATER_OR_EQUAL | LESS_OR_EQUAL | LESS | GREATER) expression #binaryComparisonExpression
    // Logic operations
    |<assoc=right> expression (AND | OR | XOR) expression #binaryLogicExpression
    // Subexpressions and atoms
    | LR_BRACKET expression RR_BRACKET #subExpression
    | atom #atomExpression
    ;

atom
    : booleanLiteral #booleanAtom
    | integerLiteral #integerAtom
    | stringLiteral #stringAtom
    | identifier #identifierAtom
    ;

// Identifiers

identifier
    : (IDENTIFIER | IDENTIFIER_WITH_NUMBER)
    ;
functionIdentifier
    : (IDENTIFIER | FUNCTION_IDENTIFIER_WITH_UNDERSCORE)
    ;

// Literals

booleanLiteral: (TRUE | FALSE);
stringLiteral: (DQUOTED_STRING_LITERAL | SQUOTED_STRING_LITERAL);
integerLiteral: INTEGER_LITERAL;

// Functions

functionParameterList
    : LR_BRACKET ( expression ( COMMA expression )* )? RR_BRACKET
    ;

// Sets

setExpression
    : LR_BRACKET expression ( COMMA expression )* RR_BRACKET // Empty sets are not allowed
;
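
// Editor's note (illustrative, not part of the original grammar): with the
// alternative ordering above, earlier alternatives bind tighter, so an input
// such as
//   subject = 'a' AND type IN ('t1', 't2')
// groups as (subject = 'a') AND (type IN ('t1', 't2')): IN and the comparison
// operators take precedence over the right-associative AND/OR/XOR alternative.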
@ -1,201 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -1,27 +0,0 @@
# CloudEvents Expression Language Go implementation

CloudEvents Expression Language (CESQL) implementation in Go.

Note: this package is a work in progress; APIs might break in future releases.

## User guide

To start using it:

```go
import cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"

// Parse the expression
expression, err := cesqlparser.Parse("subject = 'Hello world'")

// res can be either int32, bool or string
res, err := expression.Evaluate(event)
```
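
For a self-contained starting point, here is a minimal end-to-end sketch (an editor's addition, not from the upstream README): it builds a CloudEvent whose `subject` attribute is `Hello world` and evaluates the expression against it. The ID, source, and type values are placeholders chosen for illustration.

```go
package main

import (
	"fmt"

	cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	// Build an event carrying the attribute the expression refers to.
	event := cloudevents.NewEvent()
	event.SetID("1")                  // placeholder ID
	event.SetSource("example/source") // placeholder source
	event.SetType("example.type")     // placeholder type
	event.SetSubject("Hello world")

	expression, err := cesqlparser.Parse("subject = 'Hello world'")
	if err != nil {
		panic(err)
	}

	res, err := expression.Evaluate(event)
	fmt.Println(res, err) // expected: true <nil>
}
```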

## Development guide

To regenerate the parser, make sure you have [ANTLR4 installed](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) and then run:

```shell
antlr4 -Dlanguage=Go -package gen -o gen -visitor -no-listener CESQLParser.g4
```
@ -1,17 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package v2

import cloudevents "github.com/cloudevents/sdk-go/v2"

// Expression represents a parsed CloudEvents SQL Expression.
type Expression interface {
	// Evaluate the expression using the provided input type.
	// The return value can be either int32, bool or string.
	// The evaluation fails as soon as an error arises.
	Evaluate(event cloudevents.Event) (interface{}, error)
}
@ -1,17 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import cesql "github.com/cloudevents/sdk-go/sql/v2"

type baseUnaryExpression struct {
	child cesql.Expression
}

type baseBinaryExpression struct {
	left  cesql.Expression
	right cesql.Expression
}
56 vendor/github.com/cloudevents/sdk-go/sql/v2/expression/comparison_expressions.go generated vendored
@ -1,56 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type equalExpression struct {
	baseBinaryExpression
	equal bool
}

func (s equalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	leftVal, err := s.left.Evaluate(event)
	if err != nil {
		return nil, err
	}

	rightVal, err := s.right.Evaluate(event)
	if err != nil {
		return nil, err
	}

	leftVal, err = utils.Cast(leftVal, cesql.TypeFromVal(rightVal))
	if err != nil {
		return nil, err
	}

	return (leftVal == rightVal) == s.equal, nil
}

func NewEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return equalExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		equal: true,
	}
}

func NewNotEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return equalExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		equal: false,
	}
}
@ -1,24 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type existsExpression struct {
	identifier string
}

func (l existsExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	return utils.ContainsAttribute(event, l.identifier), nil
}

func NewExistsExpression(identifier string) cesql.Expression {
	return existsExpression{identifier: identifier}
}
57 vendor/github.com/cloudevents/sdk-go/sql/v2/expression/function_invocation_expression.go generated vendored
@ -1,57 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/runtime"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type functionInvocationExpression struct {
	name                string
	argumentsExpression []cesql.Expression
}

func (expr functionInvocationExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	fn := runtime.ResolveFunction(expr.name, len(expr.argumentsExpression))
	if fn == nil {
		return nil, fmt.Errorf("cannot resolve function %s", expr.name)
	}

	args := make([]interface{}, len(expr.argumentsExpression))

	for i, expr := range expr.argumentsExpression {
		arg, err := expr.Evaluate(event)
		if err != nil {
			return nil, err
		}

		argType := fn.ArgType(i)
		if argType == nil {
			return nil, fmt.Errorf("cannot resolve arg type at index %d", i)
		}

		arg, err = utils.Cast(arg, *argType)
		if err != nil {
			return nil, err
		}

		args[i] = arg
	}

	return fn.Run(event, args)
}

func NewFunctionInvocationExpression(name string, argumentsExpression []cesql.Expression) cesql.Expression {
	return functionInvocationExpression{
		name:                name,
		argumentsExpression: argumentsExpression,
	}
}
31 vendor/github.com/cloudevents/sdk-go/sql/v2/expression/identifier_expression.go generated vendored
@ -1,31 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type identifierExpression struct {
	identifier string
}

func (l identifierExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	value := utils.GetAttribute(event, l.identifier)
	if value == nil {
		return nil, fmt.Errorf("missing attribute '%s'", l.identifier)
	}

	return value, nil
}

func NewIdentifierExpression(identifier string) cesql.Expression {
	return identifierExpression{identifier: identifier}
}
@ -1,46 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type inExpression struct {
	leftExpression cesql.Expression
	setExpression  []cesql.Expression
}

func (l inExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	leftValue, err := l.leftExpression.Evaluate(event)
	if err != nil {
		return nil, err
	}

	for _, rightExpression := range l.setExpression {
		rightValue, err := rightExpression.Evaluate(event)
		if err != nil {
			return nil, err
		}

		rightValue, err = utils.Cast(rightValue, cesql.TypeFromVal(leftValue))
		if err != nil {
			return nil, err
		}

		if leftValue == rightValue {
			return true, nil
		}
	}

	return false, nil
}

func NewInExpression(leftExpression cesql.Expression, setExpression []cesql.Expression) cesql.Expression {
	return inExpression{leftExpression, setExpression}
}
89 vendor/github.com/cloudevents/sdk-go/sql/v2/expression/integer_comparison_expressions.go generated vendored
@ -1,89 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type integerComparisonExpression struct {
	baseBinaryExpression
	fn func(x, y int32) bool
}

func (s integerComparisonExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
	leftVal, err := s.left.Evaluate(event)
	if err != nil {
		return nil, err
	}

	rightVal, err := s.right.Evaluate(event)
	if err != nil {
		return nil, err
	}

	leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
	if err != nil {
		return nil, err
	}

	rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
	if err != nil {
		return nil, err
	}

	return s.fn(leftVal.(int32), rightVal.(int32)), nil
}

func NewLessExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return integerComparisonExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		fn: func(x, y int32) bool {
			return x < y
		},
	}
}

func NewLessOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return integerComparisonExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		fn: func(x, y int32) bool {
			return x <= y
		},
	}
}

func NewGreaterExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return integerComparisonExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		fn: func(x, y int32) bool {
			return x > y
		},
	}
}

func NewGreaterOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
	return integerComparisonExpression{
		baseBinaryExpression: baseBinaryExpression{
			left:  left,
			right: right,
		},
		fn: func(x, y int32) bool {
			return x >= y
		},
	}
}
@@ -1,96 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    "regexp"
    "strings"

    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type likeExpression struct {
    baseUnaryExpression
    pattern *regexp.Regexp
}

func (l likeExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    val, err := l.child.Evaluate(event)
    if err != nil {
        return nil, err
    }

    val, err = utils.Cast(val, cesql.StringType)
    if err != nil {
        return nil, err
    }

    return l.pattern.MatchString(val.(string)), nil
}

func NewLikeExpression(child cesql.Expression, pattern string) (cesql.Expression, error) {
    // Converting to regex is not the most performant impl, but it works
    p, err := convertLikePatternToRegex(pattern)
    if err != nil {
        return nil, err
    }

    return likeExpression{
        baseUnaryExpression: baseUnaryExpression{
            child: child,
        },
        pattern: p,
    }, nil
}

func convertLikePatternToRegex(pattern string) (*regexp.Regexp, error) {
    var chunks []string
    chunks = append(chunks, "^")

    var chunk strings.Builder

    for i := 0; i < len(pattern); i++ {
        if pattern[i] == '\\' && i < len(pattern)-1 {
            if pattern[i+1] == '%' {
                // \% case
                chunk.WriteRune('%')
                chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
                chunk.Reset()
                i++
                continue
            } else if pattern[i+1] == '_' {
                // \_ case
                chunk.WriteRune('_')
                chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
                chunk.Reset()
                i++
                continue
            }
        } else if pattern[i] == '_' {
            // replace with .
            chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
            chunk.Reset()
            chunks = append(chunks, ".")
        } else if pattern[i] == '%' {
            // replace with .*
            chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
            chunk.Reset()
            chunks = append(chunks, ".*")
        } else {
            chunk.WriteByte(pattern[i])
        }
    }

    if chunk.Len() != 0 {
        chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
    }

    chunks = append(chunks, "$")

    return regexp.Compile(strings.Join(chunks, ""))
}
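
A minimal sketch of the LIKE translation above, assuming only the exported constructors in this diff: `_` becomes `.`, `%` becomes `.*`, and literal chunks are quoted with \Q...\E, so the pattern "abc%" compiles to the regex ^\Qabc\E.*$:

package main

import (
    "fmt"

    "github.com/cloudevents/sdk-go/sql/v2/expression"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
    // LIKE over a literal: any string with the "abc" prefix matches.
    like, err := expression.NewLikeExpression(
        expression.NewLiteralExpression("abcdef"), "abc%")
    if err != nil {
        panic(err)
    }
    result, err := like.Evaluate(cloudevents.NewEvent())
    if err != nil {
        panic(err)
    }
    fmt.Println(result) // true
}
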
@@ -1,23 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type literalExpression struct {
    value interface{}
}

func (l literalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    return l.value, nil
}

func NewLiteralExpression(value interface{}) cesql.Expression {
    return literalExpression{value: value}
}
@@ -1,77 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type logicExpression struct {
    baseBinaryExpression
    fn func(x, y bool) bool
}

func (s logicExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    leftVal, err := s.left.Evaluate(event)
    if err != nil {
        return nil, err
    }

    rightVal, err := s.right.Evaluate(event)
    if err != nil {
        return nil, err
    }

    leftVal, err = utils.Cast(leftVal, cesql.BooleanType)
    if err != nil {
        return nil, err
    }

    rightVal, err = utils.Cast(rightVal, cesql.BooleanType)
    if err != nil {
        return nil, err
    }

    return s.fn(leftVal.(bool), rightVal.(bool)), nil
}

func NewAndExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return logicExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y bool) bool {
            return x && y
        },
    }
}

func NewOrExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return logicExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y bool) bool {
            return x || y
        },
    }
}

func NewXorExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return logicExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y bool) bool {
            return x != y
        },
    }
}
@@ -1,109 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    "errors"

    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type mathExpression struct {
    baseBinaryExpression
    fn func(x, y int32) (int32, error)
}

func (s mathExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    leftVal, err := s.left.Evaluate(event)
    if err != nil {
        return nil, err
    }

    rightVal, err := s.right.Evaluate(event)
    if err != nil {
        return nil, err
    }

    leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
    if err != nil {
        return nil, err
    }

    rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
    if err != nil {
        return nil, err
    }

    return s.fn(leftVal.(int32), rightVal.(int32))
}

func NewSumExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return mathExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y int32) (int32, error) {
            return x + y, nil
        },
    }
}

func NewDifferenceExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return mathExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y int32) (int32, error) {
            return x - y, nil
        },
    }
}

func NewMultiplicationExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return mathExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y int32) (int32, error) {
            return x * y, nil
        },
    }
}

func NewModuleExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return mathExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y int32) (int32, error) {
            if y == 0 {
                return 0, errors.New("math error: division by zero")
            }
            return x % y, nil
        },
    }
}

func NewDivisionExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
    return mathExpression{
        baseBinaryExpression: baseBinaryExpression{
            left:  left,
            right: right,
        },
        fn: func(x, y int32) (int32, error) {
            if y == 0 {
                return 0, errors.New("math error: division by zero")
            }
            return x / y, nil
        },
    }
}
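
The arithmetic above is int32-only, and both modulo and division guard against a zero divisor. A small composition sketch under the same assumptions as the earlier examples:

package main

import (
    "fmt"

    "github.com/cloudevents/sdk-go/sql/v2/expression"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
    // 2 + 3 < 10: the sum evaluates to int32(5), which the comparison
    // casts back to int32 before applying its < callback.
    lt := expression.NewLessExpression(
        expression.NewSumExpression(
            expression.NewLiteralExpression(int32(2)),
            expression.NewLiteralExpression(int32(3)),
        ),
        expression.NewLiteralExpression(int32(10)),
    )
    result, _ := lt.Evaluate(cloudevents.NewEvent())
    fmt.Println(result) // true
}
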
@@ -1,32 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type negateExpression baseUnaryExpression

func (l negateExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    val, err := l.child.Evaluate(event)
    if err != nil {
        return nil, err
    }

    val, err = utils.Cast(val, cesql.IntegerType)
    if err != nil {
        return nil, err
    }

    return -(val.(int32)), nil
}

func NewNegateExpression(child cesql.Expression) cesql.Expression {
    return negateExpression{child: child}
}
@@ -1,32 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package expression

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type notExpression baseUnaryExpression

func (l notExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
    val, err := l.child.Evaluate(event)
    if err != nil {
        return nil, err
    }

    val, err = utils.Cast(val, cesql.BooleanType)
    if err != nil {
        return nil, err
    }

    return !(val.(bool)), nil
}

func NewNotExpression(child cesql.Expression) cesql.Expression {
    return notExpression{child: child}
}
@@ -1,17 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package v2

import cloudevents "github.com/cloudevents/sdk-go/v2"

type Function interface {
    Name() string
    Arity() int
    IsVariadic() bool
    ArgType(index int) *Type

    Run(event cloudevents.Event, arguments []interface{}) (interface{}, error)
}
@@ -1,57 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package function

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    "github.com/cloudevents/sdk-go/sql/v2/utils"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

var IntFunction function = function{
    name:         "INT",
    fixedArgs:    []cesql.Type{cesql.AnyType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return utils.Cast(i[0], cesql.IntegerType)
    },
}

var BoolFunction function = function{
    name:         "BOOL",
    fixedArgs:    []cesql.Type{cesql.AnyType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return utils.Cast(i[0], cesql.BooleanType)
    },
}

var StringFunction function = function{
    name:         "STRING",
    fixedArgs:    []cesql.Type{cesql.AnyType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return utils.Cast(i[0], cesql.StringType)
    },
}

var IsIntFunction function = function{
    name:         "IS_INT",
    fixedArgs:    []cesql.Type{cesql.AnyType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return utils.CanCast(i[0], cesql.IntegerType), nil
    },
}

var IsBoolFunction function = function{
    name:         "IS_BOOL",
    fixedArgs:    []cesql.Type{cesql.AnyType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return utils.CanCast(i[0], cesql.BooleanType), nil
    },
}
@@ -1,41 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package function

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

type function struct {
    name         string
    fixedArgs    []cesql.Type
    variadicArgs *cesql.Type
    fn           func(cloudevents.Event, []interface{}) (interface{}, error)
}

func (f function) Name() string {
    return f.name
}

func (f function) Arity() int {
    return len(f.fixedArgs)
}

func (f function) IsVariadic() bool {
    return f.variadicArgs != nil
}

func (f function) ArgType(index int) *cesql.Type {
    if index < len(f.fixedArgs) {
        return &f.fixedArgs[index]
    }
    return f.variadicArgs
}

func (f function) Run(event cloudevents.Event, arguments []interface{}) (interface{}, error) {
    return f.fn(event, arguments)
}
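
The unexported function struct above is how the SDK's built-ins satisfy the Function interface; a user-defined function can follow the same pattern. A sketch with a hypothetical REVERSE function (not part of the SDK):

package main

import (
    "fmt"

    cesql "github.com/cloudevents/sdk-go/sql/v2"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

// reverseFunction is hypothetical; it reverses its single string argument.
type reverseFunction struct{}

var _ cesql.Function = reverseFunction{}

func (reverseFunction) Name() string     { return "REVERSE" }
func (reverseFunction) Arity() int       { return 1 }
func (reverseFunction) IsVariadic() bool { return false }

func (reverseFunction) ArgType(index int) *cesql.Type {
    return cesql.TypePtr(cesql.StringType)
}

func (reverseFunction) Run(event cloudevents.Event, args []interface{}) (interface{}, error) {
    runes := []rune(args[0].(string))
    for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
        runes[i], runes[j] = runes[j], runes[i]
    }
    return string(runes), nil
}

func main() {
    out, _ := reverseFunction{}.Run(cloudevents.NewEvent(), []interface{}{"knative"})
    fmt.Println(out) // evitank
}
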
@@ -1,24 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package function

import (
    cesql "github.com/cloudevents/sdk-go/sql/v2"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

var AbsFunction function = function{
    name:         "ABS",
    fixedArgs:    []cesql.Type{cesql.IntegerType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        x := i[0].(int32)
        if x < 0 {
            return -x, nil
        }
        return x, nil
    },
}
@@ -1,177 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package function

import (
    "fmt"
    "strings"

    cesql "github.com/cloudevents/sdk-go/sql/v2"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

var LengthFunction function = function{
    name:         "LENGTH",
    fixedArgs:    []cesql.Type{cesql.StringType},
    variadicArgs: nil,
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return int32(len(i[0].(string))), nil
    },
}

var ConcatFunction function = function{
    name:         "CONCAT",
    variadicArgs: cesql.TypePtr(cesql.StringType),
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        var sb strings.Builder
        for _, v := range i {
            sb.WriteString(v.(string))
        }
        return sb.String(), nil
    },
}

var ConcatWSFunction function = function{
    name:         "CONCAT_WS",
    fixedArgs:    []cesql.Type{cesql.StringType},
    variadicArgs: cesql.TypePtr(cesql.StringType),
    fn: func(event cloudevents.Event, args []interface{}) (interface{}, error) {
        if len(args) == 1 {
            return "", nil
        }
        separator := args[0].(string)

        var sb strings.Builder
        for i := 1; i < len(args)-1; i++ {
            sb.WriteString(args[i].(string))
            sb.WriteString(separator)
        }
        sb.WriteString(args[len(args)-1].(string))
        return sb.String(), nil
    },
}

var LowerFunction function = function{
    name:      "LOWER",
    fixedArgs: []cesql.Type{cesql.StringType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return strings.ToLower(i[0].(string)), nil
    },
}

var UpperFunction function = function{
    name:      "UPPER",
    fixedArgs: []cesql.Type{cesql.StringType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return strings.ToUpper(i[0].(string)), nil
    },
}

var TrimFunction function = function{
    name:      "TRIM",
    fixedArgs: []cesql.Type{cesql.StringType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        return strings.TrimSpace(i[0].(string)), nil
    },
}

var LeftFunction function = function{
    name:      "LEFT",
    fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        str := i[0].(string)
        y := int(i[1].(int32))

        if y > len(str) {
            return str, nil
        }

        if y < 0 {
            return nil, fmt.Errorf("LEFT y argument is < 0: %d", y)
        }

        return str[0:y], nil
    },
}

var RightFunction function = function{
    name:      "RIGHT",
    fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        str := i[0].(string)
        y := int(i[1].(int32))

        if y > len(str) {
            return str, nil
        }

        if y < 0 {
            return nil, fmt.Errorf("RIGHT y argument is < 0: %d", y)
        }

        return str[len(str)-y:], nil
    },
}

var SubstringFunction function = function{
    name:      "SUBSTRING",
    fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        str := i[0].(string)
        pos := int(i[1].(int32))

        if pos == 0 {
            return "", nil
        }

        if pos < -len(str) || pos > len(str) {
            return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
        }

        var beginning int
        if pos < 0 {
            beginning = len(str) + pos
        } else {
            beginning = pos - 1
        }

        return str[beginning:], nil
    },
}

var SubstringWithLengthFunction function = function{
    name:      "SUBSTRING",
    fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType, cesql.IntegerType},
    fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
        str := i[0].(string)
        pos := int(i[1].(int32))
        length := int(i[2].(int32))

        if pos == 0 {
            return "", nil
        }

        if pos < -len(str) || pos > len(str) {
            return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
        }

        var beginning int
        if pos < 0 {
            beginning = len(str) + pos
        } else {
            beginning = pos - 1
        }

        var end int
        if beginning+length > len(str) {
            end = len(str)
        } else {
            end = beginning + length
        }

        return str[beginning:end], nil
    },
}
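
One detail worth noting in the SUBSTRING implementations above: pos is 1-based, pos 0 yields the empty string, and a negative pos counts back from the end of the string. A short sketch exercising the exported variable directly:

package main

import (
    "fmt"

    "github.com/cloudevents/sdk-go/sql/v2/function"
    cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
    event := cloudevents.NewEvent()

    // pos is 1-based: beginning = pos - 1, so pos 2 keeps "bcdef".
    out, _ := function.SubstringFunction.Run(event, []interface{}{"abcdef", int32(2)})
    fmt.Println(out) // bcdef

    // negative pos counts from the end: beginning = len(str) + pos.
    out, _ = function.SubstringFunction.Run(event, []interface{}{"abcdef", int32(-2)})
    fmt.Println(out) // ef
}
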
@@ -1,87 +0,0 @@
token literal names:
null
null
'('
')'
','
'\''
'"'
'AND'
'OR'
'XOR'
'NOT'
'*'
'/'
'%'
'+'
'-'
'='
'!='
'>'
'>='
'<'
'<>'
'<='
'LIKE'
'EXISTS'
'IN'
'TRUE'
'FALSE'
null
null
null
null
null
null

token symbolic names:
null
SPACE
LR_BRACKET
RR_BRACKET
COMMA
SINGLE_QUOTE_SYMB
DOUBLE_QUOTE_SYMB
AND
OR
XOR
NOT
STAR
DIVIDE
MODULE
PLUS
MINUS
EQUAL
NOT_EQUAL
GREATER
GREATER_OR_EQUAL
LESS
LESS_GREATER
LESS_OR_EQUAL
LIKE
EXISTS
IN
TRUE
FALSE
DQUOTED_STRING_LITERAL
SQUOTED_STRING_LITERAL
INTEGER_LITERAL
IDENTIFIER
IDENTIFIER_WITH_NUMBER
FUNCTION_IDENTIFIER_WITH_UNDERSCORE

rule names:
cesql
expression
atom
identifier
functionIdentifier
booleanLiteral
stringLiteral
integerLiteral
functionParameterList
setExpression


atn:
[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 35, 112, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 41, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 57, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 63, 10, 3, 3, 3, 3, 3, 7, 3, 67, 10, 3, 12, 3, 14, 3, 70, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 76, 10, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7, 10, 92, 10, 10, 12, 10, 14, 10, 95, 11, 10, 5, 10, 97, 10, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 105, 10, 11, 12, 11, 14, 11, 108, 11, 11, 3, 11, 3, 11, 3, 11, 2, 3, 4, 12, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 2, 10, 3, 2, 13, 15, 3, 2, 16, 17, 3, 2, 18, 24, 3, 2, 9, 11, 3, 2, 33, 34, 4, 2, 33, 33, 35, 35, 3, 2, 28, 29, 3, 2, 30, 31, 2, 120, 2, 22, 3, 2, 2, 2, 4, 40, 3, 2, 2, 2, 6, 75, 3, 2, 2, 2, 8, 77, 3, 2, 2, 2, 10, 79, 3, 2, 2, 2, 12, 81, 3, 2, 2, 2, 14, 83, 3, 2, 2, 2, 16, 85, 3, 2, 2, 2, 18, 87, 3, 2, 2, 2, 20, 100, 3, 2, 2, 2, 22, 23, 5, 4, 3, 2, 23, 24, 7, 2, 2, 3, 24, 3, 3, 2, 2, 2, 25, 26, 8, 3, 1, 2, 26, 27, 5, 10, 6, 2, 27, 28, 5, 18, 10, 2, 28, 41, 3, 2, 2, 2, 29, 30, 7, 12, 2, 2, 30, 41, 5, 4, 3, 13, 31, 32, 7, 17, 2, 2, 32, 41, 5, 4, 3, 12, 33, 34, 7, 26, 2, 2, 34, 41, 5, 8, 5, 2, 35, 36, 7, 4, 2, 2, 36, 37, 5, 4, 3, 2, 37, 38, 7, 5, 2, 2, 38, 41, 3, 2, 2, 2, 39, 41, 5, 6, 4, 2, 40, 25, 3, 2, 2, 2, 40, 29, 3, 2, 2, 2, 40, 31, 3, 2, 2, 2, 40, 33, 3, 2, 2, 2, 40, 35, 3, 2, 2, 2, 40, 39, 3, 2, 2, 2, 41, 68, 3, 2, 2, 2, 42, 43, 12, 8, 2, 2, 43, 44, 9, 2, 2, 2, 44, 67, 5, 4, 3, 9, 45, 46, 12, 7, 2, 2, 46, 47, 9, 3, 2, 2, 47, 67, 5, 4, 3, 8, 48, 49, 12, 6, 2, 2, 49, 50, 9, 4, 2, 2, 50, 67, 5, 4, 3, 7, 51, 52, 12, 5, 2, 2, 52, 53, 9, 5, 2, 2, 53, 67, 5, 4, 3, 5, 54, 56, 12, 11, 2, 2, 55, 57, 7, 12, 2, 2, 56, 55, 3, 2, 2, 2, 56, 57, 3, 2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 59, 7, 25, 2, 2, 59, 67, 5, 14, 8, 2, 60, 62, 12, 9, 2, 2, 61, 63, 7, 12, 2, 2, 62, 61, 3, 2, 2, 2, 62, 63, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 65, 7, 27, 2, 2, 65, 67, 5, 20, 11, 2, 66, 42, 3, 2, 2, 2, 66, 45, 3, 2, 2, 2, 66, 48, 3, 2, 2, 2, 66, 51, 3, 2, 2, 2, 66, 54, 3, 2, 2, 2, 66, 60, 3, 2, 2, 2, 67, 70, 3, 2, 2, 2, 68, 66, 3, 2, 2, 2, 68, 69, 3, 2, 2, 2, 69, 5, 3, 2, 2, 2, 70, 68, 3, 2, 2, 2, 71, 76, 5, 12, 7, 2, 72, 76, 5, 16, 9, 2, 73, 76, 5, 14, 8, 2, 74, 76, 5, 8, 5, 2, 75, 71, 3, 2, 2, 2, 75, 72, 3, 2, 2, 2, 75, 73, 3, 2, 2, 2, 75, 74, 3, 2, 2, 2, 76, 7, 3, 2, 2, 2, 77, 78, 9, 6, 2, 2, 78, 9, 3, 2, 2, 2, 79, 80, 9, 7, 2, 2, 80, 11, 3, 2, 2, 2, 81, 82, 9, 8, 2, 2, 82, 13, 3, 2, 2, 2, 83, 84, 9, 9, 2, 2, 84, 15, 3, 2, 2, 2, 85, 86, 7, 32, 2, 2, 86, 17, 3, 2, 2, 2, 87, 96, 7, 4, 2, 2, 88, 93, 5, 4, 3, 2, 89, 90, 7, 6, 2, 2, 90, 92, 5, 4, 3, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3, 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 97, 3, 2, 2, 2, 95, 93, 3, 2, 2, 2, 96, 88, 3, 2, 2, 2, 96, 97, 3, 2, 2, 2, 97, 98, 3, 2, 2, 2, 98, 99, 7, 5, 2, 2, 99, 19, 3, 2, 2, 2, 100, 101, 7, 4, 2, 2, 101, 106, 5, 4, 3, 2, 102, 103, 7, 6, 2, 2, 103, 105, 5, 4, 3, 2, 104, 102, 3, 2, 2, 2, 105, 108, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2, 107, 109, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 109, 110, 7, 5, 2, 2, 110, 21, 3, 2, 2, 2, 11, 40, 56, 62, 66, 68, 75, 93, 96, 106]
@@ -1,59 +0,0 @@
SPACE=1
LR_BRACKET=2
RR_BRACKET=3
COMMA=4
SINGLE_QUOTE_SYMB=5
DOUBLE_QUOTE_SYMB=6
AND=7
OR=8
XOR=9
NOT=10
STAR=11
DIVIDE=12
MODULE=13
PLUS=14
MINUS=15
EQUAL=16
NOT_EQUAL=17
GREATER=18
GREATER_OR_EQUAL=19
LESS=20
LESS_GREATER=21
LESS_OR_EQUAL=22
LIKE=23
EXISTS=24
IN=25
TRUE=26
FALSE=27
DQUOTED_STRING_LITERAL=28
SQUOTED_STRING_LITERAL=29
INTEGER_LITERAL=30
IDENTIFIER=31
IDENTIFIER_WITH_NUMBER=32
FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
'('=2
')'=3
','=4
'\''=5
'"'=6
'AND'=7
'OR'=8
'XOR'=9
'NOT'=10
'*'=11
'/'=12
'%'=13
'+'=14
'-'=15
'='=16
'!='=17
'>'=18
'>='=19
'<'=20
'<>'=21
'<='=22
'LIKE'=23
'EXISTS'=24
'IN'=25
'TRUE'=26
'FALSE'=27

File diff suppressed because one or more lines are too long
@@ -1,59 +0,0 @@
SPACE=1
LR_BRACKET=2
RR_BRACKET=3
COMMA=4
SINGLE_QUOTE_SYMB=5
DOUBLE_QUOTE_SYMB=6
AND=7
OR=8
XOR=9
NOT=10
STAR=11
DIVIDE=12
MODULE=13
PLUS=14
MINUS=15
EQUAL=16
NOT_EQUAL=17
GREATER=18
GREATER_OR_EQUAL=19
LESS=20
LESS_GREATER=21
LESS_OR_EQUAL=22
LIKE=23
EXISTS=24
IN=25
TRUE=26
FALSE=27
DQUOTED_STRING_LITERAL=28
SQUOTED_STRING_LITERAL=29
INTEGER_LITERAL=30
IDENTIFIER=31
IDENTIFIER_WITH_NUMBER=32
FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
'('=2
')'=3
','=4
'\''=5
'"'=6
'AND'=7
'OR'=8
'XOR'=9
'NOT'=10
'*'=11
'/'=12
'%'=13
'+'=14
'-'=15
'='=16
'!='=17
'>'=18
'>='=19
'<'=20
'<>'=21
'<='=22
'LIKE'=23
'EXISTS'=24
'IN'=25
'TRUE'=26
'FALSE'=27
@@ -1,109 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.

package gen // CESQLParser

import "github.com/antlr/antlr4/runtime/Go/antlr"

type BaseCESQLParserVisitor struct {
    *antlr.BaseParseTreeVisitor
}

func (v *BaseCESQLParserVisitor) VisitCesql(ctx *CesqlContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitInExpression(ctx *InExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitAtomExpression(ctx *AtomExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitExistsExpression(ctx *ExistsExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitLikeExpression(ctx *LikeExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitSubExpression(ctx *SubExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBooleanAtom(ctx *BooleanAtomContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitIntegerAtom(ctx *IntegerAtomContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitStringAtom(ctx *StringAtomContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitIdentifier(ctx *IdentifierContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitStringLiteral(ctx *StringLiteralContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseCESQLParserVisitor) VisitSetExpression(ctx *SetExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}
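
The generated base visitor above exists so callers can override only the callbacks they care about. A sketch of that pattern; the gen import path is assumed from the vendored layout in this diff and may differ:

package main

import gen "github.com/cloudevents/sdk-go/sql/v2/gen"

// stringLiteralCollector overrides one callback and inherits VisitChildren
// for every other node via the embedded base visitor.
type stringLiteralCollector struct {
    *gen.BaseCESQLParserVisitor
    literals []string
}

func (v *stringLiteralCollector) VisitStringLiteral(ctx *gen.StringLiteralContext) interface{} {
    v.literals = append(v.literals, ctx.GetText())
    return nil
}

func main() {
    v := &stringLiteralCollector{BaseCESQLParserVisitor: &gen.BaseCESQLParserVisitor{}}
    _ = v
}
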
@@ -1,231 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.

package gen

import (
    "fmt"
    "unicode"

    "github.com/antlr/antlr4/runtime/Go/antlr"
)

// Suppress unused import error
var _ = fmt.Printf
var _ = unicode.IsLetter

var serializedLexerAtn = []uint16{
    3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 237,
    8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7,
    9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12,
    4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4,
    18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23,
    9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9,
    28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33,
    4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4,
    39, 9, 39, 4, 40, 9, 40, 3, 2, 6, 2, 83, 10, 2, 13, 2, 14, 2, 84, 3, 2,
    3, 2, 3, 3, 6, 3, 90, 10, 3, 13, 3, 14, 3, 91, 3, 4, 3, 4, 3, 4, 3, 4,
    3, 4, 3, 4, 7, 4, 100, 10, 4, 12, 4, 14, 4, 103, 11, 4, 3, 4, 3, 4, 3,
    5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 113, 10, 5, 12, 5, 14, 5, 116, 11,
    5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 7, 7, 124, 10, 7, 12, 7, 14, 7,
    127, 11, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12,
    3, 12, 3, 13, 3, 13, 5, 13, 141, 10, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3,
    15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17,
    3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3,
    23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 27,
    3, 27, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3,
    30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32,
    3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3,
    34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 6, 37, 217, 10, 37, 13, 37,
    14, 37, 218, 3, 38, 6, 38, 222, 10, 38, 13, 38, 14, 38, 223, 3, 39, 6,
    39, 227, 10, 39, 13, 39, 14, 39, 228, 3, 40, 3, 40, 7, 40, 233, 10, 40,
    12, 40, 14, 40, 236, 11, 40, 2, 2, 41, 3, 3, 5, 2, 7, 2, 9, 2, 11, 2, 13,
    2, 15, 4, 17, 5, 19, 6, 21, 7, 23, 8, 25, 2, 27, 9, 29, 10, 31, 11, 33,
    12, 35, 13, 37, 14, 39, 15, 41, 16, 43, 17, 45, 18, 47, 19, 49, 20, 51,
    21, 53, 22, 55, 23, 57, 24, 59, 25, 61, 26, 63, 27, 65, 28, 67, 29, 69,
    30, 71, 31, 73, 32, 75, 33, 77, 34, 79, 35, 3, 2, 10, 5, 2, 11, 12, 15,
    15, 34, 34, 5, 2, 50, 59, 67, 92, 99, 124, 4, 2, 36, 36, 94, 94, 4, 2,
    41, 41, 94, 94, 3, 2, 50, 59, 3, 2, 67, 92, 4, 2, 67, 92, 97, 97, 4, 2,
    67, 92, 99, 124, 2, 244, 2, 3, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3,
    2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 27,
    3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2,
    35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2,
    2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2,
    2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2,
    2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3,
    2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73,
    3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 3,
    82, 3, 2, 2, 2, 5, 89, 3, 2, 2, 2, 7, 93, 3, 2, 2, 2, 9, 106, 3, 2, 2,
    2, 11, 119, 3, 2, 2, 2, 13, 121, 3, 2, 2, 2, 15, 128, 3, 2, 2, 2, 17, 130,
    3, 2, 2, 2, 19, 132, 3, 2, 2, 2, 21, 134, 3, 2, 2, 2, 23, 136, 3, 2, 2,
    2, 25, 140, 3, 2, 2, 2, 27, 142, 3, 2, 2, 2, 29, 146, 3, 2, 2, 2, 31, 149,
    3, 2, 2, 2, 33, 153, 3, 2, 2, 2, 35, 157, 3, 2, 2, 2, 37, 159, 3, 2, 2,
    2, 39, 161, 3, 2, 2, 2, 41, 163, 3, 2, 2, 2, 43, 165, 3, 2, 2, 2, 45, 167,
    3, 2, 2, 2, 47, 169, 3, 2, 2, 2, 49, 172, 3, 2, 2, 2, 51, 174, 3, 2, 2,
    2, 53, 177, 3, 2, 2, 2, 55, 179, 3, 2, 2, 2, 57, 182, 3, 2, 2, 2, 59, 185,
    3, 2, 2, 2, 61, 190, 3, 2, 2, 2, 63, 197, 3, 2, 2, 2, 65, 200, 3, 2, 2,
    2, 67, 205, 3, 2, 2, 2, 69, 211, 3, 2, 2, 2, 71, 213, 3, 2, 2, 2, 73, 216,
    3, 2, 2, 2, 75, 221, 3, 2, 2, 2, 77, 226, 3, 2, 2, 2, 79, 230, 3, 2, 2,
    2, 81, 83, 9, 2, 2, 2, 82, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 82,
    3, 2, 2, 2, 84, 85, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 87, 8, 2, 2, 2,
    87, 4, 3, 2, 2, 2, 88, 90, 9, 3, 2, 2, 89, 88, 3, 2, 2, 2, 90, 91, 3, 2,
    2, 2, 91, 89, 3, 2, 2, 2, 91, 92, 3, 2, 2, 2, 92, 6, 3, 2, 2, 2, 93, 101,
    7, 36, 2, 2, 94, 95, 7, 94, 2, 2, 95, 100, 11, 2, 2, 2, 96, 97, 7, 36,
    2, 2, 97, 100, 7, 36, 2, 2, 98, 100, 10, 4, 2, 2, 99, 94, 3, 2, 2, 2, 99,
    96, 3, 2, 2, 2, 99, 98, 3, 2, 2, 2, 100, 103, 3, 2, 2, 2, 101, 99, 3, 2,
    2, 2, 101, 102, 3, 2, 2, 2, 102, 104, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2,
    104, 105, 7, 36, 2, 2, 105, 8, 3, 2, 2, 2, 106, 114, 7, 41, 2, 2, 107,
    108, 7, 94, 2, 2, 108, 113, 11, 2, 2, 2, 109, 110, 7, 41, 2, 2, 110, 113,
    7, 41, 2, 2, 111, 113, 10, 5, 2, 2, 112, 107, 3, 2, 2, 2, 112, 109, 3,
    2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 116, 3, 2, 2, 2, 114, 112, 3, 2, 2,
    2, 114, 115, 3, 2, 2, 2, 115, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117,
    118, 7, 41, 2, 2, 118, 10, 3, 2, 2, 2, 119, 120, 9, 6, 2, 2, 120, 12, 3,
    2, 2, 2, 121, 125, 9, 7, 2, 2, 122, 124, 9, 8, 2, 2, 123, 122, 3, 2, 2,
    2, 124, 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126,
    14, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 129, 7, 42, 2, 2, 129, 16, 3,
    2, 2, 2, 130, 131, 7, 43, 2, 2, 131, 18, 3, 2, 2, 2, 132, 133, 7, 46, 2,
    2, 133, 20, 3, 2, 2, 2, 134, 135, 7, 41, 2, 2, 135, 22, 3, 2, 2, 2, 136,
    137, 7, 36, 2, 2, 137, 24, 3, 2, 2, 2, 138, 141, 5, 21, 11, 2, 139, 141,
    5, 23, 12, 2, 140, 138, 3, 2, 2, 2, 140, 139, 3, 2, 2, 2, 141, 26, 3, 2,
    2, 2, 142, 143, 7, 67, 2, 2, 143, 144, 7, 80, 2, 2, 144, 145, 7, 70, 2,
    2, 145, 28, 3, 2, 2, 2, 146, 147, 7, 81, 2, 2, 147, 148, 7, 84, 2, 2, 148,
    30, 3, 2, 2, 2, 149, 150, 7, 90, 2, 2, 150, 151, 7, 81, 2, 2, 151, 152,
    7, 84, 2, 2, 152, 32, 3, 2, 2, 2, 153, 154, 7, 80, 2, 2, 154, 155, 7, 81,
    2, 2, 155, 156, 7, 86, 2, 2, 156, 34, 3, 2, 2, 2, 157, 158, 7, 44, 2, 2,
    158, 36, 3, 2, 2, 2, 159, 160, 7, 49, 2, 2, 160, 38, 3, 2, 2, 2, 161, 162,
    7, 39, 2, 2, 162, 40, 3, 2, 2, 2, 163, 164, 7, 45, 2, 2, 164, 42, 3, 2,
    2, 2, 165, 166, 7, 47, 2, 2, 166, 44, 3, 2, 2, 2, 167, 168, 7, 63, 2, 2,
    168, 46, 3, 2, 2, 2, 169, 170, 7, 35, 2, 2, 170, 171, 7, 63, 2, 2, 171,
    48, 3, 2, 2, 2, 172, 173, 7, 64, 2, 2, 173, 50, 3, 2, 2, 2, 174, 175, 7,
    64, 2, 2, 175, 176, 7, 63, 2, 2, 176, 52, 3, 2, 2, 2, 177, 178, 7, 62,
    2, 2, 178, 54, 3, 2, 2, 2, 179, 180, 7, 62, 2, 2, 180, 181, 7, 64, 2, 2,
    181, 56, 3, 2, 2, 2, 182, 183, 7, 62, 2, 2, 183, 184, 7, 63, 2, 2, 184,
    58, 3, 2, 2, 2, 185, 186, 7, 78, 2, 2, 186, 187, 7, 75, 2, 2, 187, 188,
    7, 77, 2, 2, 188, 189, 7, 71, 2, 2, 189, 60, 3, 2, 2, 2, 190, 191, 7, 71,
    2, 2, 191, 192, 7, 90, 2, 2, 192, 193, 7, 75, 2, 2, 193, 194, 7, 85, 2,
    2, 194, 195, 7, 86, 2, 2, 195, 196, 7, 85, 2, 2, 196, 62, 3, 2, 2, 2, 197,
    198, 7, 75, 2, 2, 198, 199, 7, 80, 2, 2, 199, 64, 3, 2, 2, 2, 200, 201,
    7, 86, 2, 2, 201, 202, 7, 84, 2, 2, 202, 203, 7, 87, 2, 2, 203, 204, 7,
    71, 2, 2, 204, 66, 3, 2, 2, 2, 205, 206, 7, 72, 2, 2, 206, 207, 7, 67,
    2, 2, 207, 208, 7, 78, 2, 2, 208, 209, 7, 85, 2, 2, 209, 210, 7, 71, 2,
    2, 210, 68, 3, 2, 2, 2, 211, 212, 5, 7, 4, 2, 212, 70, 3, 2, 2, 2, 213,
    214, 5, 9, 5, 2, 214, 72, 3, 2, 2, 2, 215, 217, 5, 11, 6, 2, 216, 215,
    3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2,
    2, 2, 219, 74, 3, 2, 2, 2, 220, 222, 9, 9, 2, 2, 221, 220, 3, 2, 2, 2,
    222, 223, 3, 2, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 2, 2, 224,
    76, 3, 2, 2, 2, 225, 227, 9, 3, 2, 2, 226, 225, 3, 2, 2, 2, 227, 228, 3,
    2, 2, 2, 228, 226, 3, 2, 2, 2, 228, 229, 3, 2, 2, 2, 229, 78, 3, 2, 2,
    2, 230, 234, 9, 7, 2, 2, 231, 233, 9, 8, 2, 2, 232, 231, 3, 2, 2, 2, 233,
    236, 3, 2, 2, 2, 234, 232, 3, 2, 2, 2, 234, 235, 3, 2, 2, 2, 235, 80, 3,
    2, 2, 2, 236, 234, 3, 2, 2, 2, 15, 2, 84, 91, 99, 101, 112, 114, 125, 140,
    218, 223, 228, 234, 3, 8, 2, 2,
}

var lexerChannelNames = []string{
    "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
}

var lexerModeNames = []string{
    "DEFAULT_MODE",
}

var lexerLiteralNames = []string{
    "", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'", "'NOT'",
    "'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='", "'<'",
    "'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
}

var lexerSymbolicNames = []string{
    "", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
    "DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
    "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS",
    "LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE",
    "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
    "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
}

var lexerRuleNames = []string{
    "SPACE", "ID_LITERAL", "DQUOTA_STRING", "SQUOTA_STRING", "INT_DIGIT", "FN_LITERAL",
    "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB", "DOUBLE_QUOTE_SYMB",
    "QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE", "PLUS",
    "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS", "LESS_GREATER",
    "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE", "DQUOTED_STRING_LITERAL",
    "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL", "IDENTIFIER", "IDENTIFIER_WITH_NUMBER",
    "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
}

type CESQLParserLexer struct {
    *antlr.BaseLexer
    channelNames []string
    modeNames    []string
    // TODO: EOF string
}

// NewCESQLParserLexer produces a new lexer instance for the optional input antlr.CharStream.
//
// The *CESQLParserLexer instance produced may be reused by calling the SetInputStream method.
// The initial lexer configuration is expensive to construct, and the object is not thread-safe;
// however, if used within a Golang sync.Pool, the construction cost amortizes well and the
// objects can be used in a thread-safe manner.
func NewCESQLParserLexer(input antlr.CharStream) *CESQLParserLexer {
    l := new(CESQLParserLexer)
    lexerDeserializer := antlr.NewATNDeserializer(nil)
    lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
    lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState))
    for index, ds := range lexerAtn.DecisionToState {
        lexerDecisionToDFA[index] = antlr.NewDFA(ds, index)
    }
    l.BaseLexer = antlr.NewBaseLexer(input)
    l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache())

    l.channelNames = lexerChannelNames
    l.modeNames = lexerModeNames
    l.RuleNames = lexerRuleNames
    l.LiteralNames = lexerLiteralNames
    l.SymbolicNames = lexerSymbolicNames
    l.GrammarFileName = "CESQLParser.g4"
    // TODO: l.EOF = antlr.TokenEOF

    return l
}

// CESQLParserLexer tokens.
const (
    CESQLParserLexerSPACE                               = 1
    CESQLParserLexerLR_BRACKET                          = 2
    CESQLParserLexerRR_BRACKET                          = 3
    CESQLParserLexerCOMMA                               = 4
    CESQLParserLexerSINGLE_QUOTE_SYMB                   = 5
    CESQLParserLexerDOUBLE_QUOTE_SYMB                   = 6
    CESQLParserLexerAND                                 = 7
    CESQLParserLexerOR                                  = 8
    CESQLParserLexerXOR                                 = 9
    CESQLParserLexerNOT                                 = 10
    CESQLParserLexerSTAR                                = 11
    CESQLParserLexerDIVIDE                              = 12
    CESQLParserLexerMODULE                              = 13
    CESQLParserLexerPLUS                                = 14
    CESQLParserLexerMINUS                               = 15
    CESQLParserLexerEQUAL                               = 16
    CESQLParserLexerNOT_EQUAL                           = 17
    CESQLParserLexerGREATER                             = 18
    CESQLParserLexerGREATER_OR_EQUAL                    = 19
    CESQLParserLexerLESS                                = 20
    CESQLParserLexerLESS_GREATER                        = 21
    CESQLParserLexerLESS_OR_EQUAL                       = 22
    CESQLParserLexerLIKE                                = 23
    CESQLParserLexerEXISTS                              = 24
    CESQLParserLexerIN                                  = 25
    CESQLParserLexerTRUE                                = 26
    CESQLParserLexerFALSE                               = 27
    CESQLParserLexerDQUOTED_STRING_LITERAL              = 28
    CESQLParserLexerSQUOTED_STRING_LITERAL              = 29
    CESQLParserLexerINTEGER_LITERAL                     = 30
    CESQLParserLexerIDENTIFIER                          = 31
    CESQLParserLexerIDENTIFIER_WITH_NUMBER              = 32
    CESQLParserLexerFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
)

File diff suppressed because it is too large
@@ -1,86 +0,0 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.

package gen // CESQLParser

import "github.com/antlr/antlr4/runtime/Go/antlr"

// A complete Visitor for a parse tree produced by CESQLParserParser.
type CESQLParserVisitor interface {
    antlr.ParseTreeVisitor

    // Visit a parse tree produced by CESQLParserParser#cesql.
    VisitCesql(ctx *CesqlContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#inExpression.
    VisitInExpression(ctx *InExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#binaryComparisonExpression.
    VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#atomExpression.
    VisitAtomExpression(ctx *AtomExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#existsExpression.
    VisitExistsExpression(ctx *ExistsExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#binaryLogicExpression.
    VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#likeExpression.
    VisitLikeExpression(ctx *LikeExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#functionInvocationExpression.
    VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#binaryMultiplicativeExpression.
    VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#unaryLogicExpression.
    VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#unaryNumericExpression.
    VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#subExpression.
    VisitSubExpression(ctx *SubExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#binaryAdditiveExpression.
    VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#booleanAtom.
    VisitBooleanAtom(ctx *BooleanAtomContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#integerAtom.
    VisitIntegerAtom(ctx *IntegerAtomContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#stringAtom.
    VisitStringAtom(ctx *StringAtomContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#identifierAtom.
    VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#identifier.
    VisitIdentifier(ctx *IdentifierContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#functionIdentifier.
    VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#booleanLiteral.
    VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#stringLiteral.
    VisitStringLiteral(ctx *StringLiteralContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#integerLiteral.
    VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#functionParameterList.
    VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{}

    // Visit a parse tree produced by CESQLParserParser#setExpression.
    VisitSetExpression(ctx *SetExpressionContext) interface{}
}
@@ -1,10 +0,0 @@
module github.com/cloudevents/sdk-go/sql/v2

go 1.14

require (
    github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1
    github.com/cloudevents/sdk-go/v2 v2.7.0
    github.com/stretchr/testify v1.5.1
    sigs.k8s.io/yaml v1.2.0
)
@ -1,55 +0,0 @@
github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1 h1:9K5yytxEEQc4yIn6c1rvQD6qQilQn9mYIF7pXKPT8i4=
github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
github.com/cloudevents/sdk-go/v2 v2.7.0 h1:Pt+cOKWNG0tZZKRzuvfVsxcWArO0eq/UPKUxskyuSb8=
github.com/cloudevents/sdk-go/v2 v2.7.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@ -1,44 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package parser

import (
	"unicode"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// Taken from https://github.com/antlr/antlr4/blob/master/doc/resources/case_changing_stream.go

// CaseChangingStream wraps an existing CharStream and upper-cases or
// lower-cases the input before it is tokenized.
type CaseChangingStream struct {
	antlr.CharStream

	upper bool
}

// NewCaseChangingStream returns a new CaseChangingStream that forces
// all tokens read from the underlying stream to be either upper case
// or lower case, based on the upper argument.
func NewCaseChangingStream(in antlr.CharStream, upper bool) *CaseChangingStream {
	return &CaseChangingStream{in, upper}
}

// LA gets the value of the symbol at offset from the current position
// from the underlying CharStream and converts it to either upper case
// or lower case.
func (is *CaseChangingStream) LA(offset int) int {
	in := is.CharStream.LA(offset)
	if in < 0 {
		// Sentinel values such as antlr.TokenEOF, which is -1
		return in
	}
	if is.upper {
		return int(unicode.ToUpper(rune(in)))
	}
	return int(unicode.ToLower(rune(in)))
}
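As a usage sketch (the input string is illustrative, not from this repository), the wrapper composes with any ANTLR input stream so the lexer only ever sees one case:

package parser

import "github.com/antlr/antlr4/runtime/Go/antlr"

// exampleUpperStream is an illustrative sketch: wrap a plain input stream
// so every symbol the lexer reads is upper-cased.
func exampleUpperStream() antlr.CharStream {
	var is antlr.CharStream = antlr.NewInputStream("exists my_attribute")
	// After wrapping, LA(1) yields 'E' instead of 'e'.
	return NewCaseChangingStream(is, true)
}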
@ -1,343 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package parser

import (
	"strconv"
	"strings"

	"github.com/antlr/antlr4/runtime/Go/antlr"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/expression"
	"github.com/cloudevents/sdk-go/sql/v2/gen"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

type expressionVisitor struct {
	parsingErrors []error
}

var _ gen.CESQLParserVisitor = (*expressionVisitor)(nil)

func NewExpressionVisitor() gen.CESQLParserVisitor {
	return &expressionVisitor{}
}

// antlr.ParseTreeVisitor implementation

func (v *expressionVisitor) Visit(tree antlr.ParseTree) interface{} {
	// If you're wondering why this dispatch had to be implemented manually:
	// https://github.com/antlr/antlr4/issues/2504
	switch tree.(type) {
	case *gen.CesqlContext:
		return v.VisitCesql(tree.(*gen.CesqlContext))
	case *gen.AtomExpressionContext:
		return v.VisitAtomExpression(tree.(*gen.AtomExpressionContext))
	case *gen.UnaryNumericExpressionContext:
		return v.VisitUnaryNumericExpression(tree.(*gen.UnaryNumericExpressionContext))
	case *gen.UnaryLogicExpressionContext:
		return v.VisitUnaryLogicExpression(tree.(*gen.UnaryLogicExpressionContext))
	case *gen.BooleanAtomContext:
		return v.VisitBooleanAtom(tree.(*gen.BooleanAtomContext))
	case *gen.BooleanLiteralContext:
		return v.VisitBooleanLiteral(tree.(*gen.BooleanLiteralContext))
	case *gen.IntegerAtomContext:
		return v.VisitIntegerAtom(tree.(*gen.IntegerAtomContext))
	case *gen.IntegerLiteralContext:
		return v.VisitIntegerLiteral(tree.(*gen.IntegerLiteralContext))
	case *gen.StringAtomContext:
		return v.VisitStringAtom(tree.(*gen.StringAtomContext))
	case *gen.StringLiteralContext:
		return v.VisitStringLiteral(tree.(*gen.StringLiteralContext))
	case *gen.ExistsExpressionContext:
		return v.VisitExistsExpression(tree.(*gen.ExistsExpressionContext))
	case *gen.InExpressionContext:
		return v.VisitInExpression(tree.(*gen.InExpressionContext))
	case *gen.IdentifierAtomContext:
		return v.VisitIdentifierAtom(tree.(*gen.IdentifierAtomContext))
	case *gen.IdentifierContext:
		return v.VisitIdentifier(tree.(*gen.IdentifierContext))
	case *gen.BinaryMultiplicativeExpressionContext:
		return v.VisitBinaryMultiplicativeExpression(tree.(*gen.BinaryMultiplicativeExpressionContext))
	case *gen.BinaryAdditiveExpressionContext:
		return v.VisitBinaryAdditiveExpression(tree.(*gen.BinaryAdditiveExpressionContext))
	case *gen.SubExpressionContext:
		return v.VisitSubExpression(tree.(*gen.SubExpressionContext))
	case *gen.BinaryLogicExpressionContext:
		return v.VisitBinaryLogicExpression(tree.(*gen.BinaryLogicExpressionContext))
	case *gen.BinaryComparisonExpressionContext:
		return v.VisitBinaryComparisonExpression(tree.(*gen.BinaryComparisonExpressionContext))
	case *gen.LikeExpressionContext:
		return v.VisitLikeExpression(tree.(*gen.LikeExpressionContext))
	case *gen.FunctionInvocationExpressionContext:
		return v.VisitFunctionInvocationExpression(tree.(*gen.FunctionInvocationExpressionContext))
	}
	return nil
}

func (v *expressionVisitor) VisitChildren(node antlr.RuleNode) interface{} {
	return v.Visit(node.GetChild(0).(antlr.ParseTree))
}

func (v *expressionVisitor) VisitTerminal(node antlr.TerminalNode) interface{} {
	// We never visit terminal nodes
	return nil
}

func (v *expressionVisitor) VisitErrorNode(node antlr.ErrorNode) interface{} {
	// We already collect errors using the error listener
	return nil
}

// gen.CESQLParserVisitor implementation

func (v *expressionVisitor) VisitInExpression(ctx *gen.InExpressionContext) interface{} {
	leftExpression := v.Visit(ctx.Expression()).(cesql.Expression)

	var setExpression []cesql.Expression

	for _, expr := range ctx.SetExpression().(*gen.SetExpressionContext).AllExpression() {
		setExpression = append(setExpression, v.Visit(expr).(cesql.Expression))
	}

	if ctx.NOT() != nil {
		return expression.NewNotExpression(expression.NewInExpression(leftExpression, setExpression))
	}

	return expression.NewInExpression(leftExpression, setExpression)
}

func (v *expressionVisitor) VisitBinaryComparisonExpression(ctx *gen.BinaryComparisonExpressionContext) interface{} {
	if ctx.LESS() != nil {
		return expression.NewLessExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.LESS_OR_EQUAL() != nil {
		return expression.NewLessOrEqualExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.GREATER() != nil {
		return expression.NewGreaterExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.GREATER_OR_EQUAL() != nil {
		return expression.NewGreaterOrEqualExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.EQUAL() != nil {
		return expression.NewEqualExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else {
		return expression.NewNotEqualExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	}
}

func (v *expressionVisitor) VisitExistsExpression(ctx *gen.ExistsExpressionContext) interface{} {
	return expression.NewExistsExpression(strings.ToLower(ctx.Identifier().GetText()))
}

func (v *expressionVisitor) VisitBinaryLogicExpression(ctx *gen.BinaryLogicExpressionContext) interface{} {
	if ctx.AND() != nil {
		return expression.NewAndExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.OR() != nil {
		return expression.NewOrExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else {
		return expression.NewXorExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	}
}

func (v *expressionVisitor) VisitLikeExpression(ctx *gen.LikeExpressionContext) interface{} {
	patternContext := ctx.StringLiteral().(*gen.StringLiteralContext)

	var pattern string
	if patternContext.DQUOTED_STRING_LITERAL() != nil {
		// Parse double quoted string
		pattern = dQuotedStringToString(patternContext.DQUOTED_STRING_LITERAL().GetText())
	} else {
		// Parse single quoted string
		pattern = sQuotedStringToString(patternContext.SQUOTED_STRING_LITERAL().GetText())
	}

	likeExpression, err := expression.NewLikeExpression(v.Visit(ctx.Expression()).(cesql.Expression), pattern)
	if err != nil {
		v.parsingErrors = append(v.parsingErrors, err)
		return noopExpression{}
	}

	if ctx.NOT() != nil {
		return expression.NewNotExpression(likeExpression)
	}

	return likeExpression
}

func (v *expressionVisitor) VisitFunctionInvocationExpression(ctx *gen.FunctionInvocationExpressionContext) interface{} {
	paramsCtx := ctx.FunctionParameterList().(*gen.FunctionParameterListContext)

	name := ctx.FunctionIdentifier().GetText()

	var args []cesql.Expression
	for _, expr := range paramsCtx.AllExpression() {
		args = append(args, v.Visit(expr).(cesql.Expression))
	}

	return expression.NewFunctionInvocationExpression(strings.ToUpper(name), args)
}

func (v *expressionVisitor) VisitBinaryMultiplicativeExpression(ctx *gen.BinaryMultiplicativeExpressionContext) interface{} {
	if ctx.STAR() != nil {
		return expression.NewMultiplicationExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else if ctx.MODULE() != nil {
		return expression.NewModuleExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else {
		return expression.NewDivisionExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	}
}

func (v *expressionVisitor) VisitUnaryLogicExpression(ctx *gen.UnaryLogicExpressionContext) interface{} {
	return expression.NewNotExpression(
		v.Visit(ctx.Expression()).(cesql.Expression),
	)
}

func (v *expressionVisitor) VisitUnaryNumericExpression(ctx *gen.UnaryNumericExpressionContext) interface{} {
	return expression.NewNegateExpression(
		v.Visit(ctx.Expression()).(cesql.Expression),
	)
}

func (v *expressionVisitor) VisitSubExpression(ctx *gen.SubExpressionContext) interface{} {
	return v.Visit(ctx.Expression())
}

func (v *expressionVisitor) VisitBinaryAdditiveExpression(ctx *gen.BinaryAdditiveExpressionContext) interface{} {
	if ctx.PLUS() != nil {
		return expression.NewSumExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	} else {
		return expression.NewDifferenceExpression(
			v.Visit(ctx.Expression(0)).(cesql.Expression),
			v.Visit(ctx.Expression(1)).(cesql.Expression),
		)
	}
}

func (v *expressionVisitor) VisitIdentifier(ctx *gen.IdentifierContext) interface{} {
	return expression.NewIdentifierExpression(strings.ToLower(ctx.GetText()))
}

func (v *expressionVisitor) VisitBooleanLiteral(ctx *gen.BooleanLiteralContext) interface{} {
	return expression.NewLiteralExpression(ctx.TRUE() != nil)
}

func (v *expressionVisitor) VisitStringLiteral(ctx *gen.StringLiteralContext) interface{} {
	var str string
	if ctx.DQUOTED_STRING_LITERAL() != nil {
		// Parse double quoted string
		str = dQuotedStringToString(ctx.DQUOTED_STRING_LITERAL().GetText())
	} else {
		// Parse single quoted string
		str = sQuotedStringToString(ctx.SQUOTED_STRING_LITERAL().GetText())
	}

	return expression.NewLiteralExpression(str)
}

func (v *expressionVisitor) VisitIntegerLiteral(ctx *gen.IntegerLiteralContext) interface{} {
	val, err := strconv.Atoi(ctx.GetText())
	if err != nil {
		v.parsingErrors = append(v.parsingErrors, err)
	}
	return expression.NewLiteralExpression(int32(val))
}

// gen.CESQLParserVisitor implementation - noop methods

func (v *expressionVisitor) VisitCesql(ctx *gen.CesqlContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitAtomExpression(ctx *gen.AtomExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitBooleanAtom(ctx *gen.BooleanAtomContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitIntegerAtom(ctx *gen.IntegerAtomContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitStringAtom(ctx *gen.StringAtomContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitIdentifierAtom(ctx *gen.IdentifierAtomContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitSetExpression(ctx *gen.SetExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitFunctionIdentifier(ctx *gen.FunctionIdentifierContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *expressionVisitor) VisitFunctionParameterList(ctx *gen.FunctionParameterListContext) interface{} {
	return v.VisitChildren(ctx)
}

// noopExpression keeps the tree walk going even when parsing of a
// sub-expression fails; the real error is recorded in parsingErrors.

type noopExpression struct{}

func (n noopExpression) Evaluate(cloudevents.Event) (interface{}, error) {
	return 0, nil
}

// Utilities

func dQuotedStringToString(str string) string {
	str = str[1 : len(str)-1]
	return strings.ReplaceAll(str, "\\\"", "\"")
}

func sQuotedStringToString(str string) string {
	str = str[1 : len(str)-1]
	return strings.ReplaceAll(str, "\\'", "'")
}
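To make the mapping concrete, here is a sketch (not code from this repository) of the expression tree the visitor above would assemble for the input subject = 'orders' AND sequence < 10, using only constructors referenced in this file:

package parser

import (
	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/expression"
)

// exampleVisitorOutput shows, illustratively, the tree shape for
// "subject = 'orders' AND sequence < 10".
func exampleVisitorOutput() cesql.Expression {
	return expression.NewAndExpression(
		expression.NewEqualExpression(
			expression.NewIdentifierExpression("subject"),
			expression.NewLiteralExpression("orders"),
		),
		expression.NewLessExpression(
			expression.NewIdentifierExpression("sequence"),
			expression.NewLiteralExpression(int32(10)),
		),
	)
}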
@ -1,74 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package parser

import (
	"errors"
	"fmt"
	"strings"

	"github.com/antlr/antlr4/runtime/Go/antlr"

	"github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/gen"
)

type Parser struct {
	// TODO parser options
}

func (p *Parser) Parse(input string) (v2.Expression, error) {
	var is antlr.CharStream = antlr.NewInputStream(input)
	is = NewCaseChangingStream(is, true)

	// Create the CESQL lexer
	lexer := gen.NewCESQLParserLexer(is)
	var stream antlr.TokenStream = antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)

	// Create the CESQL parser
	antlrParser := gen.NewCESQLParserParser(stream)
	antlrParser.RemoveErrorListeners()
	collectingErrorListener := errorListener{}
	antlrParser.AddErrorListener(&collectingErrorListener)

	// Finally walk the tree
	visitor := expressionVisitor{}
	result := antlrParser.Cesql().Accept(&visitor)

	if result == nil {
		return nil, mergeErrs(append(collectingErrorListener.errs, visitor.parsingErrors...))
	}

	return result.(v2.Expression), mergeErrs(append(collectingErrorListener.errs, visitor.parsingErrors...))
}

type errorListener struct {
	antlr.DefaultErrorListener
	errs []error
}

func (d *errorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
	d.errs = append(d.errs, fmt.Errorf("syntax error: %v", e.GetMessage()))
}

func mergeErrs(errs []error) error {
	if len(errs) == 0 {
		return nil
	}

	var errStrings []string
	for _, err := range errs {
		errStrings = append(errStrings, err.Error())
	}

	return errors.New(strings.Join(errStrings, ","))
}

var defaultParser = Parser{}

func Parse(input string) (v2.Expression, error) {
	return defaultParser.Parse(input)
}
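A minimal caller-side sketch (the expression text is an example, not taken from this repository):

package parser

// exampleParse shows how the package-level Parse helper above is invoked.
func exampleParse() error {
	expr, err := Parse("EXISTS myextension AND source LIKE '%commerce%'")
	if err != nil {
		return err // lexer, parser, and visitor errors, merged by mergeErrs
	}
	_ = expr // a v2.Expression, ready to evaluate against an event
	return nil
}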
@ -1,107 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package runtime

import (
	"errors"
	"strings"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/function"
)

type functionItem struct {
	fixedArgsFunctions map[int]cesql.Function
	variadicFunction   cesql.Function
}

type functionTable map[string]*functionItem

func (table functionTable) AddFunction(function cesql.Function) error {
	item := table[function.Name()]
	if item == nil {
		item = &functionItem{
			fixedArgsFunctions: make(map[int]cesql.Function),
		}
		table[function.Name()] = item
	}

	if function.IsVariadic() {
		if item.variadicFunction != nil {
			return errors.New("cannot add the variadic function, " +
				"because there is already another variadic function defined with the same name")
		}
		maxArity := -1
		for a := range item.fixedArgsFunctions {
			if a > maxArity {
				maxArity = a
			}
		}
		if maxArity >= function.Arity() {
			return errors.New("cannot add the variadic function, " +
				"because there is already another function defined with the same name and same or greater arity")
		}

		item.variadicFunction = function
		return nil
	} else {
		if _, ok := item.fixedArgsFunctions[function.Arity()]; ok {
			return errors.New("cannot add the function, " +
				"because there is already another function defined with the same arity and same name")
		}

		item.fixedArgsFunctions[function.Arity()] = function
		return nil
	}
}

func (table functionTable) ResolveFunction(name string, args int) cesql.Function {
	item := table[strings.ToUpper(name)]
	if item == nil {
		return nil
	}

	if fn, ok := item.fixedArgsFunctions[args]; ok {
		return fn
	}

	if item.variadicFunction == nil || item.variadicFunction.Arity() > args {
		return nil
	}

	return item.variadicFunction
}

var globalFunctionTable = functionTable{}

func init() {
	for _, fn := range []cesql.Function{
		function.IntFunction,
		function.BoolFunction,
		function.StringFunction,
		function.IsBoolFunction,
		function.IsIntFunction,
		function.AbsFunction,
		function.LengthFunction,
		function.ConcatFunction,
		function.ConcatWSFunction,
		function.LowerFunction,
		function.UpperFunction,
		function.TrimFunction,
		function.LeftFunction,
		function.RightFunction,
		function.SubstringFunction,
		function.SubstringWithLengthFunction,
	} {
		if err := globalFunctionTable.AddFunction(fn); err != nil {
			panic(err)
		}
	}
}

func ResolveFunction(name string, args int) cesql.Function {
	return globalFunctionTable.ResolveFunction(name, args)
}
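Resolution, sketched (illustrative usage only; CONCAT is assumed variadic as registered above): a fixed-arity overload is preferred, and the variadic form matches only calls with at least its declared arity:

package runtime

// exampleResolve shows, illustratively, how lookups on the table behave.
func exampleResolve() {
	if fn := ResolveFunction("concat", 3); fn != nil { // name is upper-cased internally
		_ = fn.Name() // "CONCAT", assuming it was registered as variadic
	}
	// An unknown name or an unsupported arity yields nil.
	_ = ResolveFunction("no_such_function", 1)
}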
@ -1,47 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package v2

type Type uint8

const (
	StringType Type = iota
	IntegerType
	BooleanType
	AnyType
)

func TypePtr(t Type) *Type {
	return &t
}

func (t Type) IsSameType(val interface{}) bool {
	return TypeFromVal(val) == t
}

func (t Type) String() string {
	switch t {
	case IntegerType:
		return "Integer"
	case BooleanType:
		return "Boolean"
	case StringType:
		return "String"
	}
	return "Any"
}

func TypeFromVal(val interface{}) Type {
	switch val.(type) {
	case string:
		return StringType
	case int32:
		return IntegerType
	case bool:
		return BooleanType
	}
	return AnyType
}
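A small illustrative check of the mapping above: CESQL integers are Go int32, so a plain int falls through to AnyType:

package v2

// exampleTypes exercises TypeFromVal on a few values (illustrative only).
func exampleTypes() bool {
	return TypeFromVal(int32(1)) == IntegerType &&
		TypeFromVal(1) == AnyType && // a plain int is not a CESQL integer
		TypeFromVal("x") == StringType
}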
@ -1,65 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package utils

import (
	"fmt"
	"strconv"
	"strings"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
)

func Cast(val interface{}, target cesql.Type) (interface{}, error) {
	if target.IsSameType(val) {
		return val, nil
	}
	switch target {
	case cesql.StringType:
		switch val.(type) {
		case int32:
			return strconv.Itoa(int(val.(int32))), nil
		case bool:
			if val.(bool) {
				return "true", nil
			} else {
				return "false", nil
			}
		}
		// Casting to string is always defined
		return fmt.Sprintf("%v", val), nil
	case cesql.IntegerType:
		switch val.(type) {
		case string:
			v, err := strconv.Atoi(val.(string))
			if err != nil {
				err = fmt.Errorf("cannot cast from String to Integer: %w", err)
			}
			return int32(v), err
		}
		return 0, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
	case cesql.BooleanType:
		switch val.(type) {
		case string:
			lowerCase := strings.ToLower(val.(string))
			if lowerCase == "true" {
				return true, nil
			} else if lowerCase == "false" {
				return false, nil
			}
			return false, fmt.Errorf("cannot cast String to Boolean, actual value: %v", val)
		}
		return false, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
	}

	// AnyType doesn't need casting
	return val, nil
}

func CanCast(val interface{}, target cesql.Type) bool {
	_, err := Cast(val, target)
	return err == nil
}
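Illustrative use of the helpers above: "10" casts to int32(10), while "ten" fails and CanCast reports false:

package utils

import cesql "github.com/cloudevents/sdk-go/sql/v2"

// exampleCast sketches the success and failure paths of Cast.
func exampleCast() (interface{}, bool) {
	v, _ := Cast("10", cesql.IntegerType)   // int32(10), nil
	ok := CanCast("ten", cesql.IntegerType) // false: strconv.Atoi fails
	return v, ok
}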
@ -1,67 +0,0 @@
/*
Copyright 2021 The CloudEvents Authors
SPDX-License-Identifier: Apache-2.0
*/

package utils

import (
	"fmt"
	"time"

	cloudevents "github.com/cloudevents/sdk-go/v2"
	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/types"
)

func GetAttribute(event cloudevents.Event, attributeName string) interface{} {
	var val interface{}

	if a := spec.V1.Attribute(attributeName); a != nil { // Standard attribute
		val = a.Get(event.Context)
	} else {
		val = event.Extensions()[attributeName]
	}

	if val == nil {
		return nil
	}

	// Type coercion
	switch val.(type) {
	case bool, int32, string:
		return val
	case int8:
		return int32(val.(int8))
	case uint8:
		return int32(val.(uint8))
	case int16:
		return int32(val.(int16))
	case uint16:
		return int32(val.(uint16))
	case uint32:
		return int32(val.(uint32))
	case int64:
		return int32(val.(int64))
	case uint64:
		return int32(val.(uint64))
	case time.Time:
		return val.(time.Time).Format(time.RFC3339Nano)
	case []byte:
		return types.FormatBinary(val.([]byte))
	}
	return fmt.Sprintf("%v", val)
}

func ContainsAttribute(event cloudevents.Event, attributeName string) bool {
	if attributeName == "specversion" || attributeName == "id" || attributeName == "source" || attributeName == "type" {
		return true
	}

	if attr := spec.V1.Attribute(attributeName); attr != nil {
		return attr.Get(event.Context) != nil
	}

	_, ok := event.Extensions()[attributeName]
	return ok
}
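A sketch of how these helpers read event data (the extension name is illustrative):

package utils

import cloudevents "github.com/cloudevents/sdk-go/v2"

// exampleAttribute reads one standard attribute and probes one extension.
func exampleAttribute(event cloudevents.Event) (interface{}, bool) {
	id := GetAttribute(event, "id")                // standard attributes come back as string
	has := ContainsAttribute(event, "myextension") // true when the extension is set
	return id, has
}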
@ -22,7 +22,6 @@ import (
 	"fmt"
 	"regexp"
 
-	cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
 	corev1 "k8s.io/api/core/v1"
 	"knative.dev/pkg/apis"
 	"knative.dev/pkg/kmp"
@ -194,17 +193,6 @@ func ValidateSubscriptionAPIFiltersList(ctx context.Context, filters []Subscript
 	return errs
 }
 
-func ValidateCESQLExpression(ctx context.Context, expression string) (errs *apis.FieldError) {
-	if expression == "" {
-		return nil
-	}
-	_, err := cesqlparser.Parse(expression)
-	if err != nil {
-		return apis.ErrInvalidValue(expression, apis.CurrentField, err.Error())
-	}
-	return nil
-}
-
 func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPIFilter) (errs *apis.FieldError) {
 	if filter == nil {
 		return nil
@ -223,8 +211,6 @@ func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPI
 		ValidateSubscriptionAPIFiltersList(ctx, filter.Any).ViaField("any"),
 	).Also(
 		ValidateSubscriptionAPIFilter(ctx, filter.Not).ViaField("not"),
-	).Also(
-		ValidateCESQLExpression(ctx, filter.SQL).ViaField("sql"),
 	)
 	return errs
 }
@ -8,8 +8,6 @@ contrib.go.opencensus.io/exporter/prometheus
 github.com/PuerkitoBio/purell
 # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
 github.com/PuerkitoBio/urlesc
-# github.com/antlr/antlr4 v0.0.0-20210105192202-5c2b686f95e1
-github.com/antlr/antlr4/runtime/Go/antlr
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
 # github.com/blendle/zapdriver v1.3.1
@ -23,14 +21,6 @@ github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
 # github.com/cespare/xxhash/v2 v2.1.1
 github.com/cespare/xxhash/v2
-# github.com/cloudevents/sdk-go/sql/v2 v2.7.0
-github.com/cloudevents/sdk-go/sql/v2
-github.com/cloudevents/sdk-go/sql/v2/expression
-github.com/cloudevents/sdk-go/sql/v2/function
-github.com/cloudevents/sdk-go/sql/v2/gen
-github.com/cloudevents/sdk-go/sql/v2/parser
-github.com/cloudevents/sdk-go/sql/v2/runtime
-github.com/cloudevents/sdk-go/sql/v2/utils
 # github.com/cloudevents/sdk-go/v2 v2.7.0
 github.com/cloudevents/sdk-go/v2
 github.com/cloudevents/sdk-go/v2/binding
@ -746,7 +736,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/eventing v0.28.1-0.20211222075718-4d705ede0dfa
+# knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455
 ## explicit
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
@ -775,7 +765,7 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
 # knative.dev/hack v0.0.0-20211222071919-abd085fc43de
 ## explicit
 knative.dev/hack
-# knative.dev/networking v0.0.0-20211222131718-2d4af360eb01
+# knative.dev/networking v0.0.0-20211223013028-62388a5f2853
 ## explicit
 knative.dev/networking/pkg
 knative.dev/networking/pkg/apis/networking